Revert "sched: replace some global variables to macro"

The sched implementation does not depend on the macro abstraction, so revert the commits below:

This reverts commit 4e62d0005a
This reverts commit 0f0c370520
This reverts commit ad0efd04ee

Signed-off-by: chao an <anchao@lixiang.com>
Author:    chao an <anchao@lixiang.com>
Date:      2024-06-06 16:49:50 +08:00
Committer: Xiang Xiao
Parent:    f4d9005bea
Commit:    e456c88c09

22 changed files with 87 additions and 103 deletions
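
For context, these are the alias macros that the reverted commits had introduced;
this revert deletes them again (definitions collected verbatim from the header
hunks below) and switches every call site back to the underlying global:

    /* Scheduler aliases, deleted from the scheduler-internal header below */

    #define nxsched_set_initstate(s)  g_nx_initstate = (s)
    #define nxsched_get_initstate()   g_nx_initstate
    #define nxsched_pidhash()         g_pidhash
    #define nxsched_npidhash()        g_npidhash
    #define nxsched_lastpid()         g_lastpid

    /* Inode alias, deleted from the fs-internal header below */

    #define inode_root()              g_root_inode

    /* Work queue aliases, deleted from the work queue header below */

    #define hpwork()                  g_hpwork
    #define lpwork()                  g_lpwork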

@@ -136,13 +136,13 @@ static void exec_swap(FAR struct tcb_s *ptcb, FAR struct tcb_s *chtcb)
 
   pndx  = PIDHASH(ptcb->pid);
   chndx = PIDHASH(chtcb->pid);
 
-  DEBUGASSERT(nxsched_pidhash()[pndx]);
-  DEBUGASSERT(nxsched_pidhash()[chndx]);
+  DEBUGASSERT(g_pidhash[pndx]);
+  DEBUGASSERT(g_pidhash[chndx]);
 
-  /* Exchange nxsched_pidhash() index */
+  /* Exchange g_pidhash index */
 
-  nxsched_pidhash()[pndx]  = chtcb;
-  nxsched_pidhash()[chndx] = ptcb;
+  g_pidhash[pndx]  = chtcb;
+  g_pidhash[chndx] = ptcb;
 
   /* Exchange pid */

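The swap above relies on PIDHASH() to map a pid to its slot in the table.
PIDHASH() itself is not part of this diff; the sketch below assumes the mask
form implied by the power-of-two sizing of g_npidhash in the nx_start() hunk
further down:

    /* Assumed shape of PIDHASH(): g_npidhash is kept a power of two,
     * so a simple mask reduces any pid to a valid table index.
     */

    #define PIDHASH(pid)  ((pid) & (g_npidhash - 1))
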
@@ -187,9 +187,9 @@ static int elf_get_ntcb(void)
   int count = 0;
   int i;
 
-  for (i = 0; i < nxsched_npidhash(); i++)
+  for (i = 0; i < g_npidhash; i++)
     {
-      if (nxsched_pidhash()[i] != NULL)
+      if (g_pidhash[i] != NULL)
         {
           count++;
         }

@@ -313,11 +313,11 @@ static void elf_emit_note(FAR struct elf_dumpinfo_s *cinfo)
 
   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
            {
-              elf_emit_tcb_note(cinfo, nxsched_pidhash()[i]);
+              elf_emit_tcb_note(cinfo, g_pidhash[i]);
            }
        }
    }

@@ -395,11 +395,11 @@ static void elf_emit_stack(FAR struct elf_dumpinfo_s *cinfo)
 
   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
            {
-              elf_emit_tcb_stack(cinfo, nxsched_pidhash()[i]);
+              elf_emit_tcb_stack(cinfo, g_pidhash[i]);
            }
        }
    }

@@ -520,11 +520,11 @@ static void elf_emit_phdr(FAR struct elf_dumpinfo_s *cinfo,
 
   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i] != NULL)
+          if (g_pidhash[i] != NULL)
            {
-              elf_emit_tcb_phdr(cinfo, nxsched_pidhash()[i], &phdr, &offset);
+              elf_emit_tcb_phdr(cinfo, g_pidhash[i], &phdr, &offset);
            }
        }
    }

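All four coredump hunks above walk the pid table the same way: the table is a
sparse hash, so every slot must be NULL-checked before it is dereferenced. A
minimal sketch of the shared pattern, with visit() standing in for the
per-hunk action (hypothetical name):

    int i;

    for (i = 0; i < g_npidhash; i++)  /* Visit every hash slot */
      {
        if (g_pidhash[i] != NULL)     /* Slots with no task are NULL */
          {
            visit(g_pidhash[i]);      /* e.g. elf_emit_tcb_note() above */
          }
      }
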
@@ -164,11 +164,11 @@ static void note_sysview_send_tasklist(void)
 {
   int i;
 
-  for (i = 0; i < nxsched_npidhash(); i++)
+  for (i = 0; i < g_npidhash; i++)
     {
-      if (nxsched_pidhash()[i] != NULL)
+      if (g_pidhash[i] != NULL)
         {
-          note_sysview_send_taskinfo(nxsched_pidhash()[i]);
+          note_sysview_send_taskinfo(g_pidhash[i]);
         }
     }
 }

@@ -187,7 +187,7 @@ int foreach_inode(foreach_inode_t handler, FAR void *arg)
 
   ret = inode_lock();
   if (ret >= 0)
     {
-      ret = foreach_inodelevel(inode_root()->i_child, info);
+      ret = foreach_inodelevel(g_root_inode->i_child, info);
       inode_unlock();
     }

@@ -211,7 +211,7 @@ int foreach_inode(foreach_inode_t handler, FAR void *arg)
 
   ret = inode_lock();
   if (ret >= 0)
     {
-      ret = foreach_inodelevel(inode_root()->i_child, &info);
+      ret = foreach_inodelevel(g_root_inode->i_child, &info);
       inode_unlock();
     }

@@ -141,7 +141,7 @@ static void inode_insert(FAR struct inode *node,
 
 void inode_root_reserve(void)
 {
-  inode_root() = inode_alloc("", 0777);
+  g_root_inode = inode_alloc("", 0777);
 }
 
 /****************************************************************************

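One subtlety in the hunk above: because inode_root() expanded to the plain
variable g_root_inode, the macro could stand on the left-hand side of an
assignment. The revert simply spells the variable out (both lines taken
verbatim from the hunk):

    /* Before this revert, the macro was used as an lvalue: */

    inode_root() = inode_alloc("", 0777);

    /* After this revert: */

    g_root_inode = inode_alloc("", 0777);
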
@@ -214,7 +214,7 @@ static int _inode_linktarget(FAR struct inode *node,
 static int _inode_search(FAR struct inode_search_s *desc)
 {
   FAR const char *name;
-  FAR struct inode *node = inode_root();
+  FAR struct inode *node = g_root_inode;
   FAR struct inode *left = NULL;
   FAR struct inode *above = NULL;
   FAR const char *relpath = NULL;

@@ -65,8 +65,6 @@
     } \
   while (0)
 
-#define inode_root() g_root_inode
-
 /****************************************************************************
  * Public Types
  ****************************************************************************/

@@ -38,14 +38,11 @@
  * initialization.
  */
 
-#define nxsched_set_initstate(s)  g_nx_initstate = (s)
-#define nxsched_get_initstate()   g_nx_initstate
-
-#define OSINIT_MM_READY()         (nxsched_get_initstate() >= OSINIT_MEMORY)
-#define OSINIT_HW_READY()         (nxsched_get_initstate() >= OSINIT_HARDWARE)
-#define OSINIT_OS_READY()         (nxsched_get_initstate() >= OSINIT_OSREADY)
-#define OSINIT_IDLELOOP()         (nxsched_get_initstate() >= OSINIT_IDLELOOP)
-#define OSINIT_OS_INITIALIZING()  (nxsched_get_initstate() < OSINIT_OSREADY)
+#define OSINIT_MM_READY()         (g_nx_initstate >= OSINIT_MEMORY)
+#define OSINIT_HW_READY()         (g_nx_initstate >= OSINIT_HARDWARE)
+#define OSINIT_OS_READY()         (g_nx_initstate >= OSINIT_OSREADY)
+#define OSINIT_IDLELOOP()         (g_nx_initstate >= OSINIT_IDLELOOP)
+#define OSINIT_OS_INITIALIZING()  (g_nx_initstate < OSINIT_OSREADY)
 
 /****************************************************************************
  * Public Types

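The OSINIT_*() tests above are ordered comparisons, so they depend on the
state values increasing in boot order. The progression below is inferred from
the sequence of assignments in the nx_start() hunks that follow:

    /* Boot-phase ordering inferred from nx_start():
     *
     *   OSINIT_BOOT -> OSINIT_TASKLISTS -> OSINIT_MEMORY
     *     -> OSINIT_HARDWARE -> OSINIT_OSREADY -> OSINIT_IDLELOOP
     */

    if (OSINIT_HW_READY())  /* i.e. g_nx_initstate >= OSINIT_HARDWARE */
      {
        /* Hardware resources may be used from here on */
      }
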
@@ -470,7 +470,7 @@ static void idle_group_initialize(void)
       tcb = &g_idletcb[i];
       hashndx = PIDHASH(i);
 
-      nxsched_pidhash()[hashndx] = &tcb->cmn;
+      g_pidhash[hashndx] = &tcb->cmn;
 
       /* Allocate the IDLE group */

@@ -539,7 +539,7 @@ void nx_start(void)
 
   /* Boot up is complete */
 
-  nxsched_set_initstate(OSINIT_BOOT);
+  g_nx_initstate = OSINIT_BOOT;
 
   /* Initialize RTOS Data ***************************************************/

@@ -555,7 +555,7 @@ void nx_start(void)
 
   /* Task lists are initialized */
 
-  nxsched_set_initstate(OSINIT_TASKLISTS);
+  g_nx_initstate = OSINIT_TASKLISTS;
 
   /* Initialize RTOS facilities *********************************************/

@@ -623,25 +623,24 @@ void nx_start(void)
 
   /* Initialize the logic that determine unique process IDs. */
 
-  nxsched_npidhash() = 4;
-  while (nxsched_npidhash() <= CONFIG_SMP_NCPUS)
+  g_npidhash = 4;
+  while (g_npidhash <= CONFIG_SMP_NCPUS)
     {
-      nxsched_npidhash() <<= 1;
+      g_npidhash <<= 1;
     }
 
-  nxsched_pidhash() =
-    kmm_zalloc(sizeof(*nxsched_pidhash()) * nxsched_npidhash());
-  DEBUGASSERT(nxsched_pidhash());
+  g_pidhash = kmm_zalloc(sizeof(*g_pidhash) * g_npidhash);
+  DEBUGASSERT(g_pidhash);
 
   /* IDLE Group Initialization **********************************************/
 
   idle_group_initialize();
 
-  nxsched_lastpid() = CONFIG_SMP_NCPUS - 1;
+  g_lastpid = CONFIG_SMP_NCPUS - 1;
 
   /* The memory manager is available */
 
-  nxsched_set_initstate(OSINIT_MEMORY);
+  g_nx_initstate = OSINIT_MEMORY;
 
   /* Initialize tasking data structures */

@@ -721,7 +720,7 @@ void nx_start(void)
 
   /* Hardware resources are now available */
 
-  nxsched_set_initstate(OSINIT_HARDWARE);
+  g_nx_initstate = OSINIT_HARDWARE;
 
   /* Setup for Multi-Tasking ************************************************/

@@ -767,7 +766,7 @@ void nx_start(void)
 
   /* The OS is fully initialized and we are beginning multi-tasking */
 
-  nxsched_set_initstate(OSINIT_OSREADY);
+  g_nx_initstate = OSINIT_OSREADY;
 
   /* Create initial tasks and bring-up the system */

@@ -775,7 +774,7 @@ void nx_start(void)
 
   /* Enter to idleloop */
 
-  nxsched_set_initstate(OSINIT_IDLELOOP);
+  g_nx_initstate = OSINIT_IDLELOOP;
 
   /* Let other threads have access to the memory manager */

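A worked example of the pid-table sizing restored above, assuming
CONFIG_SMP_NCPUS = 4 (a value chosen only for illustration):

    g_npidhash = 4;                         /* Start at 4 */
    while (g_npidhash <= CONFIG_SMP_NCPUS)  /* 4 <= 4, so double once */
      {
        g_npidhash <<= 1;                   /* 4 -> 8; 8 > 4 ends loop */
      }

    /* Result: g_npidhash == 8, a power of two strictly larger than the
     * number of per-CPU IDLE tasks, so at least one slot stays free.
     */
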
@@ -117,10 +117,6 @@
 #  define CRITMONITOR_PANIC(fmt, ...) _alert(fmt, ##__VA_ARGS__)
 #endif
 
-#define nxsched_pidhash()   g_pidhash
-#define nxsched_npidhash()  g_npidhash
-#define nxsched_lastpid()   g_lastpid
-
 /****************************************************************************
  * Public Type Definitions
  ****************************************************************************/

@@ -111,12 +111,12 @@ void nxsched_process_taskload_ticks(FAR struct tcb_s *tcb, clock_t ticks)
        * total.
        */
 
-      for (i = 0; i < nxsched_npidhash(); i++)
+      for (i = 0; i < g_npidhash; i++)
         {
-          if (nxsched_pidhash()[i])
+          if (g_pidhash[i])
             {
-              nxsched_pidhash()[i]->ticks >>= 1;
-              total += nxsched_pidhash()[i]->ticks;
+              g_pidhash[i]->ticks >>= 1;
+              total += g_pidhash[i]->ticks;
             }
         }

@@ -209,11 +209,10 @@ int clock_cpuload(int pid, FAR struct cpuload_s *cpuload)
    * do this too, but this would require a little more overhead.
    */
 
-  if (nxsched_pidhash()[hash_index] &&
-      nxsched_pidhash()[hash_index]->pid == pid)
+  if (g_pidhash[hash_index] && g_pidhash[hash_index]->pid == pid)
     {
       cpuload->total  = g_cpuload_total;
-      cpuload->active = nxsched_pidhash()[hash_index]->ticks;
+      cpuload->active = g_pidhash[hash_index]->ticks;
 
       ret = OK;
     }

@@ -64,13 +64,13 @@ void nxsched_foreach(nxsched_foreach_t handler, FAR void *arg)
 
   flags = enter_critical_section();
   sched_lock();
-  for (ndx = 0; ndx < nxsched_npidhash(); ndx++)
+  for (ndx = 0; ndx < g_npidhash; ndx++)
     {
       /* This test and the function call must be atomic */
 
-      if (nxsched_pidhash()[ndx])
+      if (g_pidhash[ndx])
         {
-          handler(nxsched_pidhash()[ndx], arg);
+          handler(g_pidhash[ndx], arg);
         }
     }

@@ -62,7 +62,7 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
    * whether the PID is within range.
    */
 
-  if (nxsched_pidhash() != NULL && pid >= 0)
+  if (g_pidhash != NULL && pid >= 0)
     {
       /* The test and the return setup should be atomic. This still does
        * not provide proper protection if the recipient of the TCB does not

@@ -76,12 +76,11 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
 
       /* Verify that the correct TCB was found. */
 
-      if (nxsched_pidhash()[hash_ndx] != NULL &&
-          pid == nxsched_pidhash()[hash_ndx]->pid)
+      if (g_pidhash[hash_ndx] != NULL && pid == g_pidhash[hash_ndx]->pid)
         {
           /* Return the TCB associated with this pid (if any) */
 
-          ret = nxsched_pidhash()[hash_ndx];
+          ret = g_pidhash[hash_ndx];
         }
     }

@@ -62,7 +62,7 @@ bool sched_idletask(void)
    * have been initialized and, in that case, rtcb may be NULL.
    */
 
-  DEBUGASSERT(rtcb != NULL || nxsched_get_initstate() < OSINIT_TASKLISTS);
+  DEBUGASSERT(rtcb != NULL || g_nx_initstate < OSINIT_TASKLISTS);
   if (rtcb != NULL)
     {
       /* The IDLE task TCB is distinguishable by a few things:

@@ -57,7 +57,7 @@ static void nxsched_releasepid(pid_t pid)
    * total for all threads.
    */
 
-  g_cpuload_total -= nxsched_pidhash()[hash_ndx]->ticks;
+  g_cpuload_total -= g_pidhash[hash_ndx]->ticks;
 #endif
 
   /* Make any pid associated with this hash available. Note:

@@ -65,7 +65,7 @@ static void nxsched_releasepid(pid_t pid)
    * following action is atomic
    */
 
-  nxsched_pidhash()[hash_ndx] = NULL;
+  g_pidhash[hash_ndx] = NULL;
   leave_critical_section(flags);
 }

@@ -72,7 +72,7 @@ bool nxsched_verify_tcb(FAR struct tcb_s *tcb)
   bool valid;
 
   flags = enter_critical_section();
-  valid = tcb == nxsched_pidhash()[PIDHASH(tcb->pid)];
+  valid = tcb == g_pidhash[PIDHASH(tcb->pid)];
   leave_critical_section(flags);
 
   return valid;

@@ -88,12 +88,12 @@ static int nxtask_assign_pid(FAR struct tcb_s *tcb)
   int i;
 
   /* NOTE:
-   * ERROR means that the nxsched_pidhash()[] table is completely full.
+   * ERROR means that the g_pidhash[] table is completely full.
    * We cannot allow another task to be started.
    */
 
   /* Protect the following operation with a critical section
-   * because nxsched_pidhash() is accessed from an interrupt context
+   * because g_pidhash is accessed from an interrupt context
    */
 
   irqstate_t flags = enter_critical_section();

@@ -104,8 +104,8 @@ retry:
 
   /* Get the next process ID candidate */
 
-  next_pid = nxsched_lastpid() + 1;
-  for (i = 0; i < nxsched_npidhash(); i++)
+  next_pid = g_lastpid + 1;
+  for (i = 0; i < g_npidhash; i++)
     {
       /* Verify that the next_pid is in the valid range */
 

@@ -120,13 +120,13 @@ retry:
 
       /* Check if there is a (potential) duplicate of this pid */
 
-      if (!nxsched_pidhash()[hash_ndx])
+      if (!g_pidhash[hash_ndx])
         {
           /* Assign this PID to the task */
 
-          nxsched_pidhash()[hash_ndx] = tcb;
+          g_pidhash[hash_ndx] = tcb;
           tcb->pid = next_pid;
-          nxsched_lastpid() = next_pid;
+          g_lastpid = next_pid;
 
           leave_critical_section(flags);
           return OK;

@@ -135,35 +135,35 @@ retry:
       next_pid++;
     }
 
-  /* If we get here, then the nxsched_pidhash()[] table is completely full.
-   * We will alloc new space and copy original nxsched_pidhash() to it to
+  /* If we get here, then the g_pidhash[] table is completely full.
+   * We will alloc new space and copy original g_pidhash to it to
    * expand space.
    */
 
-  pidhash = kmm_zalloc(nxsched_npidhash() * 2 * sizeof(*pidhash));
+  pidhash = kmm_zalloc(g_npidhash * 2 * sizeof(*pidhash));
   if (pidhash == NULL)
     {
       leave_critical_section(flags);
       return -ENOMEM;
     }
 
-  nxsched_npidhash() *= 2;
+  g_npidhash *= 2;
 
   /* All original pid and hash_ndx are mismatch,
    * so we need to rebuild their relationship
   */
 
-  for (i = 0; i < nxsched_npidhash() / 2; i++)
+  for (i = 0; i < g_npidhash / 2; i++)
     {
-      hash_ndx = PIDHASH(nxsched_pidhash()[i]->pid);
+      hash_ndx = PIDHASH(g_pidhash[i]->pid);
       DEBUGASSERT(pidhash[hash_ndx] == NULL);
-      pidhash[hash_ndx] = nxsched_pidhash()[i];
+      pidhash[hash_ndx] = g_pidhash[i];
     }
 
   /* Release resource for original g_pidhash, using new g_pidhash */
 
-  temp = nxsched_pidhash();
-  nxsched_pidhash() = pidhash;
+  temp = g_pidhash;
+  g_pidhash = pidhash;
   kmm_free(temp);
 
   /* Let's try every allowable pid again */

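The rebuild loop above is not optional: PIDHASH() depends on g_npidhash, so
doubling the table moves pids to new slots. Assuming the mask form of
PIDHASH() sketched earlier:

    /* Old table, g_npidhash = 4:  pid 5 -> (5 & 3) = slot 1
     * New table, g_npidhash = 8:  pid 5 -> (5 & 7) = slot 5
     *
     * Every live TCB is therefore re-inserted at the index the grown
     * table expects before the old array is freed.
     */
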
@@ -148,7 +148,7 @@ int work_cancel(int qid, FAR struct work_s *work)
     {
       /* Cancel high priority work */
 
-      return work_qcancel((FAR struct kwork_wqueue_s *)&hpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_hpwork,
                           -1, work);
     }
   else

@@ -158,7 +158,7 @@ int work_cancel(int qid, FAR struct work_s *work)
     {
       /* Cancel low priority work */
 
-      return work_qcancel((FAR struct kwork_wqueue_s *)&lpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_lpwork,
                           -1, work);
     }
   else

@@ -196,7 +196,7 @@ int work_cancel_sync(int qid, FAR struct work_s *work)
     {
      /* Cancel high priority work */
 
-      return work_qcancel((FAR struct kwork_wqueue_s *)&hpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_hpwork,
                           CONFIG_SCHED_HPNTHREADS, work);
     }
   else

@@ -206,7 +206,7 @@ int work_cancel_sync(int qid, FAR struct work_s *work)
     {
       /* Cancel low priority work */
 
-      return work_qcancel((FAR struct kwork_wqueue_s *)&lpwork(),
+      return work_qcancel((FAR struct kwork_wqueue_s *)&g_lpwork,
                           CONFIG_SCHED_LPNTHREADS, work);
     }
   else

@@ -224,7 +224,7 @@ void lpwork_boostpriority(uint8_t reqprio)
 
   for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
     {
-      lpwork_boostworker(lpwork().worker[wndx].pid, reqprio);
+      lpwork_boostworker(g_lpwork.worker[wndx].pid, reqprio);
     }
 
   leave_critical_section(flags);

@@ -268,7 +268,7 @@ void lpwork_restorepriority(uint8_t reqprio)
 
   for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
     {
-      lpwork_restoreworker(lpwork().worker[wndx].pid, reqprio);
+      lpwork_restoreworker(g_lpwork.worker[wndx].pid, reqprio);
     }
 
   leave_critical_section(flags);

@@ -67,7 +67,7 @@
 static void hp_work_timer_expiry(wdparm_t arg)
 {
   irqstate_t flags = enter_critical_section();
-  queue_work(hpwork(), arg);
+  queue_work(g_hpwork, arg);
   leave_critical_section(flags);
 }
 #endif

@@ -80,7 +80,7 @@ static void hp_work_timer_expiry(wdparm_t arg)
 static void lp_work_timer_expiry(wdparm_t arg)
 {
   irqstate_t flags = enter_critical_section();
-  queue_work(lpwork(), arg);
+  queue_work(g_lpwork, arg);
   leave_critical_section(flags);
 }
 #endif

@@ -152,7 +152,7 @@ int work_queue(int qid, FAR struct work_s *work, worker_t worker,
 
       if (!delay)
         {
-          queue_work(hpwork(), work);
+          queue_work(g_hpwork, work);
         }
       else
         {

@@ -169,7 +169,7 @@ int work_queue(int qid, FAR struct work_s *work, worker_t worker,
 
       if (!delay)
         {
-          queue_work(lpwork(), work);
+          queue_work(g_lpwork, work);
         }
       else
         {

@@ -305,7 +305,7 @@ void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
 #ifdef CONFIG_SCHED_HPWORK
   if (qid == HPWORK)
     {
-      wqueue  = (FAR struct kwork_wqueue_s *)&hpwork();
+      wqueue  = (FAR struct kwork_wqueue_s *)&g_hpwork;
       nthread = CONFIG_SCHED_HPNTHREADS;
     }
   else

@@ -313,7 +313,7 @@ void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
 #ifdef CONFIG_SCHED_LPWORK
   if (qid == LPWORK)
     {
-      wqueue  = (FAR struct kwork_wqueue_s *)&lpwork();
+      wqueue  = (FAR struct kwork_wqueue_s *)&g_lpwork;
       nthread = CONFIG_SCHED_LPNTHREADS;
     }
   else

@@ -352,7 +352,7 @@ int work_start_highpri(void)
   return work_thread_create(HPWORKNAME, CONFIG_SCHED_HPWORKPRIORITY,
                             CONFIG_SCHED_HPWORKSTACKSIZE,
                             CONFIG_SCHED_HPNTHREADS,
-                            (FAR struct kwork_wqueue_s *)&hpwork());
+                            (FAR struct kwork_wqueue_s *)&g_hpwork);
 }
 #endif /* CONFIG_SCHED_HPWORK */

@@ -380,7 +380,7 @@ int work_start_lowpri(void)
   return work_thread_create(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
                             CONFIG_SCHED_LPWORKSTACKSIZE,
                             CONFIG_SCHED_LPNTHREADS,
-                            (FAR struct kwork_wqueue_s *)&lpwork());
+                            (FAR struct kwork_wqueue_s *)&g_lpwork);
 }
 #endif /* CONFIG_SCHED_LPWORK */

@@ -105,16 +105,12 @@ struct lp_wqueue_s
 
 #ifdef CONFIG_SCHED_HPWORK
 /* The state of the kernel mode, high priority work queue. */
 
-#define hpwork() g_hpwork
-
 extern struct hp_wqueue_s g_hpwork;
 #endif
 
 #ifdef CONFIG_SCHED_LPWORK
 /* The state of the kernel mode, low priority work queue(s). */
 
-#define lpwork() g_lpwork
-
 extern struct lp_wqueue_s g_lpwork;
 #endif
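
The work queue part of the revert has the same shape as the scheduler part:
the function-like aliases go away and the queues are referenced directly. At a
typical call site (both lines taken verbatim from the work_queue() hunk
above):

    /* Before this revert: */

    queue_work(hpwork(), work);

    /* After this revert: */

    queue_work(g_hpwork, work);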