sched: replace some global variables with macros

Replacing direct references to the scheduler globals with macro accessors will make it easier to extend the scheduling implementation.

Signed-off-by: chao an <anchao@lixiang.com>
parent d90e1cb7b4
commit 4e62d0005a
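For context before the hunks below: the pattern applied throughout this diff is to hide each scheduler global (g_pidhash, g_npidhash, g_lastpid, g_nx_initstate) behind a function-like macro that, for now, simply expands to the global. Call sites then read and write through the macro, so a later change can redirect the macro without touching them. A minimal standalone sketch of the idea (not NuttX code; only the macro and global names are taken from this commit):

```c
#include <stdio.h>

/* The global itself is unchanged... */

static int g_npidhash;

/* ...but every access goes through a macro that currently just names it.
 * Because the macro expands to an lvalue, call sites can assign through
 * it exactly as the patched scheduler code does.
 */

#define nxsched_npidhash() g_npidhash

int main(void)
{
  /* Mirror of the nx_start() logic in this diff: start at 4 and double
   * until the table is larger than the CPU count (8 assumed here).
   */

  nxsched_npidhash() = 4;
  while (nxsched_npidhash() <= 8)
    {
      nxsched_npidhash() <<= 1;
    }

  printf("npidhash = %d\n", nxsched_npidhash());
  return 0;
}
```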
@@ -136,13 +136,13 @@ static void exec_swap(FAR struct tcb_s *ptcb, FAR struct tcb_s *chtcb)
   pndx = PIDHASH(ptcb->pid);
   chndx = PIDHASH(chtcb->pid);

-  DEBUGASSERT(g_pidhash[pndx]);
-  DEBUGASSERT(g_pidhash[chndx]);
+  DEBUGASSERT(nxsched_pidhash()[pndx]);
+  DEBUGASSERT(nxsched_pidhash()[chndx]);

-  /* Exchange g_pidhash index */
+  /* Exchange nxsched_pidhash() index */

-  g_pidhash[pndx] = chtcb;
-  g_pidhash[chndx] = ptcb;
+  nxsched_pidhash()[pndx] = chtcb;
+  nxsched_pidhash()[chndx] = ptcb;

   /* Exchange pid */

@@ -187,9 +187,9 @@ static int elf_get_ntcb(void)
   int count = 0;
   int i;

-  for (i = 0; i < g_npidhash; i++)
+  for (i = 0; i < nxsched_npidhash(); i++)
     {
-      if (g_pidhash[i] != NULL)
+      if (nxsched_pidhash()[i] != NULL)
         {
           count++;
         }
@@ -313,11 +313,11 @@ static void elf_emit_note(FAR struct elf_dumpinfo_s *cinfo)

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < g_npidhash; i++)
+      for (i = 0; i < nxsched_npidhash(); i++)
         {
-          if (g_pidhash[i] != NULL)
+          if (nxsched_pidhash()[i] != NULL)
             {
-              elf_emit_tcb_note(cinfo, g_pidhash[i]);
+              elf_emit_tcb_note(cinfo, nxsched_pidhash()[i]);
             }
         }
     }
@@ -395,11 +395,11 @@ static void elf_emit_stack(FAR struct elf_dumpinfo_s *cinfo)

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < g_npidhash; i++)
+      for (i = 0; i < nxsched_npidhash(); i++)
         {
-          if (g_pidhash[i] != NULL)
+          if (nxsched_pidhash()[i] != NULL)
             {
-              elf_emit_tcb_stack(cinfo, g_pidhash[i]);
+              elf_emit_tcb_stack(cinfo, nxsched_pidhash()[i]);
             }
         }
     }
@@ -520,11 +520,11 @@ static void elf_emit_phdr(FAR struct elf_dumpinfo_s *cinfo,

   if (cinfo->pid == INVALID_PROCESS_ID)
     {
-      for (i = 0; i < g_npidhash; i++)
+      for (i = 0; i < nxsched_npidhash(); i++)
         {
-          if (g_pidhash[i] != NULL)
+          if (nxsched_pidhash()[i] != NULL)
             {
-              elf_emit_tcb_phdr(cinfo, g_pidhash[i], &phdr, &offset);
+              elf_emit_tcb_phdr(cinfo, nxsched_pidhash()[i], &phdr, &offset);
             }
         }
     }
@@ -164,11 +164,11 @@ static void note_sysview_send_tasklist(void)
 {
   int i;

-  for (i = 0; i < g_npidhash; i++)
+  for (i = 0; i < nxsched_npidhash(); i++)
     {
-      if (g_pidhash[i] != NULL)
+      if (nxsched_pidhash()[i] != NULL)
         {
-          note_sysview_send_taskinfo(g_pidhash[i]);
+          note_sysview_send_taskinfo(nxsched_pidhash()[i]);
         }
     }
 }
@@ -38,11 +38,13 @@
  * initialization.
  */

-#define OSINIT_MM_READY()        (g_nx_initstate >= OSINIT_MEMORY)
-#define OSINIT_HW_READY()        (g_nx_initstate >= OSINIT_HARDWARE)
-#define OSINIT_OS_READY()        (g_nx_initstate >= OSINIT_OSREADY)
-#define OSINIT_IDLELOOP()        (g_nx_initstate >= OSINIT_IDLELOOP)
-#define OSINIT_OS_INITIALIZING() (g_nx_initstate < OSINIT_OSREADY)
+#define nxsched_initstate()      g_nx_initstate
+
+#define OSINIT_MM_READY()        (nxsched_initstate() >= OSINIT_MEMORY)
+#define OSINIT_HW_READY()        (nxsched_initstate() >= OSINIT_HARDWARE)
+#define OSINIT_OS_READY()        (nxsched_initstate() >= OSINIT_OSREADY)
+#define OSINIT_IDLELOOP()        (nxsched_initstate() >= OSINIT_IDLELOOP)
+#define OSINIT_OS_INITIALIZING() (nxsched_initstate() < OSINIT_OSREADY)

 /****************************************************************************
  * Public Types
@@ -322,7 +322,7 @@ void nx_start(void)

   /* Boot up is complete */

-  g_nx_initstate = OSINIT_BOOT;
+  nxsched_initstate() = OSINIT_BOOT;

   /* Initialize RTOS Data ***************************************************/

@@ -427,7 +427,7 @@ void nx_start(void)

   /* Task lists are initialized */

-  g_nx_initstate = OSINIT_TASKLISTS;
+  nxsched_initstate() = OSINIT_TASKLISTS;

   /* Initialize RTOS facilities *********************************************/

@@ -495,14 +495,15 @@ void nx_start(void)

   /* Initialize the logic that determine unique process IDs. */

-  g_npidhash = 4;
-  while (g_npidhash <= CONFIG_SMP_NCPUS)
+  nxsched_npidhash() = 4;
+  while (nxsched_npidhash() <= CONFIG_SMP_NCPUS)
     {
-      g_npidhash <<= 1;
+      nxsched_npidhash() <<= 1;
     }

-  g_pidhash = kmm_zalloc(sizeof(*g_pidhash) * g_npidhash);
-  DEBUGASSERT(g_pidhash);
+  nxsched_pidhash() =
+    kmm_zalloc(sizeof(*nxsched_pidhash()) * nxsched_npidhash());
+  DEBUGASSERT(nxsched_pidhash());

   /* IDLE Group Initialization **********************************************/

@@ -513,7 +514,7 @@ void nx_start(void)
       /* Assign the process ID(s) of ZERO to the idle task(s) */

       hashndx = PIDHASH(i);
-      g_pidhash[hashndx] = &g_idletcb[i].cmn;
+      nxsched_pidhash()[hashndx] = &g_idletcb[i].cmn;

       /* Allocate the IDLE group */

@@ -553,11 +554,11 @@ void nx_start(void)
                            GROUP_FLAG_PRIVILEGED;
     }

-  g_lastpid = CONFIG_SMP_NCPUS - 1;
+  nxsched_lastpid() = CONFIG_SMP_NCPUS - 1;

   /* The memory manager is available */

-  g_nx_initstate = OSINIT_MEMORY;
+  nxsched_initstate() = OSINIT_MEMORY;

   /* Initialize tasking data structures */

@@ -643,7 +644,7 @@ void nx_start(void)

   /* Hardware resources are now available */

-  g_nx_initstate = OSINIT_HARDWARE;
+  nxsched_initstate() = OSINIT_HARDWARE;

   /* Setup for Multi-Tasking ************************************************/

@@ -689,7 +690,7 @@ void nx_start(void)

   /* The OS is fully initialized and we are beginning multi-tasking */

-  g_nx_initstate = OSINIT_OSREADY;
+  nxsched_initstate() = OSINIT_OSREADY;

   /* Create initial tasks and bring-up the system */

@@ -697,7 +698,7 @@ void nx_start(void)

   /* Enter to idleloop */

-  g_nx_initstate = OSINIT_IDLELOOP;
+  nxsched_initstate() = OSINIT_IDLELOOP;

   /* Let other threads have access to the memory manager */

@@ -192,7 +192,7 @@ try_again:
          * lists are valid.
          */

-         if (g_nx_initstate >= OSINIT_TASKLISTS)
+         if (nxsched_initstate() >= OSINIT_TASKLISTS)
            {
              /* If called from an interrupt handler, then just take the spinlock.
               * If we are already in a critical section, this will lock the CPU
@@ -416,7 +416,7 @@ irqstate_t enter_critical_section(void)
    * lists have been initialized.
    */

-  if (!up_interrupt_context() && g_nx_initstate >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context() && nxsched_initstate() >= OSINIT_TASKLISTS)
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
@@ -463,7 +463,7 @@ void leave_critical_section(irqstate_t flags)
          * lists are valid.
          */

-         if (g_nx_initstate >= OSINIT_TASKLISTS)
+         if (nxsched_initstate() >= OSINIT_TASKLISTS)
            {
              /* If called from an interrupt handler, then just release the
               * spinlock. The interrupt handling logic should already hold the
@@ -608,7 +608,7 @@ void leave_critical_section(irqstate_t flags)
    * lists have been initialized.
    */

-  if (!up_interrupt_context() && g_nx_initstate >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context() && nxsched_initstate() >= OSINIT_TASKLISTS)
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
@@ -667,7 +667,7 @@ bool irq_cpu_locked(int cpu)

   /* g_cpu_irqset is not valid in early phases of initialization */

-  if (g_nx_initstate < OSINIT_OSREADY)
+  if (nxsched_initstate() < OSINIT_OSREADY)
     {
       /* We are still single threaded. In either state of g_cpu_irqlock,
        * the correct return value should always be false.
@@ -119,6 +119,10 @@
 #  define CRITMONITOR_PANIC(fmt, ...) _alert(fmt, ##__VA_ARGS__)
 #endif

+#define nxsched_pidhash()  g_pidhash
+#define nxsched_npidhash() g_npidhash
+#define nxsched_lastpid()  g_lastpid
+
 /****************************************************************************
  * Public Type Definitions
  ****************************************************************************/
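The value of the indirection added in the hunk above is that only these defines (plus nxsched_initstate() in the init header) would need to change if the scheduler state ever moves out of the globals. A purely hypothetical sketch of such a later step, with invented names, only to show why routing call sites through the macros matters (this is not part of the commit):

```c
/* Hypothetical only: suppose the scheduler state later moves into a
 * per-instance structure.  The struct and this_sched() below are
 * invented for illustration; nothing in this commit defines them.
 */

struct sched_state_s
{
  FAR struct tcb_s **pidhash;   /* Was g_pidhash  */
  int                npidhash;  /* Was g_npidhash */
  pid_t              lastpid;   /* Was g_lastpid  */
};

FAR struct sched_state_s *this_sched(void);  /* Assumed accessor */

/* Only the macros change; every call site rewritten in this commit
 * keeps compiling because it already goes through nxsched_pidhash()
 * and friends.
 */

#define nxsched_pidhash()  (this_sched()->pidhash)
#define nxsched_npidhash() (this_sched()->npidhash)
#define nxsched_lastpid()  (this_sched()->lastpid)
```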
@@ -111,12 +111,12 @@ void nxsched_process_taskload_ticks(FAR struct tcb_s *tcb, clock_t ticks)
    * total.
    */

-  for (i = 0; i < g_npidhash; i++)
+  for (i = 0; i < nxsched_npidhash(); i++)
     {
-      if (g_pidhash[i])
+      if (nxsched_pidhash()[i])
         {
-          g_pidhash[i]->ticks >>= 1;
-          total += g_pidhash[i]->ticks;
+          nxsched_pidhash()[i]->ticks >>= 1;
+          total += nxsched_pidhash()[i]->ticks;
         }
     }

@@ -209,10 +209,11 @@ int clock_cpuload(int pid, FAR struct cpuload_s *cpuload)
    * do this too, but this would require a little more overhead.
    */

-  if (g_pidhash[hash_index] && g_pidhash[hash_index]->pid == pid)
+  if (nxsched_pidhash()[hash_index] &&
+      nxsched_pidhash()[hash_index]->pid == pid)
     {
       cpuload->total  = g_cpuload_total;
-      cpuload->active = g_pidhash[hash_index]->ticks;
+      cpuload->active = nxsched_pidhash()[hash_index]->ticks;
       ret = OK;
     }

@@ -64,13 +64,13 @@ void nxsched_foreach(nxsched_foreach_t handler, FAR void *arg)

   flags = enter_critical_section();
   sched_lock();
-  for (ndx = 0; ndx < g_npidhash; ndx++)
+  for (ndx = 0; ndx < nxsched_npidhash(); ndx++)
     {
       /* This test and the function call must be atomic */

-      if (g_pidhash[ndx])
+      if (nxsched_pidhash()[ndx])
         {
-          handler(g_pidhash[ndx], arg);
+          handler(nxsched_pidhash()[ndx], arg);
         }
     }

@@ -62,7 +62,7 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)
    * whether the PID is within range.
    */

-  if (g_pidhash != NULL && pid >= 0)
+  if (nxsched_pidhash() != NULL && pid >= 0)
     {
       /* The test and the return setup should be atomic. This still does
        * not provide proper protection if the recipient of the TCB does not
@@ -76,11 +76,12 @@ FAR struct tcb_s *nxsched_get_tcb(pid_t pid)

       /* Verify that the correct TCB was found. */

-      if (g_pidhash[hash_ndx] != NULL && pid == g_pidhash[hash_ndx]->pid)
+      if (nxsched_pidhash()[hash_ndx] != NULL &&
+          pid == nxsched_pidhash()[hash_ndx]->pid)
         {
           /* Return the TCB associated with this pid (if any) */

-          ret = g_pidhash[hash_ndx];
+          ret = nxsched_pidhash()[hash_ndx];
         }
     }

@@ -62,7 +62,7 @@ bool sched_idletask(void)
    * have been initialized and, in that case, rtcb may be NULL.
    */

-  DEBUGASSERT(rtcb != NULL || g_nx_initstate < OSINIT_TASKLISTS);
+  DEBUGASSERT(rtcb != NULL || nxsched_initstate() < OSINIT_TASKLISTS);
   if (rtcb != NULL)
     {
       /* The IDLE task TCB is distinguishable by a few things:
@@ -57,7 +57,7 @@ static void nxsched_releasepid(pid_t pid)
    * total for all threads.
    */

-  g_cpuload_total -= g_pidhash[hash_ndx]->ticks;
+  g_cpuload_total -= nxsched_pidhash()[hash_ndx]->ticks;
 #endif

   /* Make any pid associated with this hash available. Note:
@@ -65,7 +65,7 @@ static void nxsched_releasepid(pid_t pid)
    * following action is atomic
    */

-  g_pidhash[hash_ndx] = NULL;
+  nxsched_pidhash()[hash_ndx] = NULL;

   leave_critical_section(flags);
 }
@@ -72,7 +72,7 @@ bool nxsched_verify_tcb(FAR struct tcb_s *tcb)
   bool valid;

   flags = enter_critical_section();
-  valid = tcb == g_pidhash[PIDHASH(tcb->pid)];
+  valid = tcb == nxsched_pidhash()[PIDHASH(tcb->pid)];
   leave_critical_section(flags);

   return valid;
@@ -88,12 +88,12 @@ static int nxtask_assign_pid(FAR struct tcb_s *tcb)
   int i;

   /* NOTE:
-   * ERROR means that the g_pidhash[] table is completely full.
+   * ERROR means that the nxsched_pidhash()[] table is completely full.
    * We cannot allow another task to be started.
    */

   /* Protect the following operation with a critical section
-   * because g_pidhash is accessed from an interrupt context
+   * because nxsched_pidhash() is accessed from an interrupt context
    */

   irqstate_t flags = enter_critical_section();
@@ -104,8 +104,8 @@ retry:

   /* Get the next process ID candidate */

-  next_pid = g_lastpid + 1;
-  for (i = 0; i < g_npidhash; i++)
+  next_pid = nxsched_lastpid() + 1;
+  for (i = 0; i < nxsched_npidhash(); i++)
     {
       /* Verify that the next_pid is in the valid range */

@@ -120,13 +120,13 @@ retry:

       /* Check if there is a (potential) duplicate of this pid */

-      if (!g_pidhash[hash_ndx])
+      if (!nxsched_pidhash()[hash_ndx])
         {
           /* Assign this PID to the task */

-          g_pidhash[hash_ndx] = tcb;
+          nxsched_pidhash()[hash_ndx] = tcb;
           tcb->pid = next_pid;
-          g_lastpid = next_pid;
+          nxsched_lastpid() = next_pid;

           leave_critical_section(flags);
           return OK;
@@ -135,35 +135,35 @@ retry:
       next_pid++;
     }

-  /* If we get here, then the g_pidhash[] table is completely full.
-   * We will alloc new space and copy original g_pidhash to it to
+  /* If we get here, then the nxsched_pidhash()[] table is completely full.
+   * We will alloc new space and copy original nxsched_pidhash() to it to
    * expand space.
    */

-  pidhash = kmm_zalloc(g_npidhash * 2 * sizeof(*pidhash));
+  pidhash = kmm_zalloc(nxsched_npidhash() * 2 * sizeof(*pidhash));
   if (pidhash == NULL)
     {
       leave_critical_section(flags);
       return -ENOMEM;
     }

-  g_npidhash *= 2;
+  nxsched_npidhash() *= 2;

   /* All original pid and hash_ndx are mismatch,
    * so we need to rebuild their relationship
    */

-  for (i = 0; i < g_npidhash / 2; i++)
+  for (i = 0; i < nxsched_npidhash() / 2; i++)
     {
-      hash_ndx = PIDHASH(g_pidhash[i]->pid);
+      hash_ndx = PIDHASH(nxsched_pidhash()[i]->pid);
       DEBUGASSERT(pidhash[hash_ndx] == NULL);
-      pidhash[hash_ndx] = g_pidhash[i];
+      pidhash[hash_ndx] = nxsched_pidhash()[i];
     }

   /* Release resource for original g_pidhash, using new g_pidhash */

-  temp = g_pidhash;
-  g_pidhash = pidhash;
+  temp = nxsched_pidhash();
+  nxsched_pidhash() = pidhash;
   kmm_free(temp);

   /* Let's try every allowable pid again */
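The last hunk is the table-growth path: when every slot is taken, the table is doubled, each existing entry is rehashed under the new mask, and the pointers are swapped before the allocation is retried. A small standalone model of that flow (plain C, not NuttX code; HASH() stands in for PIDHASH() and is assumed to mask by size - 1):

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for PIDHASH(): index a power-of-two table by masking. */

#define HASH(pid, size) ((pid) & ((size) - 1))

int main(void)
{
  int  npidhash = 4;
  int *pidhash  = calloc(npidhash, sizeof(*pidhash));
  int  i;

  /* Fill every slot.  A pid is stored as pid + 1 so that 0 means
   * "empty", playing the role of the NULL TCB pointer in the real table.
   */

  for (i = 0; i < npidhash; i++)
    {
      pidhash[HASH(i, npidhash)] = i + 1;
    }

  /* Growth path, as in the hunk above: allocate twice the space,
   * rebuild the pid -> slot mapping with the new size, then swap
   * the pointers and free the old table.
   */

  int *newhash = calloc(npidhash * 2, sizeof(*newhash));
  npidhash *= 2;

  for (i = 0; i < npidhash / 2; i++)
    {
      if (pidhash[i] != 0)
        {
          newhash[HASH(pidhash[i] - 1, npidhash)] = pidhash[i];
        }
    }

  int *temp = pidhash;
  pidhash = newhash;
  free(temp);

  for (i = 0; i < npidhash; i++)
    {
      if (pidhash[i] != 0)
        {
          printf("slot %d: pid %d\n", i, pidhash[i] - 1);
        }
    }

  free(pidhash);
  return 0;
}
```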