From 306c1c0b7dc218a1d39f65ef74ee01b8313b2fcc Mon Sep 17 00:00:00 2001 From: chao an Date: Tue, 19 Mar 2024 12:33:08 +0800 Subject: [PATCH] sched/tasklist: replace task status list with macro definition Replacing the direct task status list references with macro definitions will help to extend the scheduling implementation. Signed-off-by: chao an --- sched/irq/irq_csection.c | 2 +- sched/paging/pg_miss.c | 2 +- sched/paging/pg_worker.c | 5 +++-- sched/pthread/pthread_create.c | 2 +- sched/sched/sched.h | 20 ++++++++++++++++++-- sched/sched/sched_addreadytorun.c | 14 +++++++------- sched/sched/sched_cpuselect.c | 2 +- sched/sched/sched_lock.c | 4 ++-- sched/sched/sched_mergepending.c | 23 ++++++++++++----------- sched/sched/sched_removereadytorun.c | 8 ++++---- sched/sched/sched_reprioritizertr.c | 2 +- sched/sched/sched_setpriority.c | 4 ++-- sched/sched/sched_suspend.c | 4 ++-- sched/sched/sched_unlock.c | 4 ++-- sched/signal/sig_dispatch.c | 6 +++--- sched/signal/sig_suspend.c | 2 +- sched/signal/sig_timedwait.c | 8 ++++---- sched/task/task_fork.c | 2 +- sched/task/task_init.c | 2 +- sched/task/task_restart.c | 2 +- sched/task/task_setup.c | 2 +- 21 files changed, 69 insertions(+), 51 deletions(-) diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 0c804c7460..b668428611 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -563,7 +563,7 @@ void leave_critical_section(irqstate_t flags) * section then. 
*/ - if (g_pendingtasks.head != NULL && + if (list_pendingtasks()->head != NULL && !nxsched_islocked_global()) { /* Release any ready-to-run tasks that have collected diff --git a/sched/paging/pg_miss.c b/sched/paging/pg_miss.c index 707b9f3b4d..8b10245bc9 100644 --- a/sched/paging/pg_miss.c +++ b/sched/paging/pg_miss.c @@ -143,7 +143,7 @@ void pg_miss(void) /* Add the task to the specified blocked task list */ ftcb->task_state = TSTATE_WAIT_PAGEFILL; - nxsched_add_prioritized(ftcb, &g_waitingforfill); + nxsched_add_prioritized(ftcb, list_waitingforfill()); /* Now, perform the context switch if one is needed */ diff --git a/sched/paging/pg_worker.c b/sched/paging/pg_worker.c index 9e6f4824e9..b58da4848d 100644 --- a/sched/paging/pg_worker.c +++ b/sched/paging/pg_worker.c @@ -136,7 +136,8 @@ static void pg_callback(FAR struct tcb_s *tcb, int result) pginfo("g_pftcb: %p\n", g_pftcb); if (g_pftcb) { - FAR struct tcb_s *htcb = (FAR struct tcb_s *)g_waitingforfill.head; + FAR struct tcb_s *htcb = (FAR struct tcb_s *) + list_waitingforfill()->head; FAR struct tcb_s *wtcb = nxsched_get_tcb(g_pgworker); /* Find the higher priority between the task waiting for the fill to @@ -225,7 +226,7 @@ static inline bool pg_dequeue(void) { /* Remove the TCB from the head of the list (if any) */ - g_pftcb = (FAR struct tcb_s *)dq_remfirst(&g_waitingforfill); + g_pftcb = (FAR struct tcb_s *)dq_remfirst(list_waitingforfill()); pginfo("g_pftcb: %p\n", g_pftcb); if (g_pftcb != NULL) { diff --git a/sched/pthread/pthread_create.c b/sched/pthread/pthread_create.c index f3b37ff7b1..1ba6c1b3ba 100644 --- a/sched/pthread/pthread_create.c +++ b/sched/pthread/pthread_create.c @@ -479,7 +479,7 @@ int nx_pthread_create(pthread_trampoline_t trampoline, FAR pthread_t *thread, else { sched_unlock(); - dq_rem((FAR dq_entry_t *)ptcb, &g_inactivetasks); + dq_rem((FAR dq_entry_t *)ptcb, list_inactivetasks()); errcode = EIO; goto errout_with_tcb; diff --git a/sched/sched/sched.h b/sched/sched/sched.h index 
37aed2d4c7..a7159f986e 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -42,16 +42,32 @@ #define PIDHASH(pid) ((pid) & (g_npidhash - 1)) +/* The state of a task is indicated both by the task_state field of the TCB + * and by a series of task lists. All of these tasks lists are declared + * below. Although it is not always necessary, most of these lists are + * prioritized so that common list handling logic can be used (only the + * g_readytorun, the g_pendingtasks, and the g_waitingforsemaphore lists + * need to be prioritized). + */ + +#define list_readytorun() (&g_readytorun) +#define list_pendingtasks() (&g_pendingtasks) +#define list_waitingforsignal() (&g_waitingforsignal) +#define list_waitingforfill() (&g_waitingforfill) +#define list_stoppedtasks() (&g_stoppedtasks) +#define list_inactivetasks() (&g_inactivetasks) +#define list_assignedtasks(cpu) (&g_assignedtasks[cpu]) + /* These are macros to access the current CPU and the current task on a CPU. * These macros are intended to support a future SMP implementation. * NOTE: this_task() for SMP is implemented in sched_thistask.c */ #ifdef CONFIG_SMP -# define current_task(cpu) ((FAR struct tcb_s *)g_assignedtasks[cpu].head) +# define current_task(cpu) ((FAR struct tcb_s *)list_assignedtasks(cpu)->head) # define this_cpu() up_cpu_index() #else -# define current_task(cpu) ((FAR struct tcb_s *)g_readytorun.head) +# define current_task(cpu) ((FAR struct tcb_s *)list_readytorun()->head) # define this_cpu() (0) # define this_task() (current_task(this_cpu())) #endif diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c index a4434423e0..46c0ef9d4d 100644 --- a/sched/sched/sched_addreadytorun.c +++ b/sched/sched/sched_addreadytorun.c @@ -82,14 +82,14 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * g_pendingtasks task list for now. 
*/ - nxsched_add_prioritized(btcb, &g_pendingtasks); + nxsched_add_prioritized(btcb, list_pendingtasks()); btcb->task_state = TSTATE_TASK_PENDING; ret = false; } /* Otherwise, add the new task to the ready-to-run task list */ - else if (nxsched_add_prioritized(btcb, &g_readytorun)) + else if (nxsched_add_prioritized(btcb, list_readytorun())) { /* The new btcb was added at the head of the ready-to-run list. It * is now the new active task! @@ -230,7 +230,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * now. */ - nxsched_add_prioritized(btcb, &g_pendingtasks); + nxsched_add_prioritized(btcb, list_pendingtasks()); btcb->task_state = TSTATE_TASK_PENDING; doswitch = false; } @@ -244,7 +244,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * Add the task to the ready-to-run (but not running) task list */ - nxsched_add_prioritized(btcb, &g_readytorun); + nxsched_add_prioritized(btcb, list_readytorun()); btcb->task_state = TSTATE_TASK_READYTORUN; doswitch = false; @@ -264,7 +264,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * and check if a context switch will occur */ - tasklist = &g_assignedtasks[cpu]; + tasklist = list_assignedtasks(cpu); switched = nxsched_add_prioritized(btcb, tasklist); /* If the selected task list was the g_assignedtasks[] list and if the @@ -338,12 +338,12 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) if (nxsched_islocked_global()) { next->task_state = TSTATE_TASK_PENDING; - tasklist = &g_pendingtasks; + tasklist = list_pendingtasks(); } else { next->task_state = TSTATE_TASK_READYTORUN; - tasklist = &g_readytorun; + tasklist = list_readytorun(); } nxsched_add_prioritized(next, tasklist); diff --git a/sched/sched/sched_cpuselect.c b/sched/sched/sched_cpuselect.c index a225afaa15..1d49750cd3 100644 --- a/sched/sched/sched_cpuselect.c +++ b/sched/sched/sched_cpuselect.c @@ -77,7 +77,7 @@ int nxsched_select_cpu(cpu_set_t affinity) if ((affinity & (1 << i)) != 0) { FAR struct tcb_s *rtcb = (FAR struct tcb_s *) - 
g_assignedtasks[i].head; + list_assignedtasks(i)->head; /* If this CPU is executing its IDLE task, then use it. The * IDLE task is always the last task in the assigned task list. diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c index 7d9dc49755..16cf8d46a8 100644 --- a/sched/sched/sched_lock.c +++ b/sched/sched/sched_lock.c @@ -213,8 +213,8 @@ int sched_lock(void) * unlocked and nxsched_merge_pending() is called. */ - nxsched_merge_prioritized(&g_readytorun, - &g_pendingtasks, + nxsched_merge_prioritized(list_readytorun(), + list_pendingtasks(), TSTATE_TASK_PENDING); leave_critical_section(flags); diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c index 9915eb3abd..e86969a73b 100644 --- a/sched/sched/sched_mergepending.c +++ b/sched/sched/sched_mergepending.c @@ -89,7 +89,7 @@ bool nxsched_merge_pending(void) if (rtcb->lockcount == 0) { - for (ptcb = (FAR struct tcb_s *)g_pendingtasks.head; + for (ptcb = (FAR struct tcb_s *)list_pendingtasks()->head; ptcb; ptcb = pnext) { @@ -128,7 +128,8 @@ bool nxsched_merge_pending(void) ptcb->flink = rtcb; ptcb->blink = NULL; rtcb->blink = ptcb; - g_readytorun.head = (FAR dq_entry_t *)ptcb; + list_readytorun()->head + = (FAR dq_entry_t *)ptcb; rtcb->task_state = TSTATE_TASK_READYTORUN; ptcb->task_state = TSTATE_TASK_RUNNING; ret = true; @@ -151,8 +152,8 @@ bool nxsched_merge_pending(void) /* Mark the input list empty */ - g_pendingtasks.head = NULL; - g_pendingtasks.tail = NULL; + list_pendingtasks()->head = NULL; + list_pendingtasks()->tail = NULL; } return ret; @@ -202,7 +203,7 @@ bool nxsched_merge_pending(void) { /* Find the CPU that is executing the lowest priority task */ - ptcb = (FAR struct tcb_s *)dq_peek(&g_pendingtasks); + ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks()); if (ptcb == NULL) { /* The pending task list is empty */ @@ -226,7 +227,7 @@ bool nxsched_merge_pending(void) { /* Remove the task from the pending task list */ - tcb = (FAR struct tcb_s 
*)dq_remfirst(&g_pendingtasks); + tcb = (FAR struct tcb_s *)dq_remfirst(list_pendingtasks()); /* Add the pending task to the correct ready-to-run list. */ @@ -243,8 +244,8 @@ bool nxsched_merge_pending(void) * move them back to the pending task list. */ - nxsched_merge_prioritized(&g_readytorun, - &g_pendingtasks, + nxsched_merge_prioritized(list_readytorun(), + list_pendingtasks(), TSTATE_TASK_PENDING); /* And return with the scheduler locked and tasks in the @@ -256,7 +257,7 @@ bool nxsched_merge_pending(void) /* Set up for the next time through the loop */ - ptcb = (FAR struct tcb_s *)dq_peek(&g_pendingtasks); + ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks()); if (ptcb == NULL) { /* The pending task list is empty */ @@ -272,8 +273,8 @@ bool nxsched_merge_pending(void) * tasks in the pending task list to the ready-to-run task list. */ - nxsched_merge_prioritized(&g_pendingtasks, - &g_readytorun, + nxsched_merge_prioritized(list_pendingtasks(), + list_readytorun(), TSTATE_TASK_READYTORUN); } diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c index ba954c4d5f..4d9050b36a 100644 --- a/sched/sched/sched_removereadytorun.c +++ b/sched/sched/sched_removereadytorun.c @@ -97,7 +97,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) rtcb->task_state = TSTATE_TASK_INVALID; - if (g_pendingtasks.head && merge) + if (list_pendingtasks()->head && merge) { doswitch |= nxsched_merge_pending(); } @@ -200,7 +200,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) * CPU. */ - for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head; + for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head; rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity); rtrtcb = rtrtcb->flink); @@ -218,7 +218,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) * list and add to the head of the g_assignedtasks[cpu] list. 
*/ - dq_rem((FAR dq_entry_t *)rtrtcb, &g_readytorun); + dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun()); dq_addfirst((FAR dq_entry_t *)rtrtcb, tasklist); rtrtcb->cpu = cpu; @@ -279,7 +279,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) rtcb->task_state = TSTATE_TASK_INVALID; - if (g_pendingtasks.head && merge) + if (list_pendingtasks()->head && merge) { doswitch |= nxsched_merge_pending(); } diff --git a/sched/sched/sched_reprioritizertr.c b/sched/sched/sched_reprioritizertr.c index a94cf0e298..8b4c6b98c2 100644 --- a/sched/sched/sched_reprioritizertr.c +++ b/sched/sched/sched_reprioritizertr.c @@ -82,7 +82,7 @@ bool nxsched_reprioritize_rtr(FAR struct tcb_s *tcb, int priority) * time to add any pending tasks back into the ready-to-run list. */ - if (switch_needed && g_pendingtasks.head) + if (switch_needed && list_pendingtasks()->head) { nxsched_merge_pending(); } diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c index 711ecb61f7..a93f92978b 100644 --- a/sched/sched/sched_setpriority.c +++ b/sched/sched/sched_setpriority.c @@ -72,7 +72,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb) { /* Search for the highest priority task that can run on tcb->cpu. 
*/ - for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head; + for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head; rtrtcb != NULL && !CPU_ISSET(tcb->cpu, &rtrtcb->affinity); rtrtcb = rtrtcb->flink); @@ -154,7 +154,7 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb, DEBUGASSERT(check == false); UNUSED(check); - nxsched_add_prioritized(nxttcb, &g_pendingtasks); + nxsched_add_prioritized(nxttcb, list_pendingtasks()); nxttcb->task_state = TSTATE_TASK_PENDING; #ifdef CONFIG_SMP diff --git a/sched/sched/sched_suspend.c b/sched/sched/sched_suspend.c index 53bd8f294a..8d4be93eaa 100644 --- a/sched/sched/sched_suspend.c +++ b/sched/sched/sched_suspend.c @@ -75,7 +75,7 @@ void nxsched_suspend(FAR struct tcb_s *tcb) /* Move the TCB to the g_stoppedtasks list. */ tcb->task_state = TSTATE_TASK_STOPPED; - dq_addlast((FAR dq_entry_t *)tcb, &g_stoppedtasks); + dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks()); } else { @@ -97,7 +97,7 @@ void nxsched_suspend(FAR struct tcb_s *tcb) /* Add the task to the specified blocked task list */ tcb->task_state = TSTATE_TASK_STOPPED; - dq_addlast((FAR dq_entry_t *)tcb, &g_stoppedtasks); + dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks()); /* Now, perform the context switch if one is needed */ diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c index eec27fd4a2..fdbb291a82 100644 --- a/sched/sched/sched_unlock.c +++ b/sched/sched/sched_unlock.c @@ -138,7 +138,7 @@ int sched_unlock(void) */ if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) && - g_pendingtasks.head != NULL) + list_pendingtasks()->head != NULL) { if (nxsched_merge_pending()) { @@ -272,7 +272,7 @@ int sched_unlock(void) * fully independently. 
*/ - if (g_pendingtasks.head != NULL) + if (list_pendingtasks()->head != NULL) { if (nxsched_merge_pending()) { diff --git a/sched/signal/sig_dispatch.c b/sched/signal/sig_dispatch.c index 7f2785acf5..a657a92a92 100644 --- a/sched/signal/sig_dispatch.c +++ b/sched/signal/sig_dispatch.c @@ -408,7 +408,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info) /* Remove the task from waitting list */ - dq_rem((FAR dq_entry_t *)stcb, &g_waitingforsignal); + dq_rem((FAR dq_entry_t *)stcb, list_waitingforsignal()); /* Add the task to ready-to-run task list and * perform the context switch if one is needed @@ -471,7 +471,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info) /* Remove the task from waitting list */ - dq_rem((FAR dq_entry_t *)stcb, &g_waitingforsignal); + dq_rem((FAR dq_entry_t *)stcb, list_waitingforsignal()); /* Add the task to ready-to-run task list and * perform the context switch if one is needed @@ -535,7 +535,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info) #else /* Remove the task from waitting list */ - dq_rem((FAR dq_entry_t *)stcb, &g_stoppedtasks); + dq_rem((FAR dq_entry_t *)stcb, list_stoppedtasks()); /* Add the task to ready-to-run task list and * perform the context switch if one is needed diff --git a/sched/signal/sig_suspend.c b/sched/signal/sig_suspend.c index b4e4667fda..e6dbc203dd 100644 --- a/sched/signal/sig_suspend.c +++ b/sched/signal/sig_suspend.c @@ -132,7 +132,7 @@ int sigsuspend(FAR const sigset_t *set) /* Add the task to the specified blocked task list */ rtcb->task_state = TSTATE_WAIT_SIG; - dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal); + dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal()); /* Now, perform the context switch if one is needed */ diff --git a/sched/signal/sig_timedwait.c b/sched/signal/sig_timedwait.c index d14daefe9f..9bbef85438 100644 --- a/sched/signal/sig_timedwait.c +++ b/sched/signal/sig_timedwait.c @@ -110,7 +110,7 @@ static void 
nxsig_timeout(wdparm_t arg) /* Remove the task from waitting list */ - dq_rem((FAR dq_entry_t *)wtcb, &g_waitingforsignal); + dq_rem((FAR dq_entry_t *)wtcb, list_waitingforsignal()); /* Add the task to ready-to-run task list, and * perform the context switch if one is needed @@ -177,7 +177,7 @@ void nxsig_wait_irq(FAR struct tcb_s *wtcb, int errcode) /* Remove the task from waitting list */ - dq_rem((FAR dq_entry_t *)wtcb, &g_waitingforsignal); + dq_rem((FAR dq_entry_t *)wtcb, list_waitingforsignal()); /* Add the task to ready-to-run task list, and * perform the context switch if one is needed @@ -361,7 +361,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info, /* Add the task to the specified blocked task list */ rtcb->task_state = TSTATE_WAIT_SIG; - dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal); + dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal()); /* Now, perform the context switch if one is needed */ @@ -403,7 +403,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info, /* Add the task to the specified blocked task list */ rtcb->task_state = TSTATE_WAIT_SIG; - dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal); + dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal()); /* Now, perform the context switch if one is needed */ diff --git a/sched/task/task_fork.c b/sched/task/task_fork.c index f9d5d4a5f2..66854106c1 100644 --- a/sched/task/task_fork.c +++ b/sched/task/task_fork.c @@ -318,7 +318,7 @@ void nxtask_abort_fork(FAR struct task_tcb_s *child, int errcode) { /* The TCB was added to the active task list by nxtask_setup_scheduler() */ - dq_rem((FAR dq_entry_t *)child, &g_inactivetasks); + dq_rem((FAR dq_entry_t *)child, list_inactivetasks()); /* Release the TCB */ diff --git a/sched/task/task_init.c b/sched/task/task_init.c index 8f3ed60d85..14f8f64f7c 100644 --- a/sched/task/task_init.c +++ b/sched/task/task_init.c @@ -243,7 +243,7 @@ void nxtask_uninit(FAR struct task_tcb_s *tcb) * 
nxtask_setup_scheduler(). */ - dq_rem((FAR dq_entry_t *)tcb, &g_inactivetasks); + dq_rem((FAR dq_entry_t *)tcb, list_inactivetasks()); /* Release all resources associated with the TCB... Including the TCB * itself. diff --git a/sched/task/task_restart.c b/sched/task/task_restart.c index cb722f44fe..a32d77ca52 100644 --- a/sched/task/task_restart.c +++ b/sched/task/task_restart.c @@ -177,7 +177,7 @@ static int nxtask_restart(pid_t pid) /* Add the task to the inactive task list */ - dq_addfirst((FAR dq_entry_t *)tcb, &g_inactivetasks); + dq_addfirst((FAR dq_entry_t *)tcb, list_inactivetasks()); tcb->cmn.task_state = TSTATE_TASK_INACTIVE; #ifdef CONFIG_SMP diff --git a/sched/task/task_setup.c b/sched/task/task_setup.c index eeb51fbc2e..d74200590e 100644 --- a/sched/task/task_setup.c +++ b/sched/task/task_setup.c @@ -452,7 +452,7 @@ static int nxthread_setup_scheduler(FAR struct tcb_s *tcb, int priority, /* Add the task to the inactive task list */ sched_lock(); - dq_addfirst((FAR dq_entry_t *)tcb, &g_inactivetasks); + dq_addfirst((FAR dq_entry_t *)tcb, list_inactivetasks()); tcb->task_state = TSTATE_TASK_INACTIVE; sched_unlock(); }