sched/tasklist: replace task status list with macro definition

Going through accessor macros instead of referencing the task status lists directly will make it easier to extend the scheduling implementation.

Signed-off-by: chao an <anchao@lixiang.com>
chao an 2024-03-19 12:33:08 +08:00 committed by Masayuki Ishikawa
parent ad0efd04ee
commit 306c1c0b7d
21 changed files with 69 additions and 51 deletions
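
For context, here is a minimal, self-contained sketch of the pattern this
commit applies. The struct, global, macro, and helper below are simplified
stand-ins written for illustration, not the actual NuttX declarations: each
global task list keeps its definition, but call sites go through an accessor
macro, so a later change to where or how a list is stored only has to touch
the macro.

#include <stddef.h>

/* Simplified stand-in for NuttX's dq_queue_s (illustration only) */

struct dq_queue_s
{
  void *head;
  void *tail;
};

/* The global list itself is unchanged... */

static struct dq_queue_s g_pendingtasks;

/* ...but accesses now go through a macro, mirroring the list_pendingtasks()
 * definition added by this commit.
 */

#define list_pendingtasks() (&g_pendingtasks)

static int has_pending_tasks(void)
{
  /* Before: g_pendingtasks.head != NULL
   * After:  list_pendingtasks()->head != NULL
   */

  return list_pendingtasks()->head != NULL;
}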

@ -563,7 +563,7 @@ void leave_critical_section(irqstate_t flags)
* section then.
*/
if (g_pendingtasks.head != NULL &&
if (list_pendingtasks()->head != NULL &&
!nxsched_islocked_global())
{
/* Release any ready-to-run tasks that have collected

@ -143,7 +143,7 @@ void pg_miss(void)
/* Add the task to the specified blocked task list */
ftcb->task_state = TSTATE_WAIT_PAGEFILL;
nxsched_add_prioritized(ftcb, &g_waitingforfill);
nxsched_add_prioritized(ftcb, list_waitingforfill());
/* Now, perform the context switch if one is needed */

@ -136,7 +136,8 @@ static void pg_callback(FAR struct tcb_s *tcb, int result)
pginfo("g_pftcb: %p\n", g_pftcb);
if (g_pftcb)
{
FAR struct tcb_s *htcb = (FAR struct tcb_s *)g_waitingforfill.head;
FAR struct tcb_s *htcb = (FAR struct tcb_s *)
list_waitingforfill()->head;
FAR struct tcb_s *wtcb = nxsched_get_tcb(g_pgworker);
/* Find the higher priority between the task waiting for the fill to
@ -225,7 +226,7 @@ static inline bool pg_dequeue(void)
{
/* Remove the TCB from the head of the list (if any) */
g_pftcb = (FAR struct tcb_s *)dq_remfirst(&g_waitingforfill);
g_pftcb = (FAR struct tcb_s *)dq_remfirst(list_waitingforfill());
pginfo("g_pftcb: %p\n", g_pftcb);
if (g_pftcb != NULL)
{

@ -479,7 +479,7 @@ int nx_pthread_create(pthread_trampoline_t trampoline, FAR pthread_t *thread,
else
{
sched_unlock();
dq_rem((FAR dq_entry_t *)ptcb, &g_inactivetasks);
dq_rem((FAR dq_entry_t *)ptcb, list_inactivetasks());
errcode = EIO;
goto errout_with_tcb;

@ -42,16 +42,32 @@
#define PIDHASH(pid) ((pid) & (g_npidhash - 1))
/* The state of a task is indicated both by the task_state field of the TCB
* and by a series of task lists. All of these task lists are declared
* below. Although it is not always necessary, most of these lists are
* prioritized so that common list handling logic can be used (only the
* g_readytorun, the g_pendingtasks, and the g_waitingforsemaphore lists
* need to be prioritized).
*/
#define list_readytorun() (&g_readytorun)
#define list_pendingtasks() (&g_pendingtasks)
#define list_waitingforsignal() (&g_waitingforsignal)
#define list_waitingforfill() (&g_waitingforfill)
#define list_stoppedtasks() (&g_stoppedtasks)
#define list_inactivetasks() (&g_inactivetasks)
#define list_assignedtasks(cpu) (&g_assignedtasks[cpu])
/* These are macros to access the current CPU and the current task on a CPU.
* These macros are intended to support a future SMP implementation.
* NOTE: this_task() for SMP is implemented in sched_thistask.c
*/
#ifdef CONFIG_SMP
# define current_task(cpu) ((FAR struct tcb_s *)g_assignedtasks[cpu].head)
# define current_task(cpu) ((FAR struct tcb_s *)list_assignedtasks(cpu)->head)
# define this_cpu() up_cpu_index()
#else
# define current_task(cpu) ((FAR struct tcb_s *)g_readytorun.head)
# define current_task(cpu) ((FAR struct tcb_s *)list_readytorun()->head)
# define this_cpu() (0)
# define this_task() (current_task(this_cpu()))
#endif
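
Because every call site now goes through these macros, the commit message's
goal of extending the scheduling implementation can later be met by changing
only the macro bodies. A purely hypothetical sketch of such a follow-up (the
sched_cpu_s struct, its field, and g_cpu_sched are invented for illustration
and are not part of this commit or of NuttX; dq_queue_t and this_cpu() are
assumed from the surrounding headers):

/* Hypothetical follow-up, illustration only: move the pending list into a
 * per-CPU scheduler block.  Call sites such as
 * dq_peek(list_pendingtasks()) keep compiling unchanged; only this
 * definition moves.
 */

struct sched_cpu_s
{
  dq_queue_t pendingtasks;                /* invented field */
};

extern struct sched_cpu_s g_cpu_sched[];  /* invented global */

#undef  list_pendingtasks
#define list_pendingtasks() (&g_cpu_sched[this_cpu()].pendingtasks)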

@ -82,14 +82,14 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* g_pendingtasks task list for now.
*/
nxsched_add_prioritized(btcb, &g_pendingtasks);
nxsched_add_prioritized(btcb, list_pendingtasks());
btcb->task_state = TSTATE_TASK_PENDING;
ret = false;
}
/* Otherwise, add the new task to the ready-to-run task list */
else if (nxsched_add_prioritized(btcb, &g_readytorun))
else if (nxsched_add_prioritized(btcb, list_readytorun()))
{
/* The new btcb was added at the head of the ready-to-run list. It
* is now the new active task!
@ -230,7 +230,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* now.
*/
nxsched_add_prioritized(btcb, &g_pendingtasks);
nxsched_add_prioritized(btcb, list_pendingtasks());
btcb->task_state = TSTATE_TASK_PENDING;
doswitch = false;
}
@ -244,7 +244,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* Add the task to the ready-to-run (but not running) task list
*/
nxsched_add_prioritized(btcb, &g_readytorun);
nxsched_add_prioritized(btcb, list_readytorun());
btcb->task_state = TSTATE_TASK_READYTORUN;
doswitch = false;
@ -264,7 +264,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* and check if a context switch will occur
*/
tasklist = &g_assignedtasks[cpu];
tasklist = list_assignedtasks(cpu);
switched = nxsched_add_prioritized(btcb, tasklist);
/* If the selected task list was the g_assignedtasks[] list and if the
@ -338,12 +338,12 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
if (nxsched_islocked_global())
{
next->task_state = TSTATE_TASK_PENDING;
tasklist = &g_pendingtasks;
tasklist = list_pendingtasks();
}
else
{
next->task_state = TSTATE_TASK_READYTORUN;
tasklist = &g_readytorun;
tasklist = list_readytorun();
}
nxsched_add_prioritized(next, tasklist);

@ -77,7 +77,7 @@ int nxsched_select_cpu(cpu_set_t affinity)
if ((affinity & (1 << i)) != 0)
{
FAR struct tcb_s *rtcb = (FAR struct tcb_s *)
g_assignedtasks[i].head;
list_assignedtasks(i)->head;
/* If this CPU is executing its IDLE task, then use it. The
* IDLE task is always the last task in the assigned task list.
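
The comment above carries the selection rule: a CPU whose assigned task list
holds nothing but its IDLE task is free to take new work. Below is a
simplified, self-contained sketch of that rule only; the types, the g_lists
array, and select_idle_cpu() are invented for illustration, and this is not
the real nxsched_select_cpu().

#define NCPUS_SKETCH 4

struct dq_sketch_s                        /* stand-in for an assigned list */
{
  void *head;
  void *tail;
};

static struct dq_sketch_s g_lists[NCPUS_SKETCH];

#define list_assigned_sketch(cpu) (&g_lists[cpu])

static int select_idle_cpu(unsigned int affinity)
{
  int i;

  for (i = 0; i < NCPUS_SKETCH; i++)
    {
      /* head == tail means a single entry remains, and because the IDLE
       * task is always the last entry, that single entry is the IDLE task:
       * this CPU is idle.
       */

      if ((affinity & (1u << i)) != 0 &&
          list_assigned_sketch(i)->head == list_assigned_sketch(i)->tail)
        {
          return i;
        }
    }

  return -1;                              /* no idle CPU in the affinity set */
}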

@ -213,8 +213,8 @@ int sched_lock(void)
* unlocked and nxsched_merge_pending() is called.
*/
nxsched_merge_prioritized(&g_readytorun,
&g_pendingtasks,
nxsched_merge_prioritized(list_readytorun(),
list_pendingtasks(),
TSTATE_TASK_PENDING);
leave_critical_section(flags);

@ -89,7 +89,7 @@ bool nxsched_merge_pending(void)
if (rtcb->lockcount == 0)
{
for (ptcb = (FAR struct tcb_s *)g_pendingtasks.head;
for (ptcb = (FAR struct tcb_s *)list_pendingtasks()->head;
ptcb;
ptcb = pnext)
{
@ -128,7 +128,8 @@ bool nxsched_merge_pending(void)
ptcb->flink = rtcb;
ptcb->blink = NULL;
rtcb->blink = ptcb;
g_readytorun.head = (FAR dq_entry_t *)ptcb;
list_readytorun()->head
= (FAR dq_entry_t *)ptcb;
rtcb->task_state = TSTATE_TASK_READYTORUN;
ptcb->task_state = TSTATE_TASK_RUNNING;
ret = true;
@ -151,8 +152,8 @@ bool nxsched_merge_pending(void)
/* Mark the input list empty */
g_pendingtasks.head = NULL;
g_pendingtasks.tail = NULL;
list_pendingtasks()->head = NULL;
list_pendingtasks()->tail = NULL;
}
return ret;
@ -202,7 +203,7 @@ bool nxsched_merge_pending(void)
{
/* Find the CPU that is executing the lowest priority task */
ptcb = (FAR struct tcb_s *)dq_peek(&g_pendingtasks);
ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks());
if (ptcb == NULL)
{
/* The pending task list is empty */
@ -226,7 +227,7 @@ bool nxsched_merge_pending(void)
{
/* Remove the task from the pending task list */
tcb = (FAR struct tcb_s *)dq_remfirst(&g_pendingtasks);
tcb = (FAR struct tcb_s *)dq_remfirst(list_pendingtasks());
/* Add the pending task to the correct ready-to-run list. */
@ -243,8 +244,8 @@ bool nxsched_merge_pending(void)
* move them back to the pending task list.
*/
nxsched_merge_prioritized(&g_readytorun,
&g_pendingtasks,
nxsched_merge_prioritized(list_readytorun(),
list_pendingtasks(),
TSTATE_TASK_PENDING);
/* And return with the scheduler locked and tasks in the
@ -256,7 +257,7 @@ bool nxsched_merge_pending(void)
/* Set up for the next time through the loop */
ptcb = (FAR struct tcb_s *)dq_peek(&g_pendingtasks);
ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks());
if (ptcb == NULL)
{
/* The pending task list is empty */
@ -272,8 +273,8 @@ bool nxsched_merge_pending(void)
* tasks in the pending task list to the ready-to-run task list.
*/
nxsched_merge_prioritized(&g_pendingtasks,
&g_readytorun,
nxsched_merge_prioritized(list_pendingtasks(),
list_readytorun(),
TSTATE_TASK_READYTORUN);
}

@ -97,7 +97,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
rtcb->task_state = TSTATE_TASK_INVALID;
if (g_pendingtasks.head && merge)
if (list_pendingtasks()->head && merge)
{
doswitch |= nxsched_merge_pending();
}
@ -200,7 +200,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
* CPU.
*/
for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head;
rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
rtrtcb = rtrtcb->flink);
@ -218,7 +218,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
* list and add to the head of the g_assignedtasks[cpu] list.
*/
dq_rem((FAR dq_entry_t *)rtrtcb, &g_readytorun);
dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun());
dq_addfirst((FAR dq_entry_t *)rtrtcb, tasklist);
rtrtcb->cpu = cpu;
@ -279,7 +279,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
rtcb->task_state = TSTATE_TASK_INVALID;
if (g_pendingtasks.head && merge)
if (list_pendingtasks()->head && merge)
{
doswitch |= nxsched_merge_pending();
}

@ -82,7 +82,7 @@ bool nxsched_reprioritize_rtr(FAR struct tcb_s *tcb, int priority)
* time to add any pending tasks back into the ready-to-run list.
*/
if (switch_needed && g_pendingtasks.head)
if (switch_needed && list_pendingtasks()->head)
{
nxsched_merge_pending();
}

@ -72,7 +72,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
{
/* Search for the highest priority task that can run on tcb->cpu. */
for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head;
rtrtcb != NULL && !CPU_ISSET(tcb->cpu, &rtrtcb->affinity);
rtrtcb = rtrtcb->flink);
@ -154,7 +154,7 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb,
DEBUGASSERT(check == false);
UNUSED(check);
nxsched_add_prioritized(nxttcb, &g_pendingtasks);
nxsched_add_prioritized(nxttcb, list_pendingtasks());
nxttcb->task_state = TSTATE_TASK_PENDING;
#ifdef CONFIG_SMP

@ -75,7 +75,7 @@ void nxsched_suspend(FAR struct tcb_s *tcb)
/* Move the TCB to the g_stoppedtasks list. */
tcb->task_state = TSTATE_TASK_STOPPED;
dq_addlast((FAR dq_entry_t *)tcb, &g_stoppedtasks);
dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks());
}
else
{
@ -97,7 +97,7 @@ void nxsched_suspend(FAR struct tcb_s *tcb)
/* Add the task to the specified blocked task list */
tcb->task_state = TSTATE_TASK_STOPPED;
dq_addlast((FAR dq_entry_t *)tcb, &g_stoppedtasks);
dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks());
/* Now, perform the context switch if one is needed */

@ -138,7 +138,7 @@ int sched_unlock(void)
*/
if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) &&
g_pendingtasks.head != NULL)
list_pendingtasks()->head != NULL)
{
if (nxsched_merge_pending())
{
@ -272,7 +272,7 @@ int sched_unlock(void)
* fully independently.
*/
if (g_pendingtasks.head != NULL)
if (list_pendingtasks()->head != NULL)
{
if (nxsched_merge_pending())
{

@ -408,7 +408,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
/* Remove the task from waiting list */
dq_rem((FAR dq_entry_t *)stcb, &g_waitingforsignal);
dq_rem((FAR dq_entry_t *)stcb, list_waitingforsignal());
/* Add the task to ready-to-run task list and
* perform the context switch if one is needed
@ -471,7 +471,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
/* Remove the task from waiting list */
dq_rem((FAR dq_entry_t *)stcb, &g_waitingforsignal);
dq_rem((FAR dq_entry_t *)stcb, list_waitingforsignal());
/* Add the task to ready-to-run task list and
* perform the context switch if one is needed
@ -535,7 +535,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
#else
/* Remove the task from waiting list */
dq_rem((FAR dq_entry_t *)stcb, &g_stoppedtasks);
dq_rem((FAR dq_entry_t *)stcb, list_stoppedtasks());
/* Add the task to ready-to-run task list and
* perform the context switch if one is needed

@ -132,7 +132,7 @@ int sigsuspend(FAR const sigset_t *set)
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SIG;
dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal);
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
/* Now, perform the context switch if one is needed */

@ -110,7 +110,7 @@ static void nxsig_timeout(wdparm_t arg)
/* Remove the task from waiting list */
dq_rem((FAR dq_entry_t *)wtcb, &g_waitingforsignal);
dq_rem((FAR dq_entry_t *)wtcb, list_waitingforsignal());
/* Add the task to ready-to-run task list, and
* perform the context switch if one is needed
@ -177,7 +177,7 @@ void nxsig_wait_irq(FAR struct tcb_s *wtcb, int errcode)
/* Remove the task from waiting list */
dq_rem((FAR dq_entry_t *)wtcb, &g_waitingforsignal);
dq_rem((FAR dq_entry_t *)wtcb, list_waitingforsignal());
/* Add the task to ready-to-run task list, and
* perform the context switch if one is needed
@ -361,7 +361,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SIG;
dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal);
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
/* Now, perform the context switch if one is needed */
@ -403,7 +403,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SIG;
dq_addlast((FAR dq_entry_t *)rtcb, &g_waitingforsignal);
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
/* Now, perform the context switch if one is needed */

@ -318,7 +318,7 @@ void nxtask_abort_fork(FAR struct task_tcb_s *child, int errcode)
{
/* The TCB was added to the active task list by nxtask_setup_scheduler() */
dq_rem((FAR dq_entry_t *)child, &g_inactivetasks);
dq_rem((FAR dq_entry_t *)child, list_inactivetasks());
/* Release the TCB */

@ -243,7 +243,7 @@ void nxtask_uninit(FAR struct task_tcb_s *tcb)
* nxtask_setup_scheduler().
*/
dq_rem((FAR dq_entry_t *)tcb, &g_inactivetasks);
dq_rem((FAR dq_entry_t *)tcb, list_inactivetasks());
/* Release all resources associated with the TCB... Including the TCB
* itself.

@ -177,7 +177,7 @@ static int nxtask_restart(pid_t pid)
/* Add the task to the inactive task list */
dq_addfirst((FAR dq_entry_t *)tcb, &g_inactivetasks);
dq_addfirst((FAR dq_entry_t *)tcb, list_inactivetasks());
tcb->cmn.task_state = TSTATE_TASK_INACTIVE;
#ifdef CONFIG_SMP

@ -452,7 +452,7 @@ static int nxthread_setup_scheduler(FAR struct tcb_s *tcb, int priority,
/* Add the task to the inactive task list */
sched_lock();
dq_addfirst((FAR dq_entry_t *)tcb, &g_inactivetasks);
dq_addfirst((FAR dq_entry_t *)tcb, list_inactivetasks());
tcb->task_state = TSTATE_TASK_INACTIVE;
sched_unlock();
}