diff --git a/sched/sched/queue.h b/sched/sched/queue.h
new file mode 100644
index 0000000000..c91a37de7a
--- /dev/null
+++ b/sched/sched/queue.h
@@ -0,0 +1,66 @@
+/****************************************************************************
+ * sched/sched/queue.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H
+#define __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/queue.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define dq_addfirst_nonempty(p, q) \
+  do \
+    { \
+      FAR dq_entry_t *tmp_node = (p); \
+      tmp_node->blink = NULL; \
+      tmp_node->flink = (q)->head; \
+      (q)->head->blink = tmp_node; \
+      (q)->head = tmp_node; \
+    } \
+  while (0)
+
+#define dq_rem_head(p, q) \
+  do \
+    { \
+      FAR dq_entry_t *tmp_node = (p); \
+      FAR dq_entry_t *tmp_next = tmp_node->flink; \
+      (q)->head = tmp_next; \
+      tmp_next->blink = NULL; \
+      tmp_node->flink = NULL; \
+    } \
+  while (0)
+
+#define dq_rem_mid(p) \
+  do \
+    { \
+      FAR dq_entry_t *tmp_prev = (FAR dq_entry_t *)(p)->blink; \
+      FAR dq_entry_t *tmp_next = (FAR dq_entry_t *)(p)->flink; \
+      tmp_prev->flink = tmp_next; \
+      tmp_next->blink = tmp_prev; \
+    } \
+  while (0)
+
+#endif /* __INCLUDE_SCHED_SCHED_NUTTX_QUEUE_H */
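Not part of the patch, but worth spelling out: these three macros trade the generality of the dq_addfirst()/dq_rem() helpers in <nuttx/queue.h> (which handle empty queues and head/tail boundary cases) for speed, so each caller must guarantee a precondition. dq_addfirst_nonempty() needs a queue that already holds at least one entry, dq_rem_head() needs a head that has a successor, and dq_rem_mid() needs a node with live neighbors on both sides. The standalone sketch below illustrates those contracts; the dq_entry_s/dq_queue_s stubs stand in for the real <nuttx/queue.h> types and FAR is reduced to a no-op here.

/* Standalone sketch of the macro contracts (stub types, not NuttX code) */

#include <assert.h>
#include <stddef.h>

#define FAR

typedef struct dq_entry_s
{
  FAR struct dq_entry_s *flink;
  FAR struct dq_entry_s *blink;
} dq_entry_t;

typedef struct dq_queue_s
{
  FAR dq_entry_t *head;
  FAR dq_entry_t *tail;
} dq_queue_t;

/* The three macros exactly as defined in sched/sched/queue.h above */

#define dq_addfirst_nonempty(p, q) \
  do \
    { \
      FAR dq_entry_t *tmp_node = (p); \
      tmp_node->blink = NULL; \
      tmp_node->flink = (q)->head; \
      (q)->head->blink = tmp_node; \
      (q)->head = tmp_node; \
    } \
  while (0)

#define dq_rem_head(p, q) \
  do \
    { \
      FAR dq_entry_t *tmp_node = (p); \
      FAR dq_entry_t *tmp_next = tmp_node->flink; \
      (q)->head = tmp_next; \
      tmp_next->blink = NULL; \
      tmp_node->flink = NULL; \
    } \
  while (0)

#define dq_rem_mid(p) \
  do \
    { \
      FAR dq_entry_t *tmp_prev = (FAR dq_entry_t *)(p)->blink; \
      FAR dq_entry_t *tmp_next = (FAR dq_entry_t *)(p)->flink; \
      tmp_prev->flink = tmp_next; \
      tmp_next->blink = tmp_prev; \
    } \
  while (0)

int main(void)
{
  dq_queue_t q;
  dq_entry_t a;
  dq_entry_t b;
  dq_entry_t c;

  /* Seed a one-entry queue by hand; dq_addfirst_nonempty() must never see
   * an empty queue because it dereferences (q)->head unconditionally.
   */

  c.flink = NULL;
  c.blink = NULL;
  q.head  = &c;
  q.tail  = &c;

  dq_addfirst_nonempty(&b, &q);   /* queue: b c */
  dq_addfirst_nonempty(&a, &q);   /* queue: a b c */
  assert(q.head == &a && b.blink == &a && b.flink == &c);

  dq_rem_mid(&b);                 /* interior node: both neighbors exist */
  assert(a.flink == &c && c.blink == &a);

  dq_rem_head(&a, &q);            /* head with a live successor */
  assert(q.head == &c && c.blink == NULL && q.tail == &c);

  return 0;
}

Violating any of these preconditions is a NULL dereference, which is why the patch only applies them to lists that are guaranteed non-empty: every g_assignedtasks[cpu] list always contains at least that CPU's IDLE task.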
- */ - - task_state = TSTATE_TASK_READYTORUN; - cpu = nxsched_select_cpu(btcb->affinity); - } + cpu = nxsched_select_cpu(btcb->affinity); /* Get the task currently running on the CPU (may be the IDLE task) */ @@ -193,6 +176,10 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) { task_state = TSTATE_TASK_RUNNING; } + else + { + task_state = TSTATE_TASK_READYTORUN; + } /* If the selected state is TSTATE_TASK_RUNNING, then we would like to * start running the task. Be we cannot do that if pre-emption is @@ -207,8 +194,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) * situation. */ - if ((nxsched_islocked_global()) && - task_state != TSTATE_TASK_ASSIGNED) + if (nxsched_islocked_global()) { /* Add the new ready-to-run task to the g_pendingtasks task list for * now. @@ -233,7 +219,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) btcb->task_state = TSTATE_TASK_READYTORUN; doswitch = false; } - else /* (task_state == TSTATE_TASK_ASSIGNED || task_state == TSTATE_TASK_RUNNING) */ + else /* (task_state == TSTATE_TASK_RUNNING) */ { /* If we are modifying some assigned task list other than our own, we * will need to stop that CPU. @@ -245,109 +231,32 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) DEBUGVERIFY(up_cpu_pause(cpu)); } - /* Add the task to the list corresponding to the selected state - * and check if a context switch will occur + tasklist = &g_assignedtasks[cpu]; + + /* Change "head" from TSTATE_TASK_RUNNING to TSTATE_TASK_ASSIGNED */ + + headtcb = (FAR struct tcb_s *)tasklist->head; + DEBUGASSERT(headtcb->task_state = TSTATE_TASK_RUNNING); + headtcb->task_state = TSTATE_TASK_ASSIGNED; + + /* Add btcb to the head of the g_assignedtasks + * task list and mark it as running */ - tasklist = list_assignedtasks(cpu); - doswitch = nxsched_add_prioritized(btcb, tasklist); + dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist); - /* If the selected task list was the g_assignedtasks[] list and if the - * new tasks is the highest priority (RUNNING) task, then a context - * switch will occur. - */ + DEBUGASSERT(task_state == TSTATE_TASK_RUNNING); + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_RUNNING; - if (doswitch) + doswitch = true; + + /* Resume scheduling lock */ + + DEBUGASSERT(g_cpu_lockset == 0); + if (btcb->lockcount > 0) { - FAR struct tcb_s *next; - - /* The new btcb was added at the head of the ready-to-run list. It - * is now the new active task! - */ - - /* Assign the CPU and set the running state */ - - DEBUGASSERT(task_state == TSTATE_TASK_RUNNING); - - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_RUNNING; - - /* Adjust global pre-emption controls. If the lockcount is - * greater than zero, then this task/this CPU holds the scheduler - * lock. - */ - - if (btcb->lockcount > 0) - { - g_cpu_lockset |= (1 << cpu); - } - else - { - g_cpu_lockset &= ~(1 << cpu); - } - - /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ - * controls will be done in the pause handler on the new CPU(cpu). - * If the task is scheduled on this CPU(me), do nothing because - * this CPU already has a critical section - */ - - /* If the following task is not locked to this CPU, then it must - * be moved to the g_readytorun list. Since it cannot be at the - * head of the list, we can do this without invoking any heavy - * lifting machinery. 
- */ - - DEBUGASSERT(btcb->flink != NULL); - next = btcb->flink; - - if ((next->flags & TCB_FLAG_CPU_LOCKED) != 0) - { - DEBUGASSERT(next->cpu == cpu); - next->task_state = TSTATE_TASK_ASSIGNED; - } - else - { - /* Remove the task from the assigned task list */ - - dq_rem((FAR dq_entry_t *)next, tasklist); - - /* Add the task to the g_readytorun or to the g_pendingtasks - * list. NOTE: That the above operations may cause the - * scheduler to become locked. It may be assigned to a - * different CPU the next time that it runs. - */ - - if (nxsched_islocked_global()) - { - next->task_state = TSTATE_TASK_PENDING; - tasklist = list_pendingtasks(); - } - else - { - next->task_state = TSTATE_TASK_READYTORUN; - tasklist = list_readytorun(); - } - - nxsched_add_prioritized(next, tasklist); - } - } - else - { - /* No context switch. Assign the CPU and set the assigned state. - * - * REVISIT: I have seen this assertion fire. Apparently another - * CPU may add another, higher priority task to the same - * g_assignedtasks[] list sometime after nxsched_select_cpu() was - * called above, leaving this TCB in the wrong task list if - * task_state is TSTATE_TASK_ASSIGNED). - */ - - DEBUGASSERT(task_state == TSTATE_TASK_ASSIGNED); - - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - doswitch = false; + g_cpu_lockset |= (1 << cpu); } /* All done, restart the other CPU (if it was paused). */ diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c index e423dfc3bb..ef547447cc 100644 --- a/sched/sched/sched_removereadytorun.c +++ b/sched/sched/sched_removereadytorun.c @@ -29,10 +29,10 @@ #include #include -#include #include #include "irq/irq.h" +#include "sched/queue.h" #include "sched/sched.h" /**************************************************************************** @@ -190,7 +190,48 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) * or the g_assignedtasks[cpu] list. */ - dq_rem((FAR dq_entry_t *)rtcb, tasklist); + dq_rem_head((FAR dq_entry_t *)rtcb, tasklist); + + /* Find the highest priority non-running tasks in the g_assignedtasks + * list of other CPUs, and also non-idle tasks, place them in the + * g_readytorun list. so as to find the task with the highest priority, + * globally + */ + + for (int i = 0; i < CONFIG_SMP_NCPUS; i++) + { + if (i == cpu) + { + /* The highest priority task of the current + * CPU has been found, which is nxttcb. + */ + + continue; + } + + for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head; + !is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink) + { + if (rtrtcb->task_state != TSTATE_TASK_RUNNING && + CPU_ISSET(cpu, &rtrtcb->affinity)) + { + /* We have found the task with the highest priority whose + * CPU index is i. Since this task must be between the two + * tasks, we can use the dq_rem_mid macro to delete it. + */ + + dq_rem_mid(rtrtcb); + rtrtcb->task_state = TSTATE_TASK_READYTORUN; + + /* Add rtrtcb to g_readytorun to find + * the task with the highest global priority + */ + + nxsched_add_prioritized(rtrtcb, &g_readytorun); + break; + } + } + } /* Which task will go at the head of the list? It will be either the * next tcb in the assigned task list (nxttcb) or a TCB in the @@ -221,7 +262,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge) */ dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun()); - dq_addfirst((FAR dq_entry_t *)rtrtcb, tasklist); + dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist); rtrtcb->cpu = cpu; nxttcb = rtrtcb;