diff --git a/sched/sched/sched.h b/sched/sched/sched.h index 504c949e8c..9c793aee06 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -371,6 +371,7 @@ extern volatile cpu_set_t g_cpu_lockset; bool sched_addreadytorun(FAR struct tcb_s *rtrtcb); bool sched_removereadytorun(FAR struct tcb_s *rtrtcb); bool sched_addprioritized(FAR struct tcb_s *newTcb, DSEG dq_queue_t *list); +void sched_mergeprioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2); bool sched_mergepending(void); void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state); void sched_removeblocked(FAR struct tcb_s *btcb); diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c index 59eb85a18e..fb4675fa7b 100644 --- a/sched/sched/sched_lock.c +++ b/sched/sched/sched_lock.c @@ -143,9 +143,6 @@ volatile cpu_set_t g_cpu_lockset; int sched_lock(void) { FAR struct tcb_s *rtcb = this_task(); -#ifdef CONFIG_SMP - FAR struct tcb_s *ptcb; -#endif /* Check for some special cases: (1) rtcb may be NULL only during early * boot-up phases, and (2) sched_lock() should have no effect if called @@ -211,17 +208,10 @@ int sched_lock(void) /* Move any tasks in the ready-to-run list to the pending task list * where they will not be available to run until the scheduler is * unlocked and sched_mergepending() is called. - * - * REVISIT: This is awkward. There is really not so much need for - * the pending task list in the SMP configuration. Perhaps it should - * just be eliminated? 
*/ - while ((ptcb = (FAR struct tcb_s *) - dq_remlast((FAR dq_queue_t *)&g_readytorun)) != NULL) - { - (void)sched_addprioritized(ptcb, (FAR dq_queue_t *)&g_pendingtasks); - } + sched_mergeprioritized((FAR dq_queue_t *)&g_readytorun, + (FAR dq_queue_t *)&g_pendingtasks); #endif } diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c index 87c2d98d87..75b30d0642 100644 --- a/sched/sched/sched_mergepending.c +++ b/sched/sched/sched_mergepending.c @@ -50,6 +50,12 @@ #include "sched/sched.h" +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#define ALL_CPUS ((cpu_set_t)-1) + /**************************************************************************** * Public Functions ****************************************************************************/ @@ -185,8 +191,11 @@ bool sched_mergepending(void) #ifdef CONFIG_SMP bool sched_mergepending(void) { + FAR struct tcb_s *rtcb; FAR struct tcb_s *ptcb; + FAR struct tcb_s *tcb; bool ret = false; + int cpu; /* Remove and process every TCB in the g_pendingtasks list. * @@ -197,17 +206,38 @@ bool sched_mergepending(void) if (!spin_islocked(&g_cpu_schedlock)) { - while ((ptcb = (FAR struct tcb_s *) - dq_remfirst((FAR dq_queue_t *)&g_pendingtasks)) != NULL) - { - /* Add the pending task to the correct ready-to-run list. These - * are prioritized lists; the g_pendingtasks list is accessed in - * highest priority order. This means that the first - * CONFIG_SMP_NCPU tasks may be made to run but the remaining will - * simply be added to the g_readtorun list. 
- */ + /* Find the CPU that is executing the lowest priority task */ - ret |= sched_addreadytorun(ptcb); + ptcb = (FAR struct tcb_s *)dq_peek((FAR dq_queue_t *)&g_pendingtasks); + if (ptcb == NULL) + { + /* The pending task list is empty */ + + return ret; + } + + cpu = sched_cpu_select(ALL_CPUS /* ptcb->affinity */); + rtcb = current_task(cpu); + + /* Loop while there is a higher priority task in the pending task list + * than in the lowest executing task. + * + * Normally, this loop should execute no more than CONFIG_SMP_NCPUS + * times. That number could be larger, however, if the CPU affinity + * sets do not include all CPUs. In that case, the excess TCBs will + * end up in the g_readytorun list. + */ + + while (ptcb->sched_priority > rtcb->sched_priority) + { + /* Remove the task from the pending task list */ + + tcb = (FAR struct tcb_s *)dq_remfirst((FAR dq_queue_t *)&g_pendingtasks); + DEBUGASSERT(tcb == ptcb); + + /* Add the pending task to the correct ready-to-run list. */ + + ret |= sched_addreadytorun(tcb); /* This operation could cause the scheduler to become locked. * Check if that happened. @@ -216,25 +246,40 @@ bool sched_mergepending(void) if (spin_islocked(&g_cpu_schedlock)) { /* Yes.. then we may have incorrectly placed some TCBs in the - * g_readytorun list (unlikely, but possible). - * - * REVISIT: This is awkward. There is really not so much - * need for the pending task list in the SMP configuration. - * Perhaps it should just be eliminated? + * g_readytorun list (unlikely, but possible). We will have to + * move them back to the pending task list. */ - while ((ptcb = (FAR struct tcb_s *) - dq_remlast((FAR dq_queue_t *)&g_readytorun)) != NULL) - { - (void)sched_addprioritized(ptcb, - (FAR dq_queue_t *)&g_pendingtasks); - } + sched_mergeprioritized((FAR dq_queue_t *)&g_readytorun, + (FAR dq_queue_t *)&g_pendingtasks); - /* And break out of the loop */ + /* And return with the schedule locked and tasks in the + * pending task list. 
+ */ - break; + return ret; } + + /* Set up for the next time through the loop */ + + ptcb = (FAR struct tcb_s *)dq_peek((FAR dq_queue_t *)&g_pendingtasks); + if (ptcb == NULL) + { + /* The pending task list is empty */ + + return ret; + } + + cpu = sched_cpu_select(ALL_CPUS /* ptcb->affinity */); + rtcb = current_task(cpu); } + + /* No more pending tasks can be made running. Move any reamaining + * tasks in the pending task list to the ready-to-run task list. + */ + + sched_mergeprioritized((FAR dq_queue_t *)&g_pendingtasks, + (FAR dq_queue_t *)&g_readytorun); } return ret; diff --git a/sched/sched/sched_mergeprioritized.c b/sched/sched/sched_mergeprioritized.c new file mode 100644 index 0000000000..8af1c8e51e --- /dev/null +++ b/sched/sched/sched_mergeprioritized.c @@ -0,0 +1,147 @@ +/**************************************************************************** + * sched/sched/sched_mergeprioritized.c + * + * Copyright (C) 2016 Gregory Nutt. All rights reserved. + * Author: Gregory Nutt + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * 3. Neither the name NuttX nor the names of its contributors may be + * used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sys/types.h>
+#include <stdbool.h>
+#include <queue.h>
+#include <assert.h>
+
+#include "sched/sched.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: sched_mergeprioritized
+ *
+ * Description:
+ *   This function merges the content of the prioritized task list 'list1'
+ *   into the prioritized task list, 'list2'.  On return 'list2' will
+ *   contain the prioritized content of both lists; 'list1' will be empty.
+ *
+ * Inputs:
+ *   list1 - Points to the prioritized list to be merged into list2.  This
+ *           list will be empty upon return.
+ *   list2 - The list that will contain the prioritized content of both
+ *           lists upon return.
+ *
+ * Return Value:
+ *   None
+ *
+ * Assumptions:
+ * - The caller has established a critical section before calling this
+ *   function (calling sched_lock() first is NOT a good idea -- use
+ *   enter_critical_section()).
+ *
+ ****************************************************************************/
+
+void sched_mergeprioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2)
+{
+  FAR struct tcb_s *tcb1;
+  FAR struct tcb_s *tcb2;
+  FAR struct tcb_s *next;
+
+  DEBUGASSERT(list1 != NULL && list2 != NULL);
+
+  /* Get the head of list1 and list2 */
+
+  tcb1 = (FAR struct tcb_s *)dq_peek(list1);
+  if (tcb1 == NULL)
+    {
+      /* Special case.. list1 is empty.  There is nothing to be done. */
+
+      return;
+    }
+
+  tcb2 = (FAR struct tcb_s *)dq_peek(list2);
+  if (tcb2 == NULL)
+    {
+      /* Special case.. list2 is empty.  Move list1 to list2. */
+
+      dq_move(list1, list2);
+      return;
+    }
+
+  /* Now loop until all entries from list1 have been merged into list2.
+   * Entries are consumed from list1 strictly in order, so at any point
+   * the entries already moved form a prefix of list1 and the chain from
+   * 'tcb1' onward is still intact.
+   */
+
+  while (tcb1 != NULL)
+    {
+      /* Are we at the end of list2? */
+
+      if (tcb2 == NULL)
+        {
+          /* Yes.. Just append the remainder of list1 to the end of
+           * list2.  Only the stale back-link of the first remaining
+           * entry must be cleared before concatenating.
+           */
+
+          tcb1->blink = NULL;
+          list1->head = (FAR dq_entry_t *)tcb1;
+          dq_cat(list1, list2);
+          break;
+        }
+      else if (tcb1->sched_priority > tcb2->sched_priority)
+        {
+          /* The TCB from list1 has higher priority than the TCB from
+           * list2.  Remember the next list1 entry BEFORE relinking:
+           * dq_addbefore() rewrites tcb1's flink to point into list2.
+           */
+
+          next = (FAR struct tcb_s *)dq_next((FAR dq_entry_t *)tcb1);
+
+          /* Insert the TCB from list1 before the TCB from list2. */
+
+          dq_addbefore((FAR dq_entry_t *)tcb2, (FAR dq_entry_t *)tcb1,
+                       list2);
+          tcb1 = next;
+        }
+      else
+        {
+          /* The TCB from list2 has higher (or the same) priority as the
+           * TCB from list1.  Skip to the next, lower priority TCB in
+           * list2.
+           */
+
+          tcb2 = (FAR struct tcb_s *)dq_next((FAR dq_entry_t *)tcb2);
+        }
+    }
+
+  /* All of the TCBs from list1 have been moved.  Now we can mark list1
+   * empty.
+   */
+
+  dq_init(list1);
+}