SMP: Straighten up some scheduler locking logic -- need to REVISIT

Gregory Nutt 2016-03-22 13:01:47 -06:00
parent 3b2e94e1fd
commit 9604ea8f42
4 changed files with 93 additions and 18 deletions

arch

@@ -1 +1 @@
-Subproject commit e473d2dfda7181296f10103bde2323d8df4c2ddb
+Subproject commit 0401b8ce61ca07c7680763dcfa4b40ad42d8dc4d

sched/sched_addreadytorun.c

@@ -222,13 +222,16 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
     }

   /* If the selected state is TSTATE_TASK_RUNNING, then we would like to
-   * start running the task.  Be we cannot do that if pre-emption is disable.
+   * start running the task.  But we cannot do that if pre-emption is
+   * disabled.  If the selected state is TSTATE_TASK_READYTORUN, then it
+   * should also go to the pending task list so that it will have a chance
+   * to be restarted when the scheduler is unlocked.
    */

-  if (spin_islocked(&g_cpu_schedlock) && task_state == TSTATE_TASK_RUNNING)
+  if (spin_islocked(&g_cpu_schedlock) && task_state != TSTATE_TASK_ASSIGNED)
     {
-      /* Preemption would occur!  Add the new ready-to-run task to the
-       * g_pendingtasks task list for now.
+      /* Add the new ready-to-run task to the g_pendingtasks task list for
+       * now.
        */

       sched_addprioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
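
To make the effect of the new test concrete, here is a minimal standalone sketch (illustration only, not NuttX code; must_pend() and the reduced state enum are invented for the example). Under the old test only a would-be RUNNING task was diverted to g_pendingtasks while the scheduler was locked; under the new test, every candidate that is not simply being assigned to another CPU's list is diverted:

#include <stdbool.h>
#include <stdio.h>

/* Reduced model of the task states named above */

typedef enum
{
  TSTATE_TASK_PENDING,     /* Parked on g_pendingtasks until sched_unlock() */
  TSTATE_TASK_READYTORUN,  /* Eligible to run, no CPU selected yet */
  TSTATE_TASK_ASSIGNED,    /* Queued on a particular CPU's list */
  TSTATE_TASK_RUNNING      /* Would start running immediately */
} tstate_t;

/* Models the new condition; spin_islocked(&g_cpu_schedlock) is collapsed
 * into the boolean sched_locked.
 */

static bool must_pend(bool sched_locked, tstate_t task_state)
{
  return sched_locked && task_state != TSTATE_TASK_ASSIGNED;
}

int main(void)
{
  printf("%d\n", must_pend(true,  TSTATE_TASK_RUNNING));    /* 1: as before    */
  printf("%d\n", must_pend(true,  TSTATE_TASK_READYTORUN)); /* 1: new behavior */
  printf("%d\n", must_pend(true,  TSTATE_TASK_ASSIGNED));   /* 0               */
  printf("%d\n", must_pend(false, TSTATE_TASK_RUNNING));    /* 0               */
  return 0;
}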
@@ -340,13 +343,24 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
               dq_rem((FAR dq_entry_t *)next, tasklist);

-              /* Add the task to the g_readytorun list.  It may be
-               * assigned to a different CPU the next time that it runs.
+              /* Add the task to the g_readytorun or to the g_pendingtasks
+               * list.  NOTE that the above operations may cause the
+               * scheduler to become locked.  It may be assigned to a
+               * different CPU the next time that it runs.
                */

-              next->task_state = TSTATE_TASK_READYTORUN;
-              (void)sched_addprioritized(next,
-                                         (FAR dq_queue_t *)&g_readytorun);
+              if (spin_islocked(&g_cpu_schedlock))
+                {
+                  next->task_state = TSTATE_TASK_PENDING;
+                  tasklist = (FAR dq_queue_t *)&g_pendingtasks;
+                }
+              else
+                {
+                  next->task_state = TSTATE_TASK_READYTORUN;
+                  tasklist = (FAR dq_queue_t *)&g_readytorun;
+                }
+
+              (void)sched_addprioritized(next, tasklist);
             }

           doswitch = true;
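
The displaced task's destination now depends only on the lock state. A standalone sketch of that decision (illustration only; list_t, requeue_target(), and the g_model_* globals are invented stand-ins for the real dq_queue_t lists):

#include <stdbool.h>
#include <stdio.h>

typedef struct { const char *name; } list_t;  /* Stand-in for dq_queue_t */

static list_t g_model_readytorun   = { "g_readytorun" };
static list_t g_model_pendingtasks = { "g_pendingtasks" };

/* A task bumped from a CPU's assigned list is parked on the pending list
 * while any CPU holds the scheduler lock, so that it cannot preempt
 * anything until sched_unlock(); otherwise it becomes ready-to-run.
 */

static list_t *requeue_target(bool sched_locked)
{
  return sched_locked ? &g_model_pendingtasks : &g_model_readytorun;
}

int main(void)
{
  printf("locked:   %s\n", requeue_target(true)->name);   /* g_pendingtasks */
  printf("unlocked: %s\n", requeue_target(false)->name);  /* g_readytorun   */
  return 0;
}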

sched/sched_lock.c

@@ -143,6 +143,9 @@ volatile cpu_set_t g_cpu_lockset;
 int sched_lock(void)
 {
   FAR struct tcb_s *rtcb = this_task();
+#ifdef CONFIG_SMP
+  FAR struct tcb_s *ptcb;
+#endif

   /* Check for some special cases:  (1) rtcb may be NULL only during early
    * boot-up phases, and (2) sched_lock() should have no effect if called
@@ -169,8 +172,6 @@ int sched_lock(void)
        * different CPU may have the scheduler locked.  It is not
        * possible for some other task on this CPU to have the scheduler
        * locked (or we would not be executing!).
-       *
-       * If the scheduler is locked on another CPU, then we for the lock.
        */

       spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
@@ -194,6 +195,7 @@ int sched_lock(void)
       rtcb->lockcount++;

 #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
       /* Check if we just acquired the lock */
@@ -204,6 +206,23 @@ int sched_lock(void)
           sched_note_premption(rtcb, true);
         }
 #endif
+
+#ifdef CONFIG_SMP
+      /* Move any tasks in the ready-to-run list to the pending task list
+       * where they will not be available to run until the scheduler is
+       * unlocked and sched_mergepending() is called.
+       *
+       * REVISIT: This is awkward.  There is really not so much need for
+       * the pending task list in the SMP configuration.  Perhaps it
+       * should just be eliminated?
+       */
+
+      while ((ptcb = (FAR struct tcb_s *)
+                     dq_remlast((FAR dq_queue_t *)&g_readytorun)) != NULL)
+        {
+          (void)sched_addprioritized(ptcb,
+                                     (FAR dq_queue_t *)&g_pendingtasks);
+        }
+#endif
     }

   return OK;
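
The reason sched_mergepending() below must re-check the lock is that g_cpu_schedlock reflects the lock state of all CPUs: spin_setbit() sets this CPU's bit in g_cpu_lockset, and g_cpu_schedlock stays locked as long as any bit is set. A simplified single-word model of that behavior (illustration only; the model_* names are invented, and the real code serializes the bit updates with g_cpu_locksetlock):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t g_model_lockset;  /* Bit n set: CPU n has called sched_lock() */

static void model_setbit(int cpu) { g_model_lockset |=  (uint8_t)(1 << cpu); }
static void model_clrbit(int cpu) { g_model_lockset &= ~(uint8_t)(1 << cpu); }

/* Models spin_islocked(&g_cpu_schedlock): locked while ANY CPU holds it */

static bool model_islocked(void)  { return g_model_lockset != 0; }

int main(void)
{
  model_setbit(0);                  /* CPU 0 locks the scheduler  */
  model_setbit(1);                  /* ... and so does CPU 1      */
  model_clrbit(0);                  /* CPU 0 unlocks              */
  printf("%d\n", model_islocked()); /* 1: still locked by CPU 1   */
  model_clrbit(1);                  /* CPU 1 unlocks              */
  printf("%d\n", model_islocked()); /* 0: pre-emption is possible */
  return 0;
}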

sched/sched_mergepending.c

@@ -44,6 +44,10 @@
 #include <queue.h>
 #include <assert.h>

+#ifdef CONFIG_SMP
+#  include <nuttx/spinlock.h>
+#endif
+
 #include "sched/sched.h"

 /****************************************************************************
@@ -184,15 +188,53 @@ bool sched_mergepending(void)
   FAR struct tcb_s *ptcb;
   bool ret = false;

-  /* Remove and process every TCB in the g_pendingtasks list */
+  /* Remove and process every TCB in the g_pendingtasks list.
+   *
+   * This function is only called in a context where locking is known to
+   * be disabled on one CPU.  However, we must do nothing if pre-emption
+   * is still locked because of actions of other CPUs.
+   */

-  for (ptcb = (FAR struct tcb_s *)dq_remfirst((FAR dq_queue_t *)&g_pendingtasks);
-       ptcb != NULL;
-       ptcb = (FAR struct tcb_s *)dq_remfirst((FAR dq_queue_t *)&g_pendingtasks))
+  if (!spin_islocked(&g_cpu_schedlock))
     {
-      /* Add the pending task to the correct ready-to-run list */
+      while ((ptcb = (FAR struct tcb_s *)
+                     dq_remfirst((FAR dq_queue_t *)&g_pendingtasks)) != NULL)
+        {
+          /* Add the pending task to the correct ready-to-run list.  These
+           * are prioritized lists; the g_pendingtasks list is accessed in
+           * highest priority order.  This means that the first
+           * CONFIG_SMP_NCPUS tasks may be made to run but the remaining
+           * will simply be added to the g_readytorun list.
+           */

-      ret |= sched_addreadytorun(ptcb);
+          ret |= sched_addreadytorun(ptcb);
+
+          /* This operation could cause the scheduler to become locked.
+           * Check if that happened.
+           */
+
+          if (spin_islocked(&g_cpu_schedlock))
+            {
+              /* Yes.. then we may have incorrectly placed some TCBs in
+               * the g_readytorun list (unlikely, but possible).
+               *
+               * REVISIT: This is awkward.  There is really not so much
+               * need for the pending task list in the SMP configuration.
+               * Perhaps it should just be eliminated?
+               */
+
+              while ((ptcb = (FAR struct tcb_s *)
+                             dq_remlast((FAR dq_queue_t *)&g_readytorun)) != NULL)
+                {
+                  (void)sched_addprioritized(ptcb,
+                                             (FAR dq_queue_t *)&g_pendingtasks);
+                }
+
+              /* And break out of the loop */
+
+              break;
+            }
+        }
+    }

   return ret;
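
A standalone sketch of the guarded merge above (illustration only, not NuttX code: the lists are flattened into integer arrays of priorities, the model_* names are invented, model_addreadytorun() is assumed to re-lock the scheduler on its second call so that the roll-back path runs, and plain appends stand in for sched_addprioritized()):

#include <stdbool.h>
#include <stdio.h>

static int  g_mp_pending[8] = { 100, 90, 80 };  /* Highest priority first */
static int  g_mp_npending   = 3;
static int  g_mp_ready[8];
static int  g_mp_nready     = 0;
static bool g_mp_locked     = false;  /* Models spin_islocked(&g_cpu_schedlock) */

static bool model_addreadytorun(int prio)
{
  g_mp_ready[g_mp_nready++] = prio;

  /* Assumption for this demo: starting the second task re-locks the
   * scheduler (e.g. the task it would preempt holds sched_lock()).
   */

  if (g_mp_nready == 2)
    {
      g_mp_locked = true;
    }

  return true;
}

static bool model_mergepending(void)
{
  bool ret = false;

  /* Do nothing while any CPU (possibly another one) holds the lock */

  if (!g_mp_locked)
    {
      while (g_mp_npending > 0)
        {
          /* Pop the highest priority pending task and try to run it */

          int prio = g_mp_pending[0];
          int i;

          for (i = 1; i < g_mp_npending; i++)
            {
              g_mp_pending[i - 1] = g_mp_pending[i];
            }

          g_mp_npending--;
          ret |= model_addreadytorun(prio);

          /* If that re-locked the scheduler, move the ready tasks back to
           * the pending list (the real code pops with dq_remlast() and
           * re-inserts with sched_addprioritized()) and stop merging.
           */

          if (g_mp_locked)
            {
              while (g_mp_nready > 0)
                {
                  g_mp_pending[g_mp_npending++] = g_mp_ready[--g_mp_nready];
                }

              break;
            }
        }
    }

  return ret;
}

int main(void)
{
  (void)model_mergepending();
  printf("npending=%d nready=%d\n", g_mp_npending, g_mp_nready);  /* 3 0 */
  return 0;
}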