sched/sched: Implements a global scheduler lock capability as part of SMP support. This allows the scheduler to be locked with no knowledge of, or access to, the TCB of the currently running task. This is necessary because accessing the TCB of the currently running task is, itself, a non-atomic operation. The global scheduler lock capability was added specifically to make that TCB access atomic.

Gregory Nutt 2018-02-05 13:12:36 -06:00
parent f51693e36a
commit 37c9b3d54a
13 changed files with 286 additions and 70 deletions
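
The pattern described in the commit message can be modeled in a few lines. This is an illustrative sketch only, not NuttX code; fetch_add16/fetch_sub16 stand in for the architecture-provided up_fetchadd16/up_fetchsub16, and g_lockcount models the new g_global_lockcount. The point is that the count can be raised atomically before any TCB is dereferenced, so other CPUs see the scheduler as locked without either side needing the current task's TCB.

/* Illustrative sketch only -- not NuttX code.  It models the pattern the
 * commit introduces: a global lock count that can be raised atomically
 * with no reference to any TCB.
 */

#include <stdint.h>
#include <stdbool.h>

static volatile int16_t g_lockcount;       /* Models g_global_lockcount */

/* Stand-ins for the architecture-provided atomics (names assumed) */

static inline int16_t fetch_add16(volatile int16_t *addr, int16_t value)
{
  return __atomic_add_fetch(addr, value, __ATOMIC_SEQ_CST);
}

static inline int16_t fetch_sub16(volatile int16_t *addr, int16_t value)
{
  return __atomic_sub_fetch(addr, value, __ATOMIC_SEQ_CST);
}

/* Other CPUs treat the scheduler as locked while the count is non-zero */

static inline bool scheduler_locked_globally(void)
{
  return g_lockcount > 0;
}

void example_lock_sequence(void)
{
  /* 1. Lock the scheduler globally without touching any TCB */

  (void)fetch_add16(&g_lockcount, 1);

  /* 2. Now the current task's TCB can be read and its own lockcount
   *    updated without another CPU switching it out underneath us.
   */

  /* 3. Drop the transient global count once the TCB records the lock */

  (void)fetch_sub16(&g_lockcount, 1);
}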

View File

@ -1705,9 +1705,9 @@ int up_timer_start(FAR const struct timespec *ts);
****************************************************************************/
#ifdef CONFIG_ARCH_HAVE_FETCHADD
int32_t up_fetchadd32(FAR int32_t *addr, int32_t value);
int16_t up_fetchadd16(FAR int16_t *addr, int16_t value);
int8_t up_fetchadd8(FAR int8_t *addr, int8_t value);
int32_t up_fetchadd32(FAR volatile int32_t *addr, int32_t value);
int16_t up_fetchadd16(FAR volatile int16_t *addr, int16_t value);
int8_t up_fetchadd8(FAR volatile int8_t *addr, int8_t value);
#endif
/****************************************************************************
@ -1729,9 +1729,9 @@ int8_t up_fetchadd8(FAR int8_t *addr, int8_t value);
****************************************************************************/
#ifdef CONFIG_ARCH_HAVE_FETCHADD
int32_t up_fetchsub32(FAR int32_t *addr, int32_t value);
int16_t up_fetchsub16(FAR int16_t *addr, int16_t value);
int8_t up_fetchsub8(FAR int8_t *addr, int8_t value);
int32_t up_fetchsub32(FAR volatile int32_t *addr, int32_t value);
int16_t up_fetchsub16(FAR volatile int16_t *addr, int16_t value);
int8_t up_fetchsub8(FAR volatile int8_t *addr, int8_t value);
#endif
/****************************************************************************
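For context, a port that lacks a dedicated atomic instruction could satisfy the prototypes above with the GCC __atomic builtins. The snippet below is only a sketch of one possible implementation, not code from this commit; a real port would normally use an architecture-specific instruction sequence, and the "return the updated value" convention is assumed (the callers added later in this commit discard the return value either way).

/* Possible port-level implementation using the GCC __atomic builtins
 * (illustrative only).  The 8-bit variants are analogous.
 */

#include <stdint.h>

#ifndef FAR
#  define FAR          /* NuttX pointer qualifier; empty on flat memory */
#endif

int32_t up_fetchadd32(FAR volatile int32_t *addr, int32_t value)
{
  return __atomic_add_fetch(addr, value, __ATOMIC_SEQ_CST);
}

int32_t up_fetchsub32(FAR volatile int32_t *addr, int32_t value)
{
  return __atomic_sub_fetch(addr, value, __ATOMIC_SEQ_CST);
}

int16_t up_fetchadd16(FAR volatile int16_t *addr, int16_t value)
{
  return __atomic_add_fetch(addr, value, __ATOMIC_SEQ_CST);
}

int16_t up_fetchsub16(FAR volatile int16_t *addr, int16_t value)
{
  return __atomic_sub_fetch(addr, value, __ATOMIC_SEQ_CST);
}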

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/irq/irq_csection.c
*
* Copyright (C) 2016-2017 Gregory Nutt. All rights reserved.
* Copyright (C) 2016-2018 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -506,7 +506,7 @@ void leave_critical_section(irqstate_t flags)
*/
cpu = this_cpu();
DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
DEBUGASSERT(sched_islocked_global() &&
(g_cpu_irqset & (1 << cpu)) != 0);
/* Check if releasing the lock held by this CPU will unlock the
@ -521,8 +521,7 @@ void leave_critical_section(irqstate_t flags)
* because we were within a critical section then.
*/
if (g_pendingtasks.head != NULL &&
!spin_islocked(&g_cpu_schedlock))
if (g_pendingtasks.head != NULL && !sched_islocked_global())
{
/* Release any ready-to-run tasks that have collected
* in g_pendingtasks. NOTE: This operation has a very
@ -616,6 +615,15 @@ bool irq_cpu_locked(int cpu)
return false;
}
#ifdef CONFIG_ARCH_HAVE_FETCHADD
/* If the global lockcount has been incremented then simply return true */
if (g_global_lockcount > 0)
{
return true;
}
#endif
/* Test if g_cpu_irqlock is locked. We don't really need to check
* g_cpu_irqlock to do this; we can use the g_cpu_set.
*

View File

@ -1,7 +1,7 @@
############################################################################
# sched/sched/Make.defs
#
# Copyright (C) 2014 Gregory Nutt. All rights reserved.
# Copyright (C) 2014, 2018 Gregory Nutt. All rights reserved.
# Author: Gregory Nutt <gnutt@nuttx.org>
#
# Redistribution and use in source and binary forms, with or without
@ -102,6 +102,8 @@ ifeq ($(CONFIG_SMP),y)
CSRCS += sched_tasklistlock.c
ifeq ($(CONFIG_ARCH_GLOBAL_IRQDISABLE),y)
CSRCS += sched_thistask.c
else ifeq ($(CONFIG_ARCH_HAVE_FETCHADD),y)
CSRCS += sched_thistask.c
endif
endif

View File

@ -73,16 +73,14 @@
/* These are macros to access the current CPU and the current task on a CPU.
* These macros are intended to support a future SMP implementation.
* NOTE: this_task() for SMP is implemented in sched_thistask.c if the CPU
* supports disabling of inter-processor interrupts.
*
* REVISIT: A mechanism to provide an atomic this_task() is still required
* for the case where where inter-processor interrupts cannot be disabled!
* supports disabling of inter-processor interrupts or if it supports the
* atomic fetch add operation.
*/
#ifdef CONFIG_SMP
# define current_task(cpu) ((FAR struct tcb_s *)g_assignedtasks[cpu].head)
# define this_cpu() up_cpu_index()
# ifndef CONFIG_ARCH_GLOBAL_IRQDISABLE
# if !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE) && !defined(CONFIG_ARCH_HAVE_FETCHADD)
# define this_task() (current_task(this_cpu()))
# endif
#else
@ -376,6 +374,14 @@ extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
extern volatile spinlock_t g_cpu_tasklistlock SP_SECTION;
#ifdef CONFIG_ARCH_HAVE_FETCHADD
/* This is part of the sched_lock() logic to handle atomic operations when
* locking the scheduler.
*/
extern volatile int16_t g_global_lockcount;
#endif
#endif /* CONFIG_SMP */
/****************************************************************************
@ -435,7 +441,7 @@ void sched_sporadic_lowpriority(FAR struct tcb_s *tcb);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_ARCH_GLOBAL_IRQDISABLE
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE) || defined(CONFIG_ARCH_HAVE_FETCHADD)
FAR struct tcb_s *this_task(void);
#endif
@ -445,11 +451,20 @@ int sched_cpu_pause(FAR struct tcb_s *tcb);
irqstate_t sched_tasklist_lock(void);
void sched_tasklist_unlock(irqstate_t lock);
# define sched_islocked(tcb) spin_islocked(&g_cpu_schedlock)
#ifdef CONFIG_ARCH_HAVE_FETCHADD
# define sched_islocked_global() \
(spin_islocked(&g_cpu_schedlock) || g_global_lockcount > 0)
#else
# define sched_cpu_select(a) (0)
# define sched_cpu_pause(t) (-38) /* -ENOSYS */
# define sched_islocked(tcb) ((tcb)->lockcount > 0)
# define sched_islocked_global() \
spin_islocked(&g_cpu_schedlock)
#endif
# define sched_islocked_tcb(tcb) sched_islocked_global()
#else
# define sched_cpu_select(a) (0)
# define sched_cpu_pause(t) (-38) /* -ENOSYS */
# define sched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
#endif
/* CPU load measurement support */
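
To make the naming convention above concrete, here are two hypothetical helpers (illustration only; they would build only inside the NuttX sched/ directory, and the header locations are assumed) showing how code is expected to ask the "global" question versus the per-task question after this commit:

/* Hypothetical helpers, for illustration only */

#include <nuttx/config.h>
#include <stdbool.h>

#include "sched/sched.h"

/* "Is the scheduler locked anywhere?" -- the question the SMP-only
 * sched_islocked_global() macro answers.
 */

static bool any_cpu_holds_sched_lock(void)
{
#ifdef CONFIG_SMP
  /* True if some CPU holds g_cpu_schedlock, or if g_global_lockcount is
   * transiently non-zero inside sched_lock()/sched_unlock().
   */

  return sched_islocked_global();
#else
  /* Without SMP there is no global notion of the scheduler lock */

  return false;
#endif
}

/* "Has this task disabled pre-emption?" -- the question the cover macro
 * sched_islocked_tcb() answers in both SMP and non-SMP builds.  In SMP
 * builds it also reduces to the global test.
 */

static bool task_holds_sched_lock(FAR struct tcb_s *tcb)
{
  return sched_islocked_tcb(tcb);
}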

View File

@ -1,7 +1,8 @@
/****************************************************************************
* sched/sched/sched_addreadytorun.c
*
* Copyright (C) 2007-2009, 2014, 2016-2017 Gregory Nutt. All rights reserved.
* Copyright (C) 2007-2009, 2014, 2016-2018 Gregory Nutt. All rights
* reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -194,7 +195,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
cpu = sched_cpu_select(btcb->affinity);
}
/* Get the task currently running on the CPU (maybe the IDLE task) */
/* Get the task currently running on the CPU (may be the IDLE task) */
rtcb = (FAR struct tcb_s *)g_assignedtasks[cpu].head;
@ -240,7 +241,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
*/
me = this_cpu();
if ((spin_islocked(&g_cpu_schedlock) || irq_cpu_locked(me)) &&
if ((sched_islocked_global() || irq_cpu_locked(me)) &&
task_state != TSTATE_TASK_ASSIGNED)
{
/* Add the new ready-to-run task to the g_pendingtasks task list for
@ -387,7 +388,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
* different CPU the next time that it runs.
*/
if (spin_islocked(&g_cpu_schedlock))
if (sched_islocked_global())
{
next->task_state = TSTATE_TASK_PENDING;
tasklist = (FAR dq_queue_t *)&g_pendingtasks;

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_lock.c
*
* Copyright (C) 2007, 2009, 2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2016, 2018 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -116,6 +116,13 @@ volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
volatile spinlock_t g_cpu_locksetlock SP_SECTION;
volatile cpu_set_t g_cpu_lockset SP_SECTION;
#ifdef CONFIG_ARCH_HAVE_FETCHADD
/* This is part of the sched_lock() logic to handle atomic operations when
* locking the scheduler.
*/
volatile int16_t g_global_lockcount;
#endif
#endif /* CONFIG_SMP */
/****************************************************************************
@ -140,16 +147,32 @@ volatile cpu_set_t g_cpu_lockset SP_SECTION;
*
****************************************************************************/
#ifdef CONFIG_SMP
int sched_lock(void)
{
FAR struct tcb_s *rtcb = this_task();
FAR struct tcb_s *rtcb;
int cpu;
/* Check for some special cases: (1) rtcb may be NULL only during early
* boot-up phases, and (2) sched_lock() should have no effect if called
* from the interrupt level.
/* The following operation is non-atomic unless CONFIG_ARCH_HAVE_FETCHADD
* is defined.
*/
if (rtcb && !up_interrupt_context())
#ifdef CONFIG_ARCH_HAVE_FETCHADD
DEBUGASSERT((uint16_t)g_global_lockcount < INT16_MAX); /* Not atomic! */
(void)up_fetchadd16(&g_global_lockcount, 1);
#endif
/* This operation is safe if CONFIG_ARCH_HAVE_FETCHADD is defined. NOTE
* we cannot use this_task() because it calls sched_lock().
*/
cpu = this_cpu();
rtcb = current_task(cpu);
/* rtcb may be NULL only during early boot-up phases. */
if (rtcb != NULL)
{
/* Catch attempts to increment the lockcount beyond the range of the
* integer type.
@ -157,7 +180,6 @@ int sched_lock(void)
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
#ifdef CONFIG_SMP
/* We must hold the lock on this CPU before we increment the lockcount
* for the first time. Holding the lock is sufficient to lockout context
* switching.
@ -184,7 +206,6 @@ int sched_lock(void)
DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
(g_cpu_lockset & (1 << this_cpu())) != 0);
}
#endif
/* A counter is used to support locking. This allows nested lock
* operations on this thread (on any CPU)
@ -192,7 +213,6 @@ int sched_lock(void)
rtcb->lockcount++;
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
/* Check if we just acquired the lock */
@ -204,7 +224,6 @@ int sched_lock(void)
}
#endif
#ifdef CONFIG_SMP
/* Move any tasks in the ready-to-run list to the pending task list
* where they will not be available to run until the scheduler is
* unlocked and sched_mergepending() is called.
@ -213,8 +232,54 @@ int sched_lock(void)
sched_mergeprioritized((FAR dq_queue_t *)&g_readytorun,
(FAR dq_queue_t *)&g_pendingtasks,
TSTATE_TASK_PENDING);
}
#ifdef CONFIG_ARCH_HAVE_FETCHADD
DEBUGASSERT(g_global_lockcount > 0);
(void)up_fetchsub16(&g_global_lockcount, 1);
#endif
return OK;
}
#else /* CONFIG_SMP */
int sched_lock(void)
{
FAR struct tcb_s *rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during early
* boot-up phases, and (2) sched_lock() should have no effect if called
* from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
{
/* Catch attempts to increment the lockcount beyond the range of the
* integer type.
*/
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
/* A counter is used to support locking. This allows nested lock
* operations on this thread (on any CPU)
*/
rtcb->lockcount++;
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
/* Check if we just acquired the lock */
if (rtcb->lockcount == 1)
{
/* Note that we have pre-emption locked */
sched_note_premption(rtcb, true);
}
#endif
}
return OK;
}
#endif /* CONFIG_SMP */
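
Read as a whole, the new SMP sched_lock() follows a simple bracket pattern. The outline below is a paraphrase for reading, not a drop-in replacement: the function is renamed to make that clear, and the spinlock acquisition, instrumentation, and the move of ready-to-run tasks to g_pendingtasks are elided.

/* Condensed outline of the SMP sched_lock() added above (paraphrase) */

#include <nuttx/config.h>
#include <nuttx/arch.h>

#include "sched/sched.h"

int sched_lock_outline(void)
{
  FAR struct tcb_s *rtcb;

#ifdef CONFIG_ARCH_HAVE_FETCHADD
  /* 1. Make the scheduler appear locked on every CPU before any TCB is
   *    touched.  This needs no knowledge of the current task.
   */

  (void)up_fetchadd16(&g_global_lockcount, 1);
#endif

  /* 2. The current TCB can now be read without racing a context switch
   *    initiated from another CPU.
   */

  rtcb = current_task(this_cpu());
  if (rtcb != NULL)
    {
      /* 3. Record the lock where it normally lives: take the per-CPU
       *    scheduler spinlock on the first nesting level (elided) and
       *    bump the TCB's own lock count.
       */

      rtcb->lockcount++;
    }

#ifdef CONFIG_ARCH_HAVE_FETCHADD
  /* 4. The TCB now records the lock; drop the transient global count */

  (void)up_fetchsub16(&g_global_lockcount, 1);
#endif

  return OK;
}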

View File

@ -210,7 +210,7 @@ bool sched_mergepending(void)
*/
me = this_cpu();
if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(me))
if (!sched_islocked_global() && !irq_cpu_locked(me))
{
/* Find the CPU that is executing the lowest priority task */
@ -248,7 +248,7 @@ bool sched_mergepending(void)
* Check if that happened.
*/
if (spin_islocked(&g_cpu_schedlock) || irq_cpu_locked(me))
if (sched_islocked_global() || irq_cpu_locked(me))
{
/* Yes.. then we may have incorrectly placed some TCBs in the
* g_readytorun list (unlikely, but possible). We will have to

View File

@ -208,7 +208,7 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
* REVISIT: What if it is not the IDLE thread?
*/
if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(me))
if (!sched_islocked_global() && !irq_cpu_locked(me))
{
/* Search for the highest priority task that can run on this
* CPU.

View File

@ -121,7 +121,7 @@ uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
/* Did decrementing the timeslice counter cause the timeslice to expire? */
ret = tcb->timeslice;
if (tcb->timeslice <= 0 && !sched_islocked(tcb))
if (tcb->timeslice <= 0 && !sched_islocked_tcb(tcb))
{
/* We will also suppress context switches if we were called via one
* of the unusual cases handled by sched_timer_reasses(). In that

View File

@ -83,7 +83,7 @@ static FAR struct tcb_s *sched_nexttcb(FAR struct tcb_s *tcb)
* then use the 'nxttcb' which will probably be the IDLE thread.
*/
if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(cpu))
if (!sched_islocked_global() && !irq_cpu_locked(cpu))
{
/* Search for the highest priority task that can run on this CPU. */

View File

@ -475,7 +475,7 @@ static void sporadic_budget_expire(int argc, wdparm_t arg1, ...)
* this operation is needed.
*/
if (sched_islocked(tcb))
if (sched_islocked_tcb(tcb))
{
DEBUGASSERT((mrepl->flags && SPORADIC_FLAG_ALLOCED) != 0 &&
sporadic->nrepls > 0);
@ -623,7 +623,7 @@ static void sporadic_replenish_expire(int argc, wdparm_t arg1, ...)
* this operation is needed.
*/
if (sched_islocked(tcb))
if (sched_islocked_tcb(tcb))
{
/* Set the timeslice to the magic value */
@ -1222,7 +1222,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
/* Does the thread have the scheduler locked? */
sporadic = tcb->sporadic;
if (sched_islocked(tcb))
if (sched_islocked_tcb(tcb))
{
/* Yes... then we have no option but to give the thread more
* time at the higher priority. Dropping the priority could

View File

@ -39,12 +39,12 @@
****************************************************************************/
#include <nuttx/config.h>
#include <nuttx/spinlock.h>
#include <sys/types.h>
#include <arch/irq.h>
#include <nuttx/irq.h>
#include <nuttx/spinlock.h>
#include "sched/sched.h"
@ -70,7 +70,7 @@
FAR struct tcb_s *this_task(void)
{
FAR struct tcb_s *tcb;
#ifdef CONFIG_ARCH_GLOBAL_IRQDISABLE
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
irqstate_t flags;
/* If the CPU supports suppression of interprocessor interrupts, then simple
@ -79,6 +79,12 @@ FAR struct tcb_s *this_task(void)
*/
flags = up_irq_save();
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
/* Global locking is supported and, hence, sched_lock() will provide the
* necessary protection.
*/
sched_lock();
#else
/* REVISIT: Otherwise, there is no protection available. sched_lock() and
* enter_critical section are not viable options here (because both depend
@ -93,8 +99,10 @@ FAR struct tcb_s *this_task(void)
/* Enable local interrupts */
#ifdef CONFIG_ARCH_GLOBAL_IRQDISABLE
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
up_irq_restore(flags);
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
sched_unlock();
#endif
return tcb;
}
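
Putting the two hunks together, the protected read that sched_thistask.c now performs looks roughly like this. It is a reconstruction from the fragments above: the include list matches the file, the middle of the function (the read of the task assigned to this CPU) is summarized, and the unprotected fallback branch for CPUs with neither capability is omitted.

/* Reconstruction of this_task() after the change (paraphrased) */

#include <nuttx/config.h>

#include <sys/types.h>
#include <arch/irq.h>
#include <nuttx/irq.h>
#include <nuttx/spinlock.h>

#include "sched/sched.h"

FAR struct tcb_s *this_task(void)
{
  FAR struct tcb_s *tcb;
#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
  irqstate_t flags;

  /* Best case: inter-processor interrupts can simply be masked while the
   * assigned-task list is read.
   */

  flags = up_irq_save();
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
  /* Otherwise rely on the fetch-add based global scheduler lock.
   * sched_lock() is usable here precisely because, after this commit, it
   * no longer needs this_task() to protect its own TCB access.
   */

  sched_lock();
#endif

  /* Read the TCB of the task currently assigned to this CPU */

  tcb = current_task(this_cpu());

  /* Undo whichever protection was applied above */

#if defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
  up_irq_restore(flags);
#elif defined(CONFIG_ARCH_HAVE_FETCHADD)
  sched_unlock();
#endif

  return tcb;
}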

View File

@ -1,7 +1,8 @@
/****************************************************************************
* sched/sched/sched_unlock.c
*
* Copyright (C) 2007, 2009, 2014, 2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2014, 2016, 2018 Gregory Nutt. All rights
* reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -64,24 +65,23 @@
*
****************************************************************************/
#ifdef CONFIG_SMP
int sched_unlock(void)
{
FAR struct tcb_s *rtcb = this_task();
#ifdef CONFIG_SMP
FAR struct tcb_s *rtcb;
int cpu;
/* This operation is safe because the scheduler is locked and no context
* switch may occur.
*/
cpu = this_cpu();
rtcb = current_task(cpu);
#else
rtcb = this_task();
#endif
/* Check for some special cases: (1) rtcb may be NULL only during
* early boot-up phases, and (2) sched_unlock() should have no
* effect if called from the interrupt level.
*/
/* rtcb may be NULL only during early boot-up phases. */
if (rtcb && !up_interrupt_context())
if (rtcb != NULL)
{
/* Prevent context switches throughout the following. */
@ -109,7 +109,6 @@ int sched_unlock(void)
rtcb->lockcount = 0;
#ifdef CONFIG_SMP
/* The lockcount has decremented to zero and we need to perform
* release our hold on the lock.
*/
@ -119,7 +118,6 @@ int sched_unlock(void)
spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
#endif
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
@ -128,7 +126,6 @@ int sched_unlock(void)
* this task to be switched out!
*/
#ifdef CONFIG_SMP
/* In the SMP case, the tasks remain pending if (1) we are
* in a critical section, i.e., g_cpu_irqlock is locked by other
* CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
@ -147,17 +144,8 @@ int sched_unlock(void)
* BEFORE it clears IRQ lock.
*/
if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(cpu) &&
if (!sched_islocked_global() && !irq_cpu_locked(cpu) &&
g_pendingtasks.head != NULL)
#else
/* In the single CPU case, decrementing irqcount to zero is
* sufficient to release the pending tasks. Further, in that
* configuration, critical sections and pre-emption can operate
* fully independently.
*/
if (g_pendingtasks.head != NULL)
#endif
{
up_release_pending();
}
@ -232,3 +220,132 @@ int sched_unlock(void)
return OK;
}
#else /* CONFIG_SMP */
int sched_unlock(void)
{
FAR struct tcb_s *rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during
* early boot-up phases, and (2) sched_unlock() should have no
* effect if called from the interrupt level.
*/
if (rtcb != NULL && !up_interrupt_context())
{
/* Prevent context switches throughout the following. */
irqstate_t flags = enter_critical_section();
/* Decrement the preemption lock counter */
if (rtcb->lockcount > 0)
{
rtcb->lockcount--;
}
/* Check if the lock counter has decremented to zero. If so,
* then pre-emption has been re-enabled.
*/
if (rtcb->lockcount <= 0)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
/* Note that we no longer have pre-emption disabled. */
sched_note_premption(rtcb, false);
#endif
/* Set the lock count to zero */
rtcb->lockcount = 0;
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
*
* NOTE: This operation has a very high likelihood of causing
* this task to be switched out!
*
* In the single CPU case, decrementing irqcount to zero is
* sufficient to release the pending tasks. Further, in that
* configuration, critical sections and pre-emption can operate
* fully independently.
*/
if (g_pendingtasks.head != NULL)
{
up_release_pending();
}
#if CONFIG_RR_INTERVAL > 0
/* If (1) the task that was running supported round-robin
* scheduling and (2) if its time slice has already expired, but
* (3) it could not slice out because pre-emption was disabled,
* then we need to swap the task out now and reassess the interval
* timer for the next time slice.
*/
if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
rtcb->timeslice == 0)
{
/* Yes.. that is the situation. But one more thing. The call
* to up_release_pending() above may have actually replaced
* the task at the head of the ready-to-run list. In that case,
* we need only to reset the timeslice value back to the
* maximum.
*/
if (rtcb != this_task())
{
rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
}
#ifdef CONFIG_SCHED_TICKLESS
else
{
sched_timer_reassess();
}
#endif
}
#endif
#ifdef CONFIG_SCHED_SPORADIC
#if CONFIG_RR_INTERVAL > 0
else
#endif
/* If (1) the task that was running supported sporadic scheduling
* and (2) if its budget slice has already expired, but (3) it
* could not slice out because pre-emption was disabled, then we
* need to swap the task out now and reassess the interval timer
* for the next time slice.
*/
if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC &&
rtcb->timeslice < 0)
{
/* Yes.. that is the situation. Force the low-priority state
* now
*/
sched_sporadic_lowpriority(rtcb);
#ifdef CONFIG_SCHED_TICKLESS
/* Make sure that the call to up_release_pending() did not
* change the currently active task.
*/
if (rtcb == this_task())
{
sched_timer_reassess();
}
#endif
}
#endif
}
leave_critical_section(flags);
}
return OK;
}
#endif /* CONFIG_SMP */