SMP: Add non-reentrant spinlocks, fix pre-emption controls with SMP, and most of the basic scheduler support for SMP (still missing a few things)

This commit is contained in:
Gregory Nutt 2016-02-12 14:55:31 -06:00
parent 7d7f4e140c
commit 85f663a8ee
10 changed files with 645 additions and 32 deletions

View File

@ -100,7 +100,7 @@ struct spinlock_s
spinlock_t up_testset(volatile FAR spinlock_t *lock);
/****************************************************************************
* Name: spinlock_initialize
* Name: spin_initialize
*
* Description:
* Initialize a spinlock object to its initial, unlocked state.
@ -113,15 +113,19 @@ spinlock_t up_testset(volatile FAR spinlock_t *lock);
*
****************************************************************************/
void spinlock_initialize(FAR struct spinlock_s *lock);
void spin_initialize(FAR struct spinlock_s *lock);
/****************************************************************************
* Name: spinlock
* Name: spin_lock
*
* Description:
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is non-reentrant and is prone to deadlocks in
* the case that any logic on the same CPU attempts to take the lock
* more than once.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
@ -134,13 +138,40 @@ void spinlock_initialize(FAR struct spinlock_s *lock);
*
****************************************************************************/
void spinlock(FAR struct spinlock_s *lock);
void spin_lock(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spinunlock
* Name: spin_lockr
*
* Description:
* Release one count on a spinlock.
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is re-entrant in the sense that it can be called
* numerous times from the same CPU without blocking. Of course,
* spin_unlockr() must then be called the same number of times. NOTE:
* the thread that originally took the lock may be executing on a
* different CPU when it unlocks the spinlock.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* None. When the function returns, the spinlock was successfully locked
* by this CPU.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_lockr(FAR struct spinlock_s *lock);
/****************************************************************************
* Name: spin_unlock
*
* Description:
* Release one count on a non-reentrant spinlock.
*
* Input Parameters:
* lock - A reference to the spinlock object to unlock.
@ -153,7 +184,61 @@ void spinlock(FAR struct spinlock_s *lock);
*
****************************************************************************/
void spinunlock(FAR struct spinlock_s *lock);
/* void spin_unlock(FAR spinlock_t *lock); */
#define spin_unlock(l) do { (l) = SP_UNLOCKED; } while (0)
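As a quick illustration of the non-reentrant pair above, here is a hypothetical usage sketch (the lock, counter, and function names are illustrative and not part of this commit):

#include <nuttx/spinlock.h>

static volatile spinlock_t g_example_lock = SP_UNLOCKED; /* Hypothetical lock */
static int g_example_count;                              /* Hypothetical shared data */

void example_increment(void)
{
  /* Non-reentrant: calling spin_lock() again on this CPU before the
   * matching unlock would deadlock.
   */

  spin_lock(&g_example_lock);

  g_example_count++;

  /* As defined above, spin_unlock() is a macro that takes the lock
   * variable itself (not its address).
   */

  spin_unlock(g_example_lock);
}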
/****************************************************************************
* Name: spin_unlockr
*
* Description:
* Release one count on a re-entrant spinlock.
*
* Input Parameters:
* lock - A reference to the spinlock object to unlock.
*
* Returned Value:
* None.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_unlockr(FAR struct spinlock_s *lock);
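For contrast, a hypothetical sketch of the re-entrant variant (names are illustrative only): nested calls from the same CPU simply count and must be balanced by the same number of spin_unlockr() calls.

#include <nuttx/spinlock.h>

static struct spinlock_s g_example_rlock;    /* Hypothetical re-entrant lock */

void example_init(void)
{
  spin_initialize(&g_example_rlock);         /* Unlocked, count == 0 */
}

void example_nested(void)
{
  spin_lockr(&g_example_rlock);              /* Spins only if another CPU holds it */
  spin_lockr(&g_example_rlock);              /* Same CPU: no blocking, count == 2 */

  /* ... protected work ... */

  spin_unlockr(&g_example_rlock);            /* Count 2 -> 1: still held */
  spin_unlockr(&g_example_rlock);            /* Count 1 -> 0: released */
}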
/****************************************************************************
* Name: spin_islocked
*
* Description:
* Test whether the non-reentrant spinlock is locked.
*
* Input Parameters:
* lock - A reference to the spinlock object to test.
*
* Returned Value:
* A boolean value: true if the spinlock is locked; false if it is unlocked.
*
****************************************************************************/
/* bool spin_islocked(FAR spinlock_t lock); */
#define spin_islocked(l) ((l) == SP_LOCKED)
/****************************************************************************
* Name: spin_islockedr
*
* Description:
* Test whether the re-entrant spinlock is locked.
*
* Input Parameters:
* lock - A reference to the spinlock object to test.
*
* Returned Value:
* A boolean value: true if the spinlock is locked; false if it is unlocked.
*
****************************************************************************/
/* bool spin_islockedr(FAR struct spinlock_s *lock); */
#define spin_islockedr(l) ((l)->sp_lock == SP_LOCKED)
#endif /* CONFIG_SPINLOCK */
#endif /* __INCLUDE_NUTTX_SPINLOCK_H */

View File

@ -49,6 +49,7 @@
#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/spinlock.h>
/****************************************************************************
* Pre-processor Definitions
@ -294,6 +295,78 @@ extern const struct tasklist_s g_tasklisttable[NUM_TASK_STATES];
extern volatile uint32_t g_cpuload_total;
#endif
/* Declared in sched_lock.c *************************************************/
/* Pre-emption is disabled via the interface sched_lock(). sched_lock()
* works by preventing context switches from the currently executing task.
* This prevents other tasks from running (without disabling interrupts) and
* gives the currently executing task exclusive access to the (single) CPU
* resources. Thus, sched_lock() and its companion, sched_unlock(), are
* used to implement some critical sections.
*
* In the single CPU case, pre-emption is disabled using a simple lockcount
* in the TCB. When the scheduler is locked, the lockcount is incremented;
* when the scheduler is unlocked, the lockcount is decremented. If the
* task at the head of the g_readytorun list has a lockcount > 0, then
* pre-emption is disabled.
*
* No special protection is required since only the executing task can
* modify its lockcount.
*/
#ifdef CONFIG_SMP
/* In the multiple CPU, SMP case, disabling context switches will not give a
* task exclusive access to the (multiple) CPU resources (at least without
* stopping the other CPUs): Even though pre-emption is disabled, other
* threads will still be executing on the other CPUs.
*
* There are additional rules for this multi-CPU case:
*
* 1. There is a global lock set 'g_cpu_lockset' that includes a bit for
* each CPU: If the bit is '1', then the corresponding CPU has the
* scheduler locked; if '0', then the CPU does not have the scheduler
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
* list has 'lockcount' > 0. This might happen when sched_lock()
* is called, or after a context switch that changes the TCB at the
* head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
* after a context switch that changes the TCB at the head of the
* g_assignedtasks[cpu] list.
* 4. Modification of the global 'g_cpu_lockset' must be protected by a
* spinlock, 'g_cpu_schedlock'. That spinlock would be taken when
* sched_lock() is called, and released when sched_unlock() is called.
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits would
* cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
* 5. Each time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 6. Logic that previously checked the currently running task's lockcount
* now instead uses the global 'g_cpu_schedlock'. A value of SP_UNLOCKED
* means that no CPU has pre-emption disabled; SP_LOCKED means that at
* least one CPU has pre-emption disabled.
*/
extern volatile spinlock_t g_cpu_schedlock;
#if (CONFIG_SMP_NCPUS <= 8)
extern volatile uint8_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 16)
extern volatile uint16_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 32)
extern volatile uint32_t g_cpu_lockset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
#endif /* CONFIG_SMP */
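Taken together, rules 1-6 reduce to a small update pattern that recurs throughout this commit; a hypothetical helper (shown here only for illustration, not part of this change) makes the pattern explicit:

static inline void example_update_lockset(int cpu, bool locked)
{
  if (locked)
    {
      /* This CPU now has the scheduler locked */

      g_cpu_lockset  |= (1 << cpu);
      g_cpu_schedlock = SP_LOCKED;
    }
  else
    {
      /* This CPU no longer has the scheduler locked; the global spinlock
       * stays locked only while some other CPU's bit remains set.
       */

      g_cpu_lockset  &= ~(1 << cpu);
      g_cpu_schedlock = (g_cpu_lockset == 0) ? SP_UNLOCKED : SP_LOCKED;
    }
}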
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
@ -348,6 +421,12 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
void sched_sporadic_lowpriority(FAR struct tcb_s *tcb);
#endif
#ifdef CONFIG_SMP
# define sched_islocked(tcb) spin_islocked(g_cpu_schedlock)
#else
# define sched_islocked(tcb) ((tcb)->lockcount > 0)
#endif
/* CPU load measurement support */
#if defined(CONFIG_SCHED_CPULOAD) && !defined(CONFIG_SCHED_CPULOAD_EXTCLK)

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_addreadytorun.c
*
* Copyright (C) 2007-2009, 2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2007-2009, 2014, 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -56,7 +56,7 @@
* This function adds a TCB to the ready to run list. If the currently
* active task has preemption disabled and the new TCB would cause this
* task to be pre-empted, the new task is added to the g_pendingtasks list
* instead. Thepending tasks will be made ready-to-run when preemption is
* instead. The pending tasks will be made ready-to-run when preemption is
* unlocked.
*
* Inputs:
@ -77,6 +77,7 @@
*
****************************************************************************/
#ifndef CONFIG_SMP
bool sched_addreadytorun(FAR struct tcb_s *btcb)
{
FAR struct tcb_s *rtcb = this_task();
@ -107,7 +108,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
sched_note_switch(rtcb, btcb);
/* The new btcb was added at the head of the ready-to-run list. It
* is now to new active task!
* is now the new active task!
*/
ASSERT(!rtcb->lockcount && btcb->flink != NULL);
@ -126,3 +127,228 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
return ret;
}
#endif /* !CONFIG_SMP */
/****************************************************************************
* Name: sched_addreadytorun
*
* Description:
* This function adds a TCB to one of the ready to run lists. That might
* be:
*
* 1. The g_readytorun list if the task is ready-to-run but not running
* and not assigned to a CPU.
* 2. The g_assignedtasks[cpu] list if the task is running or if it has been
* assigned to a CPU.
*
* If the currently active task has preemption disabled and the new TCB
* would cause this task to be pre-empted, the new task is added to the
* g_pendingtasks list instead. The pending tasks will be made
* ready-to-run when preemption is unlocked.
*
* Inputs:
* btcb - Points to the blocked TCB that is ready-to-run
*
* Return Value:
* true if the currently active task (the head of the ready-to-run list)
* has changed.
*
* Assumptions:
* - The caller has established a critical section before calling this
* function (calling sched_lock() first is NOT a good idea -- use
* irqsave()).
* - The caller has already removed the input rtcb from whatever list it
* was in.
* - The caller handles the condition that occurs if the head of the
* ready-to-run list is changed.
*
****************************************************************************/
#ifdef CONFIG_SMP
bool sched_addreadytorun(FAR struct tcb_s *btcb)
{
FAR struct tcb_s *rtcb;
FAR struct tcb_s *next;
FAR dq_queue_t *tasklist;
uint8_t minprio;
int task_state;
int cpu;
bool switched;
bool doswitch;
int i;
/* Find the CPU that is executing the lowest priority task (possibly its
* IDLE task).
*/
rtcb = NULL;
minprio = SCHED_PRIORITY_MAX;
cpu = 0;
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
FAR struct tcb_s *candidate =
(FAR struct tcb_s *)g_assignedtasks[i].head;
/* If this thread is executing its IDLE task, then use it. The IDLE
* task is always the last task in the assigned task list.
*/
if (candidate->flink == NULL)
{
/* The IDLE task should always be assigned to this CPU and have a
* priority of zero.
*/
DEBUGASSERT((candidate->flags & TCB_FLAG_CPU_ASSIGNED) != 0 &&
candidate->sched_priority == 0);
rtcb = candidate;
cpu = i;
break;
}
else if (candidate->sched_priority < minprio)
{
DEBUGASSERT(candidate->sched_priority > 0);
rtcb = candidate;
cpu = i;
}
}
/* Determine the desired new task state. First, if the new task priority
* is higher than the priority of the lowest priority running task, then
* the new task will be running and a context switch will be required.
*/
if (rtcb->sched_priority < btcb->sched_priority)
{
task_state = TSTATE_TASK_RUNNING;
}
/* If it will not be running, but is assigned to a CPU, then it will be in
* the assigned state.
*/
else if ((btcb->flags & TCB_FLAG_CPU_ASSIGNED) != 0)
{
task_state = TSTATE_TASK_ASSIGNED;
cpu = btcb->cpu;
}
/* Otherwise, it will be ready-to-run, but not yet running */
else
{
task_state = TSTATE_TASK_READYTORUN;
cpu = 0; /* CPU does not matter */
}
/* If the selected state is TSTATE_TASK_RUNNING, then we would like to
* start running the task. But we cannot do that if pre-emption is disabled.
*/
if (spin_islocked(g_cpu_schedlock) && task_state == TSTATE_TASK_RUNNING)
{
/* Preemption would occur! Add the new ready-to-run task to the
* g_pendingtasks task list for now.
*/
sched_addprioritized(btcb, (FAR dq_queue_t *)&g_pendingtasks);
btcb->task_state = TSTATE_TASK_PENDING;
doswitch = false;
}
else
{
/* Add the task to the list corresponding to the selected state
* and check if a context switch will occur
*/
tasklist = TLIST_HEAD(task_state, cpu);
switched = sched_addprioritized(btcb, tasklist);
/* If the selected task was added at the head of the g_assignedtasks[]
* list, then a context switch will occur.
*/
if (switched && task_state != TSTATE_TASK_READYTORUN)
{
/* The new btcb was added at the head of the ready-to-run list. It
* is now the new active task!
*
* Inform the instrumentation logic that we are switching tasks.
*/
sched_note_switch(rtcb, btcb);
/* Assign the CPU and set the running state */
DEBUGASSERT(task_state == TSTATE_TASK_RUNNING);
btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_RUNNING;
/* Adjust global pre-emption controls. */
if (btcb->lockcount > 0)
{
g_cpu_lockset |= (1 << cpu);
g_cpu_schedlock = SP_LOCKED;
}
else
{
g_cpu_lockset &= ~(1 << cpu);
if (g_cpu_lockset == 0)
{
g_cpu_schedlock = SP_UNLOCKED;
}
}
/* If the following task is not assigned to this CPU, then it must
* be moved to the g_readytorun list. Since it cannot be at the
* head of the list, we can do this without invoking any heavy
* lifting machinery.
*/
next = (FAR struct tcb_s *)btcb->flink;
ASSERT(!rtcb->lockcount && next != NULL);
if ((next->flags & TCB_FLAG_CPU_ASSIGNED) != 0)
{
next->task_state = TSTATE_TASK_ASSIGNED;
}
else
{
/* Remove the task from the assigned task list */
dq_rem((FAR dq_entry_t *)next, tasklist);
/* Add the task to the g_readytorun list. It may be
* assigned to a different CPU the next time that it runs.
*/
next->task_state = TSTATE_TASK_READYTORUN;
(void)sched_addprioritized(next, (FAR dq_queue_t *)&g_readytorun);
}
doswitch = true;
}
else
{
/* The new btcb was added either (1) in the middle of the assigned
* task list (the btcb->cpu field is already valid) or (2) was
* added to the ready-to-run list (the btcb->cpu field does not
* matter). Either way, it won't be running.
*/
DEBUGASSERT(task_state != TSTATE_TASK_RUNNING);
btcb->task_state = task_state;
doswitch = false;
}
}
return doswitch;
}
#endif /* CONFIG_SMP */

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_lock.c
*
* Copyright (C) 2007, 2009 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -45,6 +45,81 @@
#include <nuttx/arch.h>
#include "sched/sched.h"
/****************************************************************************
* Public Data
****************************************************************************/
/* Pre-emption is disabled via the interface sched_lock(). sched_lock()
* works by preventing context switches from the currently executing task.
* This prevents other tasks from running (without disabling interrupts) and
* gives the currently executing task exclusive access to the (single) CPU
* resources. Thus, sched_lock() and its companion, sched_unlock(), are
* used to implement some critical sections.
*
* In the single CPU case, pre-emption is disabled using a simple lockcount
* in the TCB. When the scheduler is locked, the lockcount is incremented;
* when the scheduler is unlocked, the lockcount is decremented. If the
* task at the head of the g_readytorun list has a lockcount > 0, then
* pre-emption is disabled.
*
* No special protection is required since only the executing task can
* modify its lockcount.
*/
#ifdef CONFIG_SMP
/* In the multiple CPU, SMP case, disabling context switches will not give a
* task exclusive access to the (multiple) CPU resources (at least without
* stopping the other CPUs): Even though pre-emption is disabled, other
* threads will still be executing on the other CPUs.
*
* There are additional rules for this multi-CPU case:
*
* 1. There is a global lock set 'g_cpu_lockset' that includes a bit for
* each CPU: If the bit is '1', then the corresponding CPU has the
* scheduler locked; if '0', then the CPU does not have the scheduler
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
* list has 'lockcount' > 0. This might happen when sched_lock()
* is called, or after a context switch that changes the TCB at the
* head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
* after a context switch that changes the TCB at the head of the
* g_assignedtasks[cpu] list.
* 4. Modification of the global 'g_cpu_lockset' must be protected by a
* spinlock, 'g_cpu_schedlock'. That spinlock would be taken when
* sched_lock() is called, and released when sched_unlock() is called.
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits would
* cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
* 5. Each time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 6. Logic that previously checked the currently running task's lockcount
* now instead uses the global 'g_cpu_schedlock'. A value of SP_UNLOCKED
* means that no CPU has pre-emption disabled; SP_LOCKED means that at
* least one CPU has pre-emption disabled.
*/
volatile spinlock_t g_cpu_schedlock;
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 32)
volatile uint32_t g_cpu_lockset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
#endif /* CONFIG_SMP */
/****************************************************************************
* Public Functions
****************************************************************************/
@ -71,15 +146,61 @@ int sched_lock(void)
{
FAR struct tcb_s *rtcb = this_task();
/* Check for some special cases: (1) rtcb may be NULL only during
* early boot-up phases, and (2) sched_lock() should have no
* effect if called from the interrupt level.
/* Check for some special cases: (1) rtcb may be NULL only during early
* boot-up phases, and (2) sched_lock() should have no effect if called
* from the interrupt level.
*/
if (rtcb && !up_interrupt_context())
{
ASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
rtcb->lockcount++;
/* Catch attempts to increment the lockcount beyond the range of the
* integer type.
*/
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
#ifdef CONFIG_SMP
/* We must hold the lock on this CPU before we increment the lockcount
* for the first time. Holding the lock is sufficient to lock out context
* switching.
*/
if (rtcb->lockcount == 0)
{
/* We don't have the scheduler locked. But logic running on a
* different CPU may have the scheduler locked. It is not
* possible for some other task on this CPU to have the scheduler
* locked (or we would not be executing!).
*
* If the scheduler is locked on another CPU, then we wait for the lock.
*/
spin_lock(&g_cpu_schedlock);
/* Set a bit in g_cpu_lockset to indicate that this CPU holds the
* scheduler lock. This is mostly for debug purposes but should
* also handle a few corner cases during context switching.
*/
g_cpu_lockset |= (1 << this_cpu());
}
else
{
/* If this thread already has the scheduler locked, then
* g_cpu_schedlock should indicate that the scheduler is locked
* and g_cpu_lockset should include the bit setting for this CPU.
*/
DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
(g_cpu_lockset & (1 << this_cpu())) != 0);
}
#endif
/* A counter is used to support locking. This allows nested lock
* operations on this thread (on any CPU)
*/
rtcb->lockcount++;
}
return OK;
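From the caller's perspective, the lockcount makes sched_lock()/sched_unlock() freely nestable; a hypothetical sketch (function names are illustrative, not from this commit):

#include <sched.h>

static void example_inner(void)
{
  sched_lock();      /* lockcount 1 -> 2: lock already held, no spinning */

  /* ... work that must not be pre-empted ... */

  sched_unlock();    /* lockcount 2 -> 1: pre-emption remains disabled */
}

void example_outer(void)
{
  sched_lock();      /* lockcount 0 -> 1: on SMP, also takes g_cpu_schedlock */
  example_inner();
  sched_unlock();    /* lockcount 1 -> 0: on SMP, releases g_cpu_schedlock */
}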

View File

@ -107,6 +107,29 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
ntcb = (FAR struct tcb_s *)rtcb->flink;
DEBUGASSERT(ntcb != NULL);
#ifdef CONFIG_SMP
/* Will pre-emption be disabled after the switch? */
if (ntcb->lockcount > 0)
{
/* Yes... make sure that scheduling logic knows about this */
g_cpu_lockset |= (1 << this_cpu());
g_cpu_schedlock = SP_LOCKED;
}
else
{
/* No.. we may need to release our hold on the lock.
*
* REVISIT: It might be possible for two CPUs to hold the lock in
* some strange corner cases.
*/
g_cpu_lockset &= ~(1 << this_cpu());
g_cpu_schedlock = ((g_cpu_lockset == 0) ? SP_UNLOCKED : SP_LOCKED);
}
#endif
/* Inform the instrumentation layer that we are switching tasks */
sched_note_switch(rtcb, ntcb);

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_roundrobin.c
*
* Copyright (C) 2007, 2009, 2014-2015 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2014-2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -124,7 +124,7 @@ uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
*/
ret = tcb->timeslice;
if (tcb->timeslice <= 0 && tcb->lockcount == 0)
if (tcb->timeslice <= 0 && !sched_islocked(tcb))
{
/* We will also suppress context switches if we were called via one
* of the unusual cases handled by sched_timer_reassess(). In that

View File

@ -452,7 +452,7 @@ static void sporadic_budget_expire(int argc, wdparm_t arg1, ...)
* this operation is needed.
*/
if (tcb->lockcount > 0)
if (sched_islocked(tcb))
{
DEBUGASSERT((mrepl->flags & SPORADIC_FLAG_ALLOCED) != 0 &&
sporadic->nrepls > 0);
@ -600,7 +600,7 @@ static void sporadic_replenish_expire(int argc, wdparm_t arg1, ...)
* this operation is needed.
*/
if (tcb->lockcount > 0)
if (sched_islocked(tcb))
{
/* Set the timeslice to the magic value */
@ -1199,7 +1199,7 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
/* Does the thread have the scheduler locked? */
sporadic = tcb->sporadic;
if (tcb->lockcount > 0)
if (sched_islocked(tcb))
{
/* Yes... then we have no option but to give the thread more
* time at the higher priority. Dropping the priority could

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/sched/sched_unlock.c
*
* Copyright (C) 2007, 2009, 2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009, 2014, 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -91,6 +91,24 @@ int sched_unlock(void)
{
rtcb->lockcount = 0;
#ifdef CONFIG_SMP
/* The lockcount has decremented to zero and we need to release our
* hold on the lock.
*
* REVISIT: It might be possible for two CPUs to hold the lock in
* some strange corner cases.
*/
DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
(g_cpu_lockset & (1 << this_cpu())) != 0);
g_cpu_lockset &= ~(1 << this_cpu());
if (g_cpu_lockset == 0)
{
spin_unlock(g_cpu_schedlock);
}
#endif
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
*/

View File

@ -56,7 +56,7 @@
/* REVISIT: What happens if a thread takes a spinlock while running on one
* CPU, but is suspended, then reassigned to another CPU where it runs and
* eventually calls spinunlock(). One solution might be to lock a thread to
* eventually calls spin_unlock(). One solution might be to lock a thread to
* a CPU if it holds a spinlock. That would assure that it never runs on
* any other CPU and avoids such complexities.
*/
@ -68,7 +68,7 @@
****************************************************************************/
/****************************************************************************
* Name: spinlock_initialize
* Name: spin_initialize
*
* Description:
* Initialize a spinlock object to its initial, unlocked state.
@ -81,7 +81,7 @@
*
****************************************************************************/
void spinlock_initialize(FAR struct spinlock_s *lock)
void spin_initialize(FAR struct spinlock_s *lock)
{
DEBUGASSERT(lock != NULL);
@ -93,12 +93,16 @@ void spinlock_initialize(FAR struct spinlock_s *lock)
}
/****************************************************************************
* Name: spinlock
* Name: spin_lock
*
* Description:
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is non-reentrant and is prone to deadlocks in
* the case that any logic on the same CPU attempts to take the lock
* more than once.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
@ -111,7 +115,40 @@ void spinlock_initialize(FAR struct spinlock_s *lock)
*
****************************************************************************/
void spinlock(FAR struct spinlock_s *lock)
void spin_lock(FAR volatile spinlock_t *lock)
{
while (up_testset(lock) == SP_LOCKED)
{
sched_yield();
}
}
/****************************************************************************
* Name: spin_lockr
*
* Description:
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is re-entrant in the sense that it can be called
* numerous times from the same CPU without blocking. Of course,
* spin_unlockr() must then be called the same number of times. NOTE:
* the thread that originally took the lock may be executing on a
* different CPU when it unlocks the spinlock.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* None. When the function returns, the spinlock was successfully locked
* by this CPU.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_lockr(FAR struct spinlock_s *lock)
{
#ifdef CONFIG_SMP
irqstate_t flags;
@ -134,7 +171,7 @@ void spinlock(FAR struct spinlock_s *lock)
{
#ifdef CONFIG_SPINLOCK_LOCKDOWN
/* REVISIT: What happens if this thread is suspended, then reassigned
* to another CPU where it runs and eventually calls spinunlock().
* to another CPU where it runs and eventually calls spin_unlock().
* One solution might be to lock a thread to a CPU if it holds a
* spinlock. That would assure that it never runs on any other CPU
* and avoids such complexities.
@ -178,7 +215,7 @@ void spinlock(FAR struct spinlock_s *lock)
}
/****************************************************************************
* Name: spinunlock
* Name: spin_unlockr
*
* Description:
* Release one count on a spinlock.
@ -194,7 +231,7 @@ void spinlock(FAR struct spinlock_s *lock)
*
****************************************************************************/
void spinunlock(FAR struct spinlock_s *lock)
void spin_unlockr(FAR struct spinlock_s *lock)
{
#ifdef CONFIG_SMP
irqstate_t flags;
@ -209,7 +246,7 @@ void spinunlock(FAR struct spinlock_s *lock)
#ifdef CONFIG_SPINLOCK_LOCKDOWN
/* REVISIT: What happens if this thread took the lock on a different CPU,
* was suspended, then reassigned to this CPU where it runs and eventually
* calls spinunlock(). One solution might be to lock a thread to a CPU if
* calls spin_unlock(). One solution might be to lock a thread to a CPU if
* it holds a spinlock. That would assure that it never runs on any other
* CPU and avoids such complexities.
*/
@ -221,6 +258,8 @@ void spinunlock(FAR struct spinlock_s *lock)
if (lock->sp_cpu == cpu)
#else
/* The alternative is to allow the lock to be released from any CPU */
DEBUGASSERT(lock != NULL && lock->sp_lock == SP_LOCKED &&
lock->sp_count > 0);
#endif

View File

@ -1,7 +1,7 @@
/****************************************************************************
* sched/task/task_exit.c
*
* Copyright (C) 2008-2009, 2012-2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2008-2009, 2012-2014, 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -107,6 +107,14 @@ int task_exit(void)
*/
rtcb->lockcount++;
#ifdef CONFIG_SMP
/* Make sure that the system knows about the locked state */
g_cpu_schedlock = SP_LOCKED;
g_cpu_lockset |= (1 << this_cpu());
#endif
rtcb->task_state = TSTATE_TASK_READYTORUN;
/* Move the TCB to the specified blocked task list and delete it. Calling
@ -137,5 +145,19 @@ int task_exit(void)
*/
rtcb->lockcount--;
#ifdef CONFIG_SMP
if (rtcb->lockcount == 0)
{
/* Make sure that the system knows about the unlocked state */
g_cpu_lockset &= ~(1 << this_cpu());
if (g_cpu_lockset == 0)
{
g_cpu_schedlock = SP_UNLOCKED;
}
}
#endif
return ret;
}