SMP: Add functions to perform atomic bit set/clear operations; fix two errors in SMP macros

Gregory Nutt 2016-02-17 13:20:01 -06:00
parent 8ac12839c3
commit b50325bb38
11 changed files with 177 additions and 101 deletions

@@ -1 +1 @@
Subproject commit ff5400544e8ab499fcf20a1a88ebec05c9cbd2e9
Subproject commit af52276c2aa09944c1c2f158cdb05e7eb53c1659

View File

@@ -73,6 +73,18 @@ struct spinlock_s
#endif
};
/* This is the smallest integer type that will hold a bitset of all CPUs */
#if (CONFIG_SMP_NCPUS <= 8)
typedef volatile uint8_t cpuset_t;
#elif (CONFIG_SMP_NCPUS <= 16)
typedef volatile uint16_t cpuset_t;
#elif (CONFIG_SMP_NCPUS <= 32)
typedef volatile uint32_t cpuset_t;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
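With this typedef, bit n of a cpuset_t stands for CPU n, so the whole set fits in the smallest integer wide enough for CONFIG_SMP_NCPUS. A minimal sketch of the intended bit addressing (cpuset_test is a hypothetical helper, not part of this commit):

/* Hypothetical helper: test whether CPU 'cpu' is a member of 'set' */

static inline bool cpuset_test(cpuset_t set, unsigned int cpu)
{
  return (set & ((cpuset_t)1 << cpu)) != 0;
}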
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
@@ -202,7 +214,7 @@ void spin_lockr(FAR struct spinlock_s *lock);
****************************************************************************/
/* void spin_unlock(FAR spinlock_t *lock); */
#define spin_unlock(l) do { (l) = SP_UNLOCKED; } while (0)
#define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
/****************************************************************************
* Name: spin_unlockr
@@ -238,7 +250,7 @@ void spin_unlockr(FAR struct spinlock_s *lock);
****************************************************************************/
/* bool spin_islocked(FAR spinlock_t *lock); */
#define spin_islocked(l) ((l) == SP_LOCKED)
#define spin_islocked(l) (*(l) == SP_LOCKED)
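Both macro fixes correct the same defect: the macros take a pointer to the spinlock (as the corrected prototype comments show), so the body must dereference its argument rather than assign to or compare the pointer itself. A minimal caller under the fixed definitions (a sketch, assuming a local lock variable):

volatile spinlock_t lock = SP_LOCKED;

spin_unlock(&lock);                  /* Expands to *(&lock) = SP_UNLOCKED */
DEBUGASSERT(!spin_islocked(&lock));  /* Expands to *(&lock) == SP_LOCKED, now false */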
/****************************************************************************
* Name: spin_islockedr
@@ -257,5 +269,47 @@ void spin_unlockr(FAR struct spinlock_s *lock);
/* bool spin_islockedr(FAR struct spinlock_s *lock); */
#define spin_islockedr(l) ((l)->sp_lock == SP_LOCKED)
/****************************************************************************
* Name: spin_setbit
*
* Description:
* Makes setting a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
setlock - A reference to the spinlock protecting the set
* orlock - Will be set to SP_LOCKED while holding setlock
*
* Returned Value:
* None
*
****************************************************************************/
void spin_setbit(FAR volatile cpuset_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
/****************************************************************************
* Name: spin_clrbit
*
* Description:
* Makes clearing a CPU bit in a bitset an atomic action
*
* Input Parameters:
set - A reference to the bitset to clear the CPU bit in
cpu - The bit number to be cleared
setlock - A reference to the spinlock protecting the set
* orlock - Will be set to SP_UNLOCKED if all bits become cleared in set
*
* Returned Value:
* None
*
****************************************************************************/
void spin_clrbit(FAR volatile cpuset_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
#endif /* CONFIG_SPINLOCK */
#endif /* __INCLUDE_NUTTX_SPINLOCK_H */
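Taken together, the two interfaces maintain an invariant: 'orlock' is SP_LOCKED exactly while 'set' has at least one bit set. A minimal usage sketch under that model (all names below are illustrative, not part of the commit):

static volatile cpuset_t g_users;        /* Which CPUs hold the resource */
static volatile spinlock_t g_userslock;  /* Serializes updates to g_users */
static volatile spinlock_t g_anyuser;    /* SP_LOCKED while g_users != 0 */

static void mark_this_cpu_busy(void)
{
  spin_setbit(&g_users, this_cpu(), &g_userslock, &g_anyuser);
}

static void mark_this_cpu_idle(void)
{
  spin_clrbit(&g_users, this_cpu(), &g_userslock, &g_anyuser);
}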

View File

@@ -63,21 +63,12 @@ extern FAR xcpt_t g_irqvector[NR_IRQS+1];
* disabled.
*/
extern spinlock_t g_cpu_irqlock;
extern volatile spinlock_t g_cpu_irqlock;
/* Used to keep track of which CPU(s) hold the IRQ lock. There really should
* only be one.
*/
/* Used to keep track of which CPU(s) hold the IRQ lock. */
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 32)
volatile uint32_t g_cpu_irqset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
extern volatile spinlock_t g_cpu_irqsetlock;
extern volatile cpuset_t g_cpu_irqset;
#endif
/****************************************************************************

View File

@@ -54,21 +54,12 @@
* disabled.
*/
spinlock_t g_cpu_irqlock = SP_UNLOCKED;
volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. There really should
* only be one.
*/
/* Used to keep track of which CPU(s) hold the IRQ lock. */
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 32)
volatile uint32_t g_cpu_irqset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
volatile spinlock_t g_cpu_irqsetlock;
volatile cpuset_t g_cpu_irqset;
/****************************************************************************
* Public Functions
@@ -125,10 +116,9 @@ irqstate_t enter_critical_section(void)
* lockcount: Both will disable pre-emption.
*/
spin_lock(&g_cpu_irqlock);
spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
rtcb->irqcount = 1;
g_cpu_irqset |= (1 << this_cpu());
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
/* Note that we have entered the critical section */
@@ -186,16 +176,13 @@ void leave_critical_section(irqstate_t flags)
*/
rtcb->irqcount = 0;
g_cpu_irqset &= ~(1 << this_cpu());
spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
/* Have all CPUs release the lock? */
if (g_cpu_irqset == 0)
if (!spin_islocked(&g_cpu_irqlock))
{
/* Unlock the IRQ spinlock */
spin_unlock(g_cpu_irqlock);
/* Check if there are pending tasks and that pre-emption is
* also enabled.
*/
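The net effect of the rewrite is a simpler invariant: g_cpu_irqlock is SP_LOCKED exactly while g_cpu_irqset is non-zero, i.e. while at least one CPU is inside the critical section. A condensed sketch of the pairing (instrumentation and rtcb bookkeeping omitted; not the full implementation):

/* Entering: spin until the lock is free, then record this CPU.
 * spin_setbit() leaves g_cpu_irqlock locked on our behalf.
 */

spin_lock(&g_cpu_irqlock);
spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock, &g_cpu_irqlock);

/* Leaving: remove this CPU from the set. spin_clrbit() releases
 * g_cpu_irqlock when the last bit is cleared, so no explicit
 * spin_unlock() is needed.
 */

spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock, &g_cpu_irqlock);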

View File

@@ -355,15 +355,10 @@ extern volatile uint32_t g_cpuload_total;
extern volatile spinlock_t g_cpu_schedlock;
#if (CONFIG_SMP_NCPUS <= 8)
extern volatile uint8_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 16)
extern volatile uint16_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 32)
extern volatile uint32_t g_cpu_lockset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
/* Used to keep track of which CPU(s) hold the scheduler lock. */
extern volatile spinlock_t g_cpu_locksetlock;
extern volatile cpuset_t g_cpu_lockset;
#endif /* CONFIG_SMP */
@@ -423,7 +418,7 @@ void sched_sporadic_lowpriority(FAR struct tcb_s *tcb);
#ifdef CONFIG_SMP
int sched_cpu_select(void);
# define sched_islocked(tcb) spin_islocked(g_cpu_schedlock)
# define sched_islocked(tcb) spin_islocked(&g_cpu_schedlock)
#else
# define sched_islocked(tcb) ((tcb)->lockcount > 0)
# define sched_cpu_select (0)

View File

@@ -229,7 +229,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
* start running the task. But we cannot do that if pre-emption is disabled.
*/
if (spin_islocked(g_cpu_schedlock) && task_state == TSTATE_TASK_RUNNING)
if (spin_islocked(&g_cpu_schedlock) && task_state == TSTATE_TASK_RUNNING)
{
/* Preemption would occur! Add the new ready-to-run task to the
* g_pendingtasks task list for now.
@@ -304,16 +304,13 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
if (btcb->lockcount > 0)
{
g_cpu_lockset |= (1 << cpu);
g_cpu_schedlock = SP_LOCKED;
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
else
{
g_cpu_lockset &= ~(1 << cpu);
if (g_cpu_lockset == 0)
{
g_cpu_schedlock = SP_UNLOCKED;
}
spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
/* Adjust global IRQ controls. If irqcount is greater than zero,
@@ -322,16 +319,13 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
if (btcb->irqcount > 0)
{
g_cpu_irqset |= (1 << cpu);
g_cpu_irqlock = SP_LOCKED;
spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
else
{
g_cpu_irqset &= ~(1 << cpu);
if (g_cpu_irqset == 0)
{
g_cpu_irqlock = SP_UNLOCKED;
}
spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
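Both adjustments follow one pattern: a per-task count on the incoming TCB decides whether this CPU's bit belongs in the corresponding global bitset. A hypothetical helper makes the pattern explicit (sync_cpu_bit is illustrative only, not part of the commit):

static void sync_cpu_bit(int count, FAR volatile cpuset_t *set,
                         FAR volatile spinlock_t *setlock,
                         FAR volatile spinlock_t *orlock)
{
  if (count > 0)
    {
      /* The task holds the lock: mark this CPU in the set */

      spin_setbit(set, this_cpu(), setlock, orlock);
    }
  else
    {
      /* The task does not hold it: clear this CPU's bit.  Clearing
       * the last bit also releases 'orlock'.
       */

      spin_clrbit(set, this_cpu(), setlock, orlock);
    }
}

With it, the two blocks above reduce to sync_cpu_bit(btcb->lockcount, &g_cpu_lockset, &g_cpu_locksetlock, &g_cpu_schedlock) and sync_cpu_bit(btcb->irqcount, &g_cpu_irqset, &g_cpu_irqsetlock, &g_cpu_irqlock).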
/* If the following task is not assigned to this CPU, then it must

View File

@@ -106,17 +106,12 @@
* least one CPU has pre-emption disabled.
*/
volatile spinlock_t g_cpu_schedlock;
volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 32)
volatile uint32_t g_cpu_lockset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
/* Used to keep track of which CPU(s) hold the scheduler lock. */
volatile spinlock_t g_cpu_locksetlock;
volatile cpuset_t g_cpu_lockset;
#endif /* CONFIG_SMP */
@@ -175,14 +170,8 @@ int sched_lock(void)
* If the scheduler is locked on another CPU, then we wait for the lock.
*/
spin_lock(&g_cpu_schedlock);
/* Set a bit in g_cpu_lockset to indicate that this CPU holds the
* scheduler lock. This is mostly for debug purposes but should
* also handle a few corner cases during context switching.
*/
g_cpu_lockset |= (1 << this_cpu());
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
else
{

View File

@@ -117,15 +117,15 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
/* Yes... make sure that scheduling logic knows about this */
g_cpu_lockset |= (1 << this_cpu());
g_cpu_schedlock = SP_LOCKED;
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
else
{
/* No.. we may need to release our hold on the lock. */
g_cpu_lockset &= ~(1 << this_cpu());
g_cpu_schedlock = ((g_cpu_lockset == 0) ? SP_UNLOCKED : SP_LOCKED);
spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
/* Interrupts may be disabled after the switch. If irqcount is greater
@@ -136,15 +136,15 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
/* Yes... make sure that scheduling logic knows about this */
g_cpu_irqset |= (1 << this_cpu());
g_cpu_irqlock = SP_LOCKED;
spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
else
{
/* No.. we may need to release our hold on the lock. */
g_cpu_irqset &= ~(1 << this_cpu());
g_cpu_irqlock = ((g_cpu_irqset == 0) ? SP_UNLOCKED : SP_LOCKED);
spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
#endif

View File

@@ -107,11 +107,8 @@ int sched_unlock(void)
DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
(g_cpu_lockset & (1 << this_cpu())) != 0);
g_cpu_lockset &= ~(1 << this_cpu());
if (g_cpu_lockset == 0)
{
spin_unlock(g_cpu_schedlock);
}
spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
#endif
/* Release any ready-to-run tasks that have collected in

View File

@@ -295,4 +295,76 @@ void spin_unlockr(FAR struct spinlock_s *lock)
#endif /* CONFIG_SMP */
}
/****************************************************************************
* Name: spin_setbit
*
* Description:
* Makes setting a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
setlock - A reference to the spinlock protecting the set
* orlock - Will be set to SP_LOCKED while holding setlock
*
* Returned Value:
* None
*
****************************************************************************/
void spin_setbit(FAR volatile cpuset_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock)
{
/* First, get the 'setlock' spinlock */
spin_lock(setlock);
/* Then set the bit and mark the 'orlock' as locked */
*set |= (1 << cpu);
*orlock = SP_LOCKED;
/* Release the 'setlock' */
spin_unlock(setlock);
}
/****************************************************************************
* Name: spin_clrbit
*
* Description:
* Makes clearing a CPU bit in a bitset an atomic action
*
* Input Parameters:
set - A reference to the bitset to clear the CPU bit in
cpu - The bit number to be cleared
setlock - A reference to the spinlock protecting the set
* orlock - Will be set to SP_UNLOCKED if all bits become cleared in set
*
* Returned Value:
* None
*
****************************************************************************/
void spin_clrbit(FAR volatile cpuset_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock)
{
/* First, get the 'setlock' spinlock */
spin_lock(setlock);
/* Then clear the bit in the CPU set. Set/clear the 'orlock' depending
* upon the resulting state of the CPU set.
*/
*set &= ~(1 << cpu);
*orlock = (*set != 0) ? SP_LOCKED : SP_UNLOCKED;
/* Release the 'setlock' */
spin_unlock(setlock);
}
#endif /* CONFIG_SPINLOCK */
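Because *set and *orlock are only updated while 'setlock' is held, other CPUs can never observe a non-empty set paired with an unlocked 'orlock' (or the reverse). A short sequence showing the resulting state transitions (the local variables are illustrative):

cpuset_t set = 0;
spinlock_t setlock = SP_UNLOCKED;
spinlock_t orlock = SP_UNLOCKED;

spin_setbit(&set, 0, &setlock, &orlock);  /* set = 0x1, orlock = SP_LOCKED */
spin_setbit(&set, 1, &setlock, &orlock);  /* set = 0x3, orlock = SP_LOCKED */
spin_clrbit(&set, 0, &setlock, &orlock);  /* set = 0x2, orlock stays locked */
spin_clrbit(&set, 1, &setlock, &orlock);  /* set = 0x0, orlock = SP_UNLOCKED */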

View File

@@ -111,8 +111,8 @@ int task_exit(void)
#ifdef CONFIG_SMP
/* Make sure that the system knows about the locked state */
g_cpu_schedlock = SP_LOCKED;
g_cpu_lockset |= (1 << this_cpu());
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
#endif
rtcb->task_state = TSTATE_TASK_READYTORUN;
@@ -151,11 +151,8 @@
{
/* Make sure that the system knows about the unlocked state */
g_cpu_lockset &= ~(1 << this_cpu());
if (g_cpu_lockset == 0)
{
g_cpu_schedlock = SP_UNLOCKED;
}
spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
}
#endif