SMP: First cut integration of enter/leave_critical_section and the scheduler. There are some issues.

This commit is contained in:
Gregory Nutt 2016-02-15 08:06:17 -06:00
parent 4d0103f210
commit c7df82147f
7 changed files with 155 additions and 36 deletions

arch

@@ -1 +1 @@
Subproject commit 70afd900cc88772c12618e1b099e47e20d947979
Subproject commit 839a04f989000895820af4691d9596fc4385f48b

View File

@@ -64,6 +64,20 @@ extern FAR xcpt_t g_irqvector[NR_IRQS+1];
*/
extern spinlock_t g_cpu_irqlock;
/* Used to keep track of which CPU(s) hold the IRQ lock. There really should
* only be one.
*/
#if (CONFIG_SMP_NCPUS <= 8)
extern volatile uint8_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 16)
extern volatile uint16_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 32)
extern volatile uint32_t g_cpu_irqset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
#endif
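For orientation, the bitset declared above holds one bit per CPU, indexed by this_cpu(), and is queried with plain mask operations. A minimal sketch (these helper functions are illustrative only, not part of the commit):

/* Sketch: query the IRQ-lock holder bitset (hypothetical helpers) */

static inline bool irq_lock_held_by_any_cpu(void)
{
  return g_cpu_irqset != 0;                       /* Any holder at all? */
}

static inline bool irq_lock_held_by_this_cpu(void)
{
  return (g_cpu_irqset & (1 << this_cpu())) != 0; /* Our bit set? */
}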
/****************************************************************************

View File

@@ -39,6 +39,7 @@
#include <nuttx/config.h>
#include <nuttx/sched.h>
#include <nuttx/spinlock.h>
#include <arch/irq.h>
@@ -56,6 +57,20 @@
spinlock_t g_cpu_irqlock = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. There really should
* only be one.
*/
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_irqset;
#elif (CONFIG_SMP_NCPUS <= 32)
volatile uint32_t g_cpu_irqset;
#else
# error SMP: Extensions needed to support this number of CPUs
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
@@ -72,10 +87,22 @@ spinlock_t g_cpu_irqlock = SP_UNLOCKED;
irqstate_t enter_critical_section(void)
{
FAR struct tcb_s *rtcb = this_task();
FAR struct tcb_s *rtcb;
/* Do nothing if called from an interrupt handler */
if (up_interrupt_context())
{
/* The value returned does not matter. We assume only that it is a
* scalar here.
*/
return (irqstate_t)0;
}
/* Do we already have interrupts disabled? */
rtcb = this_task();
if (rtcb->irqcount > 0)
{
/* Yes... make sure that the spinlock is set and increment the IRQ
@@ -89,13 +116,28 @@ irqstate_t enter_critical_section(void)
{
/* NO.. Take the spinlock to get exclusive access and set the lock
* count to 1.
*
* We must avoid the case where a context switch occurs between taking
* the g_cpu_irqlock and disabling interrupts. Also, interrupt
* disables must follow a stacked order. We cannot allow other
* context switches to re-order the enabling/disabling of interrupts.
* We can accomplish both by locking the scheduler while interrupts
* are disabled.
*
* NOTE that sched_lock() also uses a spinlock. We will avoid
* deadlocks by assuring that the scheduler spinlock is always
* taken before the IRQ spinlock.
*/
sched_lock();
spin_lock(&g_cpu_irqlock);
rtcb->irqcount = 1;
g_cpu_irqset |= (1 << this_cpu());
}
/* Then disable interrupts (if they have not already been disabeld) */
/* Then disable interrupts (they may already be disabled, but we need to
* return valid interrupt status in any event).
*/
return up_irq_save();
}
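Taken together, the logic above makes the critical section re-entrant per task: only the outermost enter_critical_section() takes g_cpu_irqlock (and the scheduler lock); nested calls merely bump irqcount. A minimal usage sketch, assuming only the interfaces shown in this commit:

/* Sketch: nested critical sections on one task */

irqstate_t outer = enter_critical_section();  /* irqcount 0 -> 1: locks   */
irqstate_t inner = enter_critical_section();  /* irqcount 1 -> 2: counts  */

/* ... code that must run with interrupts disabled on this CPU ... */

leave_critical_section(inner);                /* irqcount 2 -> 1: counts  */
leave_critical_section(outer);                /* irqcount 1 -> 0: unlocks */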
@@ -111,32 +153,51 @@ irqstate_t enter_critical_section(void)
void leave_critical_section(irqstate_t flags)
{
FAR struct tcb_s *rtcb = this_task();
/* Do nothing if called from an interrupt handler */
DEBUGASSERT(rtcb->irqcount > 0);
/* Will we still have interrupts disabled after decrementing the count? */
if (rtcb->irqcount > 1)
if (!up_interrupt_context())
{
/* Yes... make sure that the spinlock is set */
FAR struct tcb_s *rtcb = this_task();
DEBUGASSERT(g_cpu_irqlock == SP_LOCKED);
rtcb->irqcount--;
DEBUGASSERT(rtcb->irqcount > 0);
/* Will we still have interrupts disabled after decrementing the
* count?
*/
if (rtcb->irqcount > 1)
{
/* Yes... make sure that the spinlock is set */
DEBUGASSERT(g_cpu_irqlock == SP_LOCKED);
rtcb->irqcount--;
}
else
{
/* NO.. Release the spinlock to allow other access.
*
* REVISIT: There is a cornercase where multiple CPUs may think
* they are the holder of the IRQ spinlock. We will need to disable
* the scheduler to assure that the following operation is atomic.
* Hmmm... but that could cause a deadlock! What to do? Perhaps
* an array of booleans instead of a bitset?
*/
g_cpu_irqset &= ~(1 << this_cpu());
rtcb->irqcount = 0;
spin_unlock(&g_cpu_irqlock);
/* And re-enable pre-emption */
sched_unlock();
}
/* Restore the previous interrupt state, which may still have interrupts
* disabled (but we don't have a mechanism to verify that now)
*/
up_irq_restore(flags);
}
else
{
/* NO.. Take the spinlock to get exclusive access. */
rtcb->irqcount = 0;
spin_unlock(g_cpu_irqlock);
}
/* Restore the previous interrupt state which may still be interrupts
* disabled (but we don't have a mechanism to verify that now)
*/
up_irq_restore(flags);
}
#endif /* CONFIG_SMP */
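The NOTE in enter_critical_section() above amounts to a global lock-ordering rule, which is the standard way to rule out an AB/BA deadlock between two spinlocks. Spelled out (sketch only, not code from the commit):

/* Every CPU acquires the two locks in the same order:
 *
 *   sched_lock();               (1) take g_cpu_schedlock first
 *   spin_lock(&g_cpu_irqlock);  (2) then take g_cpu_irqlock
 *
 * If CPU0 held g_cpu_schedlock while CPU1 held g_cpu_irqlock, and each
 * then spun on the other lock, neither could ever proceed. A single
 * agreed order makes that cycle impossible.
 */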

View File

@@ -43,6 +43,7 @@
#include <queue.h>
#include <assert.h>
#include "irq/irq.h"
#include "sched/sched.h"
/****************************************************************************
@@ -326,7 +327,10 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
btcb->cpu = cpu;
btcb->task_state = TSTATE_TASK_RUNNING;
/* Adjust global pre-emption controls. */
/* Adjust global pre-emption controls. If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler
* lock.
*/
if (btcb->lockcount > 0)
{
@@ -335,13 +339,31 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
}
else
{
g_cpu_lockset &= ~(1 << cpu);
g_cpu_lockset &= ~(1 << cpu);
if (g_cpu_lockset == 0)
{
g_cpu_schedlock = SP_UNLOCKED;
}
}
/* Adjust global IRQ controls. If irqcount is greater than zero,
* then this task/this CPU holds the IRQ lock
*/
if (btcb->irqcount > 0)
{
g_cpu_irqset |= (1 << cpu);
g_cpu_irqlock = SP_LOCKED;
}
else
{
g_cpu_irqset &= ~(1 << cpu);
if (g_cpu_irqset == 0)
{
g_cpu_irqlock = SP_UNLOCKED;
}
}
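The scheduler-lock transfer above and this IRQ-lock transfer (and the mirror-image logic in sched_removereadytorun() below) share one shape. Purely as illustration (this helper does not exist in the commit, and the real locksets vary in width with CONFIG_SMP_NCPUS):

/* Hypothetical helper: move a task's lock state onto a CPU's bit */

static void lockset_transfer(bool holds, int cpu,
                             FAR volatile uint32_t *lockset,
                             FAR volatile spinlock_t *lock)
{
  if (holds)
    {
      *lockset |= (1 << cpu);          /* This CPU now holds the lock */
      *lock     = SP_LOCKED;
    }
  else
    {
      *lockset &= ~(1 << cpu);         /* This CPU does not hold it */
      if (*lockset == 0)
        {
          *lock = SP_UNLOCKED;         /* No holders remain */
        }
    }
}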
/* If the following task is not assigned to this CPU, then it must
* be moved to the g_readytorun list. Since it cannot be at the
* head of the list, we can do this without invoking any heavy

View File

@@ -109,7 +109,7 @@
volatile spinlock_t g_cpu_schedlock;
#if (CONFIG_SMP_NCPUS <= 8)
volatile uint8_t g_cpu_lockset;
volatile uint8_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 16)
volatile uint16_t g_cpu_lockset;
#elif (CONFIG_SMP_NCPUS <= 32)

View File

@@ -43,6 +43,7 @@
#include <queue.h>
#include <assert.h>
#include "irq/irq.h"
#include "sched/sched.h"
/****************************************************************************
@@ -109,7 +110,9 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
DEBUGASSERT(ntcb != NULL);
#ifdef CONFIG_SMP
/* Will pre-emption be disabled after the switch? */
/* Will pre-emption be disabled after the switch? If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler lock.
*/
if (ntcb->lockcount > 0)
{
@@ -120,15 +123,30 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
}
else
{
/* No.. we may need to perform release our hold on the lock.
*
* REVISIT: It might be possible for two CPUs to hold the logic in
* some strange cornercases like:
*/
/* No.. we may need to release our hold on the lock. */
g_cpu_lockset &= ~(1 << this_cpu());
g_cpu_schedlock = ((g_cpu_lockset == 0) ? SP_UNLOCKED : SP_LOCKED);
}
/* Will interrupts be disabled after the switch? If irqcount is greater
* than zero, then this task/this CPU holds the IRQ lock.
*/
if (ntcb->irqcount > 0)
{
/* Yes... make sure that scheduling logic knows about this */
g_cpu_irqset |= (1 << this_cpu());
g_cpu_irqlock = SP_LOCKED;
}
else
{
/* No.. we may need to release our hold on the lock. */
g_cpu_irqset &= ~(1 << this_cpu());
g_cpu_irqlock = ((g_cpu_irqset == 0) ? SP_UNLOCKED : SP_LOCKED);
}
#endif
/* Inform the instrumentation layer that we are switching tasks */

View File

@@ -73,13 +73,17 @@ int sched_unlock(void)
if (rtcb && !up_interrupt_context())
{
/* Prevent context switches throughout the following */
/* Prevent context switches throughout the following.
*
* REVISIT: This is awkward. In the SMP case, enter_critical_section
* increments the lockcount!
*/
irqstate_t flags = enter_critical_section();
/* Decrement the preemption lock counter */
if (rtcb->lockcount)
if (rtcb->lockcount > 0)
{
rtcb->lockcount--;
}
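The REVISIT above is concrete: in the SMP configuration, enter_critical_section() calls sched_lock() when it first takes the IRQ lock, and sched_lock() increments lockcount. A sketch of the resulting sequence inside sched_unlock(), assuming the task enters with irqcount == 0 and lockcount == 1 (illustrative trace, not code from the commit):

/* flags = enter_critical_section();  irqcount 0 -> 1, which also runs
 *                                    sched_lock(): lockcount 1 -> 2
 *
 * rtcb->lockcount--;                 lockcount 2 -> 1
 *
 * The decrement cancels the increment made on entry instead of
 * releasing the caller's earlier sched_lock(); this looks like one of
 * the "issues" the commit message mentions.
 */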