Fix a contention problem in the previous critical section design

Gregory Nutt 2016-02-15 08:50:20 -06:00
parent c7df82147f
commit 3c46fa3f9f
6 changed files with 49 additions and 51 deletions

View File

@@ -92,7 +92,25 @@ extern "C"
 #define EXTERN extern
 #endif

+/****************************************************************************
+ * Name: irq_initialize
+ *
+ * Description:
+ *   Configure the IRQ subsystem
+ *
+ ****************************************************************************/
+
 void weak_function irq_initialize(void);

+/****************************************************************************
+ * Name: irq_unexpected_isr
+ *
+ * Description:
+ *   An interrupt has been received for an IRQ that was never registered
+ *   with the system.
+ *
+ ****************************************************************************/
+
 int irq_unexpected_isr(int irq, FAR void *context);

 #undef EXTERN

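For context, the two declarations documented above normally work together inside the IRQ subsystem: irq_initialize() installs irq_unexpected_isr() as the default handler for every interrupt vector, so that any IRQ that fires without ever having been attached is caught and reported. A minimal sketch of that idea follows; the vector table name (g_irqvector) and its exact type are assumptions for illustration, not part of this commit.

#include <nuttx/irq.h>
#include <arch/irq.h>        /* For NR_IRQS (assumed) */

/* Assumed vector table:  one handler entry per interrupt */

static xcpt_t g_irqvector[NR_IRQS];

void weak_function irq_initialize(void)
{
  int i;

  /* Point every vector at the catch-all handler.  irq_attach() later
   * replaces individual entries with real handlers.
   */

  for (i = 0; i < NR_IRQS; i++)
    {
      g_irqvector[i] = irq_unexpected_isr;
    }
}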
View File

@@ -39,7 +39,6 @@
 #include <nuttx/config.h>
-#include <nuttx/sched.h>
 #include <nuttx/spinlock.h>
 #include <arch/irq.h>
@@ -120,15 +119,12 @@ irqstate_t enter_critical_section(void)
       * We must avoid that case where a context switch occurs between taking the
       * g_cpu_irqlock and disabling interrupts.  Also interrupt disables
       * must follow a stacked order.  We cannot allow other context switches to
-      * re-order the enabling/disabling of interrupts.  We can do both by
-      * locking the scheduler while interrupts are disabled.
+      * re-order the enabling/disabling of interrupts.
       *
-      * NOTE that sched_lock() also uses a spinlock.  We will avoid
-      * deadlocks by assuring that the scheduler spinlock is always
-      * taken before the IRQ spinlock.
+      * The scheduler accomplishes this by treating the irqcount like
+      * lockcount:  Both will disable pre-emption.
       */

-      sched_lock();
       spin_lock(&g_cpu_irqlock);
       rtcb->irqcount = 1;
@@ -174,22 +170,23 @@
         }
       else
         {
-          /* NO.. Release the spinlock to allow other access.
+          /* Release any ready-to-run tasks that have collected in
+           * g_pendingtasks if the scheduler is not locked.
            *
-           * REVISIT: There is a cornercase where multiple CPUs may think
-           * they are the holder of the IRQ spinlock.  We will need to disable
-           * the scheduler to assure that the following operation is atomic
-           * Hmmm... but that could cause a deadlock!  What to do?  Perhaps
-           * an array of booleans instead of a bitset?
+           * NOTE: This operation has a very high likelihood of causing
+           * this task to be switched out!
            */

+          if (g_pendingtasks.head != NULL && rtcb->lockcount <= 0)
+            {
+              up_release_pending();
+            }
+
+          /* NO.. Release the spinlock to allow other access. */
+
           g_cpu_irqset &= ~(1 << this_cpu());
           rtcb->irqcount = 0;
           spin_unlock(g_cpu_irqlock);
-
-          /* And re-enable pre-emption */
-
-          sched_unlock();
         }

       /* Restore the previous interrupt state which may still be interrupts

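Pieced together from the hunks above, the revised SMP-side flow looks roughly like the sketch below. It is only a condensed illustration of the logic the patch leaves in place, not the full file: the surrounding declarations (g_cpu_irqlock, g_cpu_irqset, g_pendingtasks), the nesting and interrupt-context checks, and the final arch-level interrupt disable/restore are assumed from context.

irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb = this_task();

  if (rtcb->irqcount > 0)
    {
      /* Already in a critical section:  Just bump the nesting count */

      rtcb->irqcount++;
    }
  else
    {
      /* Take the IRQ spinlock directly.  No sched_lock() is required:
       * the scheduler now treats a non-zero irqcount exactly like a
       * non-zero lockcount, so pre-emption is already inhibited.
       */

      spin_lock(&g_cpu_irqlock);
      rtcb->irqcount = 1;
      g_cpu_irqset |= (1 << this_cpu());
    }

  return up_irq_save();          /* Arch-level disable (helper name assumed) */
}

void leave_critical_section(irqstate_t flags)
{
  FAR struct tcb_s *rtcb = this_task();

  if (rtcb->irqcount > 1)
    {
      /* Still nested:  Just decrement the count */

      rtcb->irqcount--;
    }
  else
    {
      /* Dispatch tasks that queued in g_pendingtasks while pre-emption
       * was inhibited, unless the scheduler itself is still locked.
       * This may switch the current task out.
       */

      if (g_pendingtasks.head != NULL && rtcb->lockcount <= 0)
        {
          up_release_pending();
        }

      /* Then release the IRQ spinlock to allow other access */

      g_cpu_irqset &= ~(1 << this_cpu());
      rtcb->irqcount = 0;
      spin_unlock(g_cpu_irqlock);
    }

  up_irq_restore(flags);         /* Arch-level restore (helper name assumed) */
}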
View File

@@ -45,26 +45,6 @@
 #include "irq/irq.h"

-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Private Type Declarations
- ****************************************************************************/
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Variables
- ****************************************************************************/
-
-/****************************************************************************
- * Private Function Prototypes
- ****************************************************************************/
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/

View File

@@ -86,10 +86,12 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
   /* Check if pre-emption is disabled for the current running task and if
    * the new ready-to-run task would cause the current running task to be
-   * pre-empted.
+   * pre-empted.  NOTE that IRQs disabled implies that pre-emption is
+   * also disabled.
    */

-  if (rtcb->lockcount && rtcb->sched_priority < btcb->sched_priority)
+  if ((rtcb->lockcount > 0 || rtcb->irqcount > 0) &&
+      rtcb->sched_priority < btcb->sched_priority)
     {
       /* Yes.  Preemption would occur!  Add the new ready-to-run task to the
        * g_pendingtasks task list for now.
@@ -112,7 +114,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * is now the new active task!
        */

-      ASSERT(!rtcb->lockcount && btcb->flink != NULL);
+      ASSERT(rtcb->lockcount == 0 && btcb->flink != NULL);

       btcb->task_state = TSTATE_TASK_RUNNING;
       btcb->flink->task_state = TSTATE_TASK_READYTORUN;

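The condition added above encodes the new rule that a disabled-IRQ state implies disabled pre-emption. If it helps readability, the same test could be factored into a small predicate like the hypothetical helper below (not part of this commit):

/* Hypothetical helper, not in the patch:  pre-emption is considered
 * disabled when EITHER the scheduler lock count OR the critical-section
 * count of the running task is non-zero.
 */

static inline bool sched_preemption_disabled(FAR struct tcb_s *rtcb)
{
  return rtcb->lockcount > 0 || rtcb->irqcount > 0;
}

With such a helper, the check in sched_addreadytorun() would read: if (sched_preemption_disabled(rtcb) && rtcb->sched_priority < btcb->sched_priority).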
View File

@@ -73,11 +73,7 @@ int sched_unlock(void)
   if (rtcb && !up_interrupt_context())
     {
-      /* Prevent context switches throughout the following.
-       *
-       * REVISIT: This is awkward.  In the SMP case, enter_critical_section
-       * increments the lockcount!
-       */
+      /* Prevent context switches throughout the following. */

       irqstate_t flags = enter_critical_section();
@@ -99,9 +95,6 @@ int sched_unlock(void)
 #ifdef CONFIG_SMP
       /* The lockcount has decremented to zero and we need to release
        * our hold on the lock.
-       *
-       * REVISIT: It might be possible for two CPUs to hold the logic in
-       * some strange cornercases like:
        */

       DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
@@ -115,10 +108,18 @@ int sched_unlock(void)
 #endif

       /* Release any ready-to-run tasks that have collected in
-       * g_pendingtasks.
+       * g_pendingtasks.  In the SMP case, the scheduler remains
+       * locked if interrupts are disabled.
+       *
+       * NOTE: This operation has a very high likelihood of causing
+       * this task to be switched out!
        */

-      if (g_pendingtasks.head)
+#ifdef CONFIG_SMP
+      if (g_pendingtasks.head != NULL)
+#else
+      if (g_pendingtasks.head != NULL && rtcb->irqcount <= 0)
+#endif
         {
           up_release_pending();
         }

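Taken together with the irq_csection.c change above, the effect is that tasks collected in g_pendingtasks are only dispatched once both forms of pre-emption inhibition have been lifted. A usage-level sketch follows (illustrative only; the ordering shown is one legal nesting):

void example(void)
{
  irqstate_t flags;

  sched_lock();                      /* lockcount: 0 -> 1 */
  flags = enter_critical_section();  /* irqcount:  0 -> 1 */

  /* Pre-emption is inhibited here; any task made ready-to-run with a
   * higher priority is parked in g_pendingtasks instead of running.
   */

  leave_critical_section(flags);     /* irqcount back to 0, but lockcount is
                                      * still 1, so pending tasks stay parked
                                      */

  sched_unlock();                    /* lockcount back to 0:  pending tasks
                                      * are released here (in the non-SMP
                                      * case only if irqcount is also zero)
                                      */
}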
View File

@@ -131,7 +131,7 @@ int task_exit(void)
    * task list now
    */

-  if (g_pendingtasks.head)
+  if (g_pendingtasks.head != NULL)
     {
       (void)sched_mergepending();
     }