Fix a contention problem in the previous critical section design

This commit is contained in:
Gregory Nutt 2016-02-15 08:50:20 -06:00
parent c7df82147f
commit 3c46fa3f9f
6 changed files with 49 additions and 51 deletions

View File

@@ -92,7 +92,25 @@ extern "C"
 #define EXTERN extern
 #endif
 
+/****************************************************************************
+ * Name: irq_initialize
+ *
+ * Description:
+ *   Configure the IRQ subsystem
+ *
+ ****************************************************************************/
+
+void weak_function irq_initialize(void);
+
+/****************************************************************************
+ * Name: irq_unexpected_isr
+ *
+ * Description:
+ *   An interrupt has been received for an IRQ that was never registered
+ *   with the system.
+ *
+ ****************************************************************************/
+
+int irq_unexpected_isr(int irq, FAR void *context);
+
 #undef EXTERN
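These two new declarations pair naturally: irq_initialize() installs irq_unexpected_isr() as the catch-all handler for every vector that no driver has claimed. Below is a minimal sketch of that pattern; the g_irqvector table, the NR_IRQS bound, and the omitted FAR qualifiers are assumptions for illustration, not the actual NuttX definitions.

#define NR_IRQS 64                     /* Assumed vector count for the sketch */

typedef int (*xcpt_t)(int irq, void *context);

static xcpt_t g_irqvector[NR_IRQS];    /* Stand-in vector table */

int irq_unexpected_isr(int irq, void *context)
{
  (void)irq;
  (void)context;

  /* The real handler would PANIC; the sketch just reports failure */

  return -1;
}

void irq_initialize(void)
{
  int i;

  /* Point every vector at the catch-all handler so a spurious interrupt
   * is trapped instead of jumping through a NULL function pointer.
   */

  for (i = 0; i < NR_IRQS; i++)
    {
      g_irqvector[i] = irq_unexpected_isr;
    }
}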

View File

@@ -39,7 +39,6 @@
 #include <nuttx/config.h>
 #include <nuttx/sched.h>
 #include <nuttx/spinlock.h>
 #include <arch/irq.h>
@@ -120,15 +119,12 @@ irqstate_t enter_critical_section(void)
        * We must avoid that case where a context switch occurs between
        * taking the g_cpu_irqlock and disabling interrupts.  Also, interrupt
        * disables must follow a stacked order.  We cannot allow other context
-       * switches to re-order the enabling/disabling of interrupts.  We can
-       * do both by locking the scheduler while interrupts are disabled.
+       * switches to re-order the enabling/disabling of interrupts.
        *
-       * NOTE that sched_lock() also uses a spinlock.  We will avoid
-       * deadlocks by assuring that the scheduler spinlock is always
-       * taken before the IRQ spinlock.
+       * The scheduler accomplishes this by treating the irqcount like
+       * lockcount:  Both will disable pre-emption.
        */
-
-      sched_lock();
       spin_lock(&g_cpu_irqlock);
       rtcb->irqcount = 1;
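The effect of this change is easier to see in a simplified model of the entry path. The sketch below assumes stub versions of up_irq_save(), spin_lock(), and this_task(), and omits the g_cpu_irqset bookkeeping and debug assertions of the real function; it is illustrative, not the verbatim NuttX code.

typedef unsigned int irqstate_t;

struct tcb_s                           /* Greatly reduced stand-in */
{
  int irqcount;                        /* Critical-section nesting depth */
};

extern irqstate_t up_irq_save(void);   /* Stubbed NuttX primitives */
extern struct tcb_s *this_task(void);
extern void spin_lock(volatile unsigned char *lock);
extern volatile unsigned char g_cpu_irqlock;

irqstate_t enter_critical_section(void)
{
  struct tcb_s *rtcb = this_task();

  /* Disable interrupts on this CPU before touching any shared state */

  irqstate_t flags = up_irq_save();

  if (rtcb->irqcount > 0)
    {
      /* Already in a critical section: just deepen the nesting.  No
       * sched_lock() is needed because a non-zero irqcount itself
       * disables pre-emption (see sched_addreadytorun()).
       */

      rtcb->irqcount++;
    }
  else
    {
      /* First entry: take the inter-CPU spinlock, then record depth 1 */

      spin_lock(&g_cpu_irqlock);
      rtcb->irqcount = 1;
    }

  return flags;
}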
@@ -174,22 +170,23 @@ void leave_critical_section(irqstate_t flags)
         }
       else
         {
-          /* NO.. Release the spinlock to allow other access.
+          /* Release any ready-to-run tasks that have collected in
+           * g_pendingtasks if the scheduler is not locked.
            *
-           * REVISIT: There is a corner case where multiple CPUs may think
-           * they are the holder of the IRQ spinlock.  We will need to disable
-           * the scheduler to assure that the following operation is atomic.
-           * Hmmm... but that could cause a deadlock!  What to do?  Perhaps
-           * an array of booleans instead of a bitset?
+           * NOTE: This operation has a very high likelihood of causing
+           * this task to be switched out!
            */
+
+          if (g_pendingtasks.head != NULL && rtcb->lockcount <= 0)
+            {
+              up_release_pending();
+            }
+
+          /* NO.. Release the spinlock to allow other access. */
 
           g_cpu_irqset &= ~(1 << this_cpu());
           rtcb->irqcount = 0;
           spin_unlock(g_cpu_irqlock);
-
-          /* And re-enable pre-emption */
-
-          sched_unlock();
         }
 
       /* Restore the previous interrupt state which may still be interrupts
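The counterpart exit path, sketched under the same assumptions as the entry sketch above (stubbed primitives, g_cpu_irqset bookkeeping omitted, not the verbatim NuttX code), shows the ordering that matters here: pending tasks are released before the nesting count and spinlock are dropped, and only when sched_lock() is not in effect, which is why the separate sched_unlock() call could be removed.

typedef unsigned int irqstate_t;

struct tcb_s                           /* Reduced stand-in for the real TCB */
{
  int irqcount;                        /* Critical-section nesting depth */
  int lockcount;                       /* sched_lock() nesting depth */
};

extern struct tcb_s *this_task(void);
extern void up_irq_restore(irqstate_t flags);
extern void up_release_pending(void);
extern void spin_unlock(volatile unsigned char *lock);  /* Stand-in; the
                                        * NuttX version of this era was a
                                        * macro taking the lock directly */
extern void *g_pendingtasks_head;      /* Models g_pendingtasks.head */
extern volatile unsigned char g_cpu_irqlock;

void leave_critical_section(irqstate_t flags)
{
  struct tcb_s *rtcb = this_task();

  if (rtcb->irqcount > 1)
    {
      /* Still nested: just unwind one level */

      rtcb->irqcount--;
    }
  else
    {
      /* Tasks that became ready while the critical section was held may
       * run now, but only if sched_lock() is not also in effect.  This
       * can switch the current task out.
       */

      if (g_pendingtasks_head != NULL && rtcb->lockcount <= 0)
        {
          up_release_pending();
        }

      /* Only then drop the nesting count and the inter-CPU spinlock */

      rtcb->irqcount = 0;
      spin_unlock(&g_cpu_irqlock);
    }

  /* Restore the interrupt state as it was on entry */

  up_irq_restore(flags);
}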

View File

@@ -45,26 +45,6 @@
 #include "irq/irq.h"
 
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Private Type Declarations
- ****************************************************************************/
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Variables
- ****************************************************************************/
-
-/****************************************************************************
- * Private Function Prototypes
- ****************************************************************************/
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/

View File

@@ -86,10 +86,12 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
   /* Check if pre-emption is disabled for the current running task and if
    * the new ready-to-run task would cause the current running task to be
-   * pre-empted.
+   * pre-empted.  NOTE that IRQs disabled implies that pre-emption is
+   * also disabled.
    */
 
-  if (rtcb->lockcount && rtcb->sched_priority < btcb->sched_priority)
+  if ((rtcb->lockcount > 0 || rtcb->irqcount > 0) &&
+      rtcb->sched_priority < btcb->sched_priority)
     {
       /* Yes.  Preemption would occur!  Add the new ready-to-run task to the
        * g_pendingtasks task list for now.
@@ -112,7 +114,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
    * is now the new active task!
    */
 
-  ASSERT(!rtcb->lockcount && btcb->flink != NULL);
+  ASSERT(rtcb->lockcount == 0 && btcb->flink != NULL);
 
   btcb->task_state = TSTATE_TASK_RUNNING;
   btcb->flink->task_state = TSTATE_TASK_READYTORUN;
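The new condition can be read as a single rule: pre-emption is deferred whenever either counter is non-zero. A self-contained restatement follows, with a simplified stand-in for struct tcb_s; this is an illustration of the predicate, not NuttX code.

#include <stdbool.h>

struct tcb_s                /* Reduced stand-in for the real TCB */
{
  int lockcount;            /* Incremented by sched_lock() */
  int irqcount;             /* Incremented by enter_critical_section() */
  int sched_priority;
};

/* Should the newly ready task 'btcb' be parked on g_pendingtasks instead
 * of pre-empting the running task 'rtcb'?  After this commit, EITHER
 * counter being non-zero defers the pre-emption.
 */

static bool defer_preemption(const struct tcb_s *rtcb,
                             const struct tcb_s *btcb)
{
  return (rtcb->lockcount > 0 || rtcb->irqcount > 0) &&
         rtcb->sched_priority < btcb->sched_priority;
}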

View File

@@ -73,11 +73,7 @@ int sched_unlock(void)
   if (rtcb && !up_interrupt_context())
     {
-      /* Prevent context switches throughout the following.
-       *
-       * REVISIT: This is awkward.  In the SMP case, enter_critical_section
-       * increments the lockcount!
-       */
+      /* Prevent context switches throughout the following. */
 
       irqstate_t flags = enter_critical_section();
@@ -99,9 +95,6 @@ int sched_unlock(void)
 #ifdef CONFIG_SMP
       /* The lockcount has decremented to zero and we need to
        * release our hold on the lock.
-       *
-       * REVISIT: It might be possible for two CPUs to hold the lock in
-       * some strange corner cases like:
        */
 
       DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
@@ -115,10 +108,18 @@ int sched_unlock(void)
 #endif
 
       /* Release any ready-to-run tasks that have collected in
-       * g_pendingtasks.
+       * g_pendingtasks.  In the SMP case, the scheduler remains
+       * locked if interrupts are disabled.
        *
        * NOTE: This operation has a very high likelihood of causing
        * this task to be switched out!
        */
 
-      if (g_pendingtasks.head)
+#ifdef CONFIG_SMP
+      if (g_pendingtasks.head != NULL && rtcb->irqcount <= 0)
+#else
+      if (g_pendingtasks.head != NULL)
+#endif
         {
           up_release_pending();
         }
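The tail of sched_unlock() then reduces to one decision: may the pending tasks run now? Here is a compact model under the same simplified stand-ins as the sketches above (g_pendingtasks reduced to a head pointer; the irqcount field exists only in SMP builds, hence the conditional); this is not the verbatim function.

#include <stddef.h>

struct tcb_s                      /* Reduced stand-in for the real TCB */
{
  int lockcount;                  /* sched_lock() nesting depth */
  int irqcount;                   /* Critical-section depth (SMP only) */
};

extern void *g_pendingtasks_head; /* Models g_pendingtasks.head */
extern void up_release_pending(void);

/* Called once the lockcount has decremented to zero: decide whether the
 * tasks parked on g_pendingtasks may run now.
 */

static void release_pending_if_allowed(struct tcb_s *rtcb)
{
#ifdef CONFIG_SMP
  /* In SMP, interrupts disabled (irqcount > 0) also keeps the scheduler
   * locked, so the release is deferred to leave_critical_section().
   */

  if (g_pendingtasks_head != NULL && rtcb->irqcount <= 0)
#else
  if (g_pendingtasks_head != NULL)
#endif
    {
      up_release_pending();       /* Very likely switches this task out */
    }
}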

View File

@@ -131,7 +131,7 @@ int task_exit(void)
    * task list now
    */
 
-  if (g_pendingtasks.head)
+  if (g_pendingtasks.head != NULL)
     {
       (void)sched_mergepending();
     }
}