diff --git a/sched/irq/irq.h b/sched/irq/irq.h
index 75d18755b9..586f86904f 100644
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -92,7 +92,25 @@ extern "C"
 #define EXTERN extern
 #endif
 
+/****************************************************************************
+ * Name: irq_initialize
+ *
+ * Description:
+ *   Configure the IRQ subsystem
+ *
+ ****************************************************************************/
+
 void weak_function irq_initialize(void);
+
+/****************************************************************************
+ * Name: irq_unexpected_isr
+ *
+ * Description:
+ *   An interrupt has been received for an IRQ that was never registered
+ *   with the system.
+ *
+ ****************************************************************************/
+
 int irq_unexpected_isr(int irq, FAR void *context);
 
 #undef EXTERN
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index c3467abfbb..efd0f2f7be 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -39,7 +39,6 @@
 
 #include <nuttx/config.h>
 
-#include <sched.h>
 #include <nuttx/irq.h>
 #include <nuttx/spinlock.h>
 
@@ -120,15 +119,12 @@ irqstate_t enter_critical_section(void)
        * We must avoid the case where a context switch occurs between taking
        * the g_cpu_irqlock and disabling interrupts.  Also, interrupt disables
        * must follow a stacked order.  We cannot allow other context switches
-       * to re-order the enabling/disabling of interrupts.  We can do both by
-       * locking the scheduler while interrupts are disabled.
+       * to re-order the enabling/disabling of interrupts.
        *
-       * NOTE that sched_lock() also uses a spinlock.  We will avoid
-       * deadlocks by assuring that the scheduler spinlock is always
-       * taken before the IRQ spinlock.
+       * The scheduler accomplishes this by treating the irqcount like
+       * the lockcount:  Both will disable pre-emption.
        */
 
-      sched_lock();
       spin_lock(&g_cpu_irqlock);
 
       rtcb->irqcount = 1;
@@ -174,22 +170,23 @@
         }
       else
         {
-          /* NO.. Release the spinlock to allow other access.
+          /* Release any ready-to-run tasks that have collected in
+           * g_pendingtasks if the scheduler is not locked.
            *
-           * REVISIT: There is a cornercase where multiple CPUs may think
-           * they are the holder of the IRQ spinlock.  We will need to disable
-           * the scheduler to assure that the following operation is atomic.
-           * Hmmm... but that could cause a deadlock!  What to do?  Perhaps
-           * an array of booleans instead of a bitset?
+           * NOTE: This operation has a very high likelihood of causing
+           * this task to be switched out!
            */
 
+          if (g_pendingtasks.head != NULL && rtcb->lockcount <= 0)
+            {
+              up_release_pending();
+            }
+
+          /* NO.. Release the spinlock to allow other access. */
+          g_cpu_irqset &= ~(1 << this_cpu());
           rtcb->irqcount = 0;
           spin_unlock(g_cpu_irqlock);
-
-          /* And re-enable pre-emption */
-
-          sched_unlock();
         }
 
   /* Restore the previous interrupt state which may still be interrupts
diff --git a/sched/irq/irq_unexpectedisr.c b/sched/irq/irq_unexpectedisr.c
index 490a6251e1..191d0a2194 100644
--- a/sched/irq/irq_unexpectedisr.c
+++ b/sched/irq/irq_unexpectedisr.c
@@ -45,26 +45,6 @@
 
 #include "irq/irq.h"
 
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Private Type Declarations
- ****************************************************************************/
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Variables
- ****************************************************************************/
-
-/****************************************************************************
- * Private Function Prototypes
- ****************************************************************************/
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index 5a0cac5c9d..2ccc5d723b 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -86,10 +86,12 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 
   /* Check if pre-emption is disabled for the current running task and if
    * the new ready-to-run task would cause the current running task to be
-   * pre-empted.
+   * pre-empted.  NOTE that IRQs disabled implies that pre-emption is
+   * also disabled.
    */
 
-  if (rtcb->lockcount && rtcb->sched_priority < btcb->sched_priority)
+  if ((rtcb->lockcount > 0 || rtcb->irqcount > 0) &&
+      rtcb->sched_priority < btcb->sched_priority)
     {
       /* Yes.  Preemption would occur!  Add the new ready-to-run task to the
        * g_pendingtasks task list for now.
@@ -112,7 +114,7 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
        * is now the new active task!
        */
 
-      ASSERT(!rtcb->lockcount && btcb->flink != NULL);
+      ASSERT(rtcb->lockcount == 0 && btcb->flink != NULL);
 
       btcb->task_state = TSTATE_TASK_RUNNING;
       btcb->flink->task_state = TSTATE_TASK_READYTORUN;
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index 602ed81761..f8d7f79f71 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -73,11 +73,7 @@ int sched_unlock(void)
 
   if (rtcb && !up_interrupt_context())
     {
-      /* Prevent context switches throughout the following.
-       *
-       * REVISIT: This is awkward.  In the SMP case, enter_critical_section
-       * increments the lockcount!
-       */
+      /* Prevent context switches throughout the following. */
 
       irqstate_t flags = enter_critical_section();
 
@@ -99,9 +95,6 @@ int sched_unlock(void)
 #ifdef CONFIG_SMP
           /* The lockcount has decremented to zero and we need to
            * release our hold on the lock.
-           *
-           * REVISIT: It might be possible for two CPUs to hold the logic in
-           * some strange cornercases like:
            */
 
           DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
@@ -115,10 +108,18 @@
 #endif
 
           /* Release any ready-to-run tasks that have collected in
-           * g_pendingtasks.
+           * g_pendingtasks.  In the SMP case, the scheduler remains
+           * locked if interrupts are disabled.
+           *
+           * NOTE: This operation has a very high likelihood of causing
+           * this task to be switched out!
            */
 
-          if (g_pendingtasks.head)
+#ifdef CONFIG_SMP
+          if (g_pendingtasks.head != NULL)
+#else
+          if (g_pendingtasks.head != NULL && rtcb->irqcount <= 0)
+#endif
             {
               up_release_pending();
             }
diff --git a/sched/task/task_exit.c b/sched/task/task_exit.c
index a8de026ca8..ed67101bd6 100644
--- a/sched/task/task_exit.c
+++ b/sched/task/task_exit.c
@@ -131,7 +131,7 @@ int task_exit(void)
    * task list now
    */
 
-  if (g_pendingtasks.head)
+  if (g_pendingtasks.head != NULL)
     {
       (void)sched_mergepending();
     }
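
Illustration (not part of the patch): the behavioral heart of this change is
the test in sched_addreadytorun(), which now treats a non-zero irqcount
exactly like a non-zero lockcount when deciding whether a higher-priority
task may pre-empt the running task.  The sketch below is a minimal,
self-contained C program modeling just that test.  struct fake_tcb and
would_defer() are invented names for illustration; only lockcount, irqcount,
and sched_priority correspond to fields of the real struct tcb_s used by the
patch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the scheduling-related fields of struct
     * tcb_s.  Higher sched_priority values mean higher priority, as in
     * NuttX.
     */

    struct fake_tcb
    {
      int lockcount;       /* Incremented by sched_lock() */
      int irqcount;        /* Incremented by enter_critical_section() (SMP) */
      int sched_priority;  /* Priority of the task */
    };

    /* Mirrors the patched test in sched_addreadytorun():  the new task is
     * deferred to g_pendingtasks when pre-emption is disabled, and
     * pre-emption now counts as disabled if EITHER lockcount OR irqcount
     * is non-zero.
     */

    static bool would_defer(const struct fake_tcb *rtcb,
                            const struct fake_tcb *btcb)
    {
      return (rtcb->lockcount > 0 || rtcb->irqcount > 0) &&
             rtcb->sched_priority < btcb->sched_priority;
    }

    int main(void)
    {
      struct fake_tcb running  = { 0, 0, 100 };
      struct fake_tcb incoming = { 0, 0, 200 };

      printf("no locks held:       defer = %d\n",
             would_defer(&running, &incoming));

      running.irqcount = 1;   /* As if inside enter_critical_section() */
      printf("in critical section: defer = %d\n",
             would_defer(&running, &incoming));

      running.irqcount  = 0;
      running.lockcount = 1;  /* As if after sched_lock() */
      printf("scheduler locked:    defer = %d\n",
             would_defer(&running, &incoming));
      return 0;
    }

This gate is why enter_critical_section() no longer needs sched_lock():  once
rtcb->irqcount is set while holding g_cpu_irqlock, the scheduler itself will
refuse to pre-empt the task, so the sched_lock()/sched_unlock() pair (and the
spinlock-ordering deadlock risk noted in the removed comments) can be dropped
from irq_csection.c.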
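A second sketch, for the SMP/non-SMP split added at the end of
sched_unlock():  release_pending() below is an invented helper that models
only the condition under which the patched code calls up_release_pending().
Per the patch comment, in the SMP case the scheduler remains locked while
interrupts are disabled, so only the non-SMP build needs the explicit
irqcount check.

    #include <stdbool.h>
    #include <stdio.h>

    /* Models the patched condition guarding up_release_pending() in
     * sched_unlock().  have_pending stands for g_pendingtasks.head != NULL;
     * irqcount stands for rtcb->irqcount.
     */

    static bool release_pending(bool smp, bool have_pending, int irqcount)
    {
      if (smp)
        {
          return have_pending;                    /* CONFIG_SMP branch */
        }
      else
        {
          return have_pending && irqcount <= 0;   /* non-SMP branch */
        }
    }

    int main(void)
    {
      printf("non-SMP, in critical section: %d\n",
             release_pending(false, true, 1));
      printf("non-SMP, no critical section: %d\n",
             release_pending(false, true, 0));
      printf("SMP, in critical section:     %d\n",
             release_pending(true, true, 1));
      return 0;
    }

On a single-CPU build the guard keeps sched_unlock() from triggering a
context switch while the caller is still inside a critical section; the patch
comment above explains why the SMP build can omit that check.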