diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index dd1e1a6288..2177440c98 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -258,10 +258,8 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
 ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
-irqstate_t enter_critical_section_nonirq(void) noinstrument_function;
 irqstate_t enter_critical_section(void) noinstrument_function;
 #else
-#  define enter_critical_section_nonirq() up_irq_save()
 #  define enter_critical_section() up_irq_save()
 #endif
 
@@ -290,10 +288,8 @@ irqstate_t enter_critical_section(void) noinstrument_function;
 ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
-void leave_critical_section_nonirq(irqstate_t flags) noinstrument_function;
 void leave_critical_section(irqstate_t flags) noinstrument_function;
 #else
-#  define leave_critical_section_nonirq(f) up_irq_restore(f)
 #  define leave_critical_section(f) up_irq_restore(f)
 #endif
 
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index ef7629f8af..d4b71cfc61 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -182,13 +182,24 @@ static inline_function bool irq_waitlock(int cpu)
 #ifdef CONFIG_SMP
 irqstate_t enter_critical_section(void)
 {
+  FAR struct tcb_s *rtcb;
+  irqstate_t ret;
   int cpu;
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
+  /* Disable interrupts.
+   *
+   * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
+   * architectures only support disabling interrupts on the local CPU.
+   * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
+   * unconditionally because we need to return valid interrupt status in any
+   * event.
+   * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
+   * to prevent possible waits on the spinlock from interrupt handling on
+   * the local CPU.
    */
 
-  DEBUGASSERT(OSINIT_TASK_READY());
+try_again:
+  ret = up_irq_save();
 
   /* If called from an interrupt handler, then just take the spinlock.
    * If we are already in a critical section, this will lock the CPU
@@ -303,114 +314,81 @@ try_again_in_irq:
           DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
                       (g_cpu_irqset & (1 << cpu)) != 0);
         }
-
-      return 0;
     }
   else
     {
-      return enter_critical_section_nonirq();
-    }
-}
-
-inline_function irqstate_t enter_critical_section_nonirq(void)
-{
-  FAR struct tcb_s *rtcb;
-  irqstate_t ret;
-  int cpu;
-
-  /* Disable interrupts.
-   *
-   * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
-   * architectures only support disabling interrupts on the local CPU.
-   * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
-   * unconditionally because we need to return valid interrupt status in any
-   * event.
-   * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
-   * to prevent possible waits on the spinlock from interrupt handling on
-   * the local CPU.
-   */
-
-try_again:
-  ret = up_irq_save();
-
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
-   */
-
-  DEBUGASSERT(OSINIT_TASK_READY());
-  DEBUGASSERT(!up_interrupt_context());
-
-  /* Normal tasking environment.
-   *
-   * Get the TCB of the currently executing task on this CPU (avoid
-   * using this_task() which can recurse.
-   */
-
-  cpu = this_cpu();
-  rtcb = current_task(cpu);
-  DEBUGASSERT(rtcb != NULL);
-
-  /* Do we already have interrupts disabled? */
-
-  if (rtcb->irqcount > 0)
-    {
-      /* Yes... make sure that the spinlock is set and increment the
-       * IRQ lock count.
+      /* Normal tasking environment.
        *
-       * NOTE: If irqcount > 0 then (1) we are in a critical section,
-       * and (2) this CPU should hold the lock.
+       * Get the TCB of the currently executing task on this CPU (avoid
+       * using this_task() which can recurse.
        */
 
-      DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                  (g_cpu_irqset & (1 << this_cpu())) != 0 &&
-                  rtcb->irqcount < INT16_MAX);
-      rtcb->irqcount++;
-    }
-  else
-    {
-      /* If we get here with irqcount == 0, then we know that the
-       * current task running on this CPU is not in a critical
-       * section. However other tasks on other CPUs may be in a
-       * critical section. If so, we must wait until they release
-       * the spinlock.
-       */
+      cpu = this_cpu();
+      rtcb = current_task(cpu);
+      DEBUGASSERT(rtcb != NULL);
 
-      DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
+      /* Do we already have interrupts disabled? */
 
-      if (!irq_waitlock(cpu))
+      if (rtcb->irqcount > 0)
         {
-          /* We are in a deadlock condition due to a pending pause
-           * request interrupt. Re-enable interrupts on this CPU
-           * and try again. Briefly re-enabling interrupts should
-           * be sufficient to permit processing the pending pause
-           * request.
+          /* Yes... make sure that the spinlock is set and increment the
+           * IRQ lock count.
+           *
+           * NOTE: If irqcount > 0 then (1) we are in a critical section,
+           * and (2) this CPU should hold the lock.
            */
 
-          up_irq_restore(ret);
-          goto try_again;
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      (g_cpu_irqset & (1 << this_cpu())) != 0 &&
+                      rtcb->irqcount < INT16_MAX);
+          rtcb->irqcount++;
         }
+      else
+        {
+          /* If we get here with irqcount == 0, then we know that the
+           * current task running on this CPU is not in a critical
+           * section. However other tasks on other CPUs may be in a
+           * critical section. If so, we must wait until they release
+           * the spinlock.
+           */
 
-      /* Then set the lock count to 1.
-       *
-       * Interrupts disables must follow a stacked order. We
-       * cannot other context switches to re-order the enabling
-       * disabling of interrupts.
-       *
-       * The scheduler accomplishes this by treating the irqcount
-       * like lockcount: Both will disable pre-emption.
-       */
+          DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
 
-      cpu_irqlock_set(cpu);
-      rtcb->irqcount = 1;
+          if (!irq_waitlock(cpu))
+            {
+              /* We are in a deadlock condition due to a pending pause
+               * request interrupt. Re-enable interrupts on this CPU
+               * and try again. Briefly re-enabling interrupts should
+               * be sufficient to permit processing the pending pause
+               * request.
+               */
 
-      /* Note that we have entered the critical section */
+              up_irq_restore(ret);
+              goto try_again;
+            }
+
+          /* Then set the lock count to 1.
+           *
+           * Interrupts disables must follow a stacked order. We
+           * cannot other context switches to re-order the enabling
+           * disabling of interrupts.
+           *
+           * The scheduler accomplishes this by treating the irqcount
+           * like lockcount: Both will disable pre-emption.
+           */
+
+          cpu_irqlock_set(cpu);
+          rtcb->irqcount = 1;
+
+          /* Note that we have entered the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-      nxsched_critmon_csection(rtcb, true);
+          nxsched_critmon_csection(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-      sched_note_csection(rtcb, true);
+          sched_note_csection(rtcb, true);
 #endif
+        }
     }
 
   /* Return interrupt status */
@@ -422,56 +400,35 @@ irqstate_t enter_critical_section(void)
 {
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
-   */
-
-  DEBUGASSERT(OSINIT_TASK_READY());
-
-  /* Check if we were called from an interrupt handler */
-
-  if (!up_interrupt_context())
-    {
-      return enter_critical_section_nonirq();
-    }
-
-  /* Return interrupt status */
-
-  return 0;
-}
-
-inline_function irqstate_t enter_critical_section_nonirq(void)
-{
-  FAR struct tcb_s *rtcb;
   irqstate_t ret;
 
   /* Disable interrupts */
 
   ret = up_irq_save();
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
-   */
+  /* Check if we were called from an interrupt handler */
 
-  DEBUGASSERT(OSINIT_TASK_READY());
-  DEBUGASSERT(!up_interrupt_context());
-
-  rtcb = this_task();
-  DEBUGASSERT(rtcb != NULL);
-
-  /* Have we just entered the critical section? Or is this a nested
-   * call to enter_critical_section.
-   */
-
-  DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
-  if (++rtcb->irqcount == 1)
+  if (!up_interrupt_context())
     {
+      FAR struct tcb_s *rtcb = this_task();
+      DEBUGASSERT(rtcb != NULL);
+
+      /* Have we just entered the critical section? Or is this a nested
+       * call to enter_critical_section.
+       */
+
+      DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
+      if (++rtcb->irqcount == 1)
+        {
+          /* Note that we have entered the critical section */
+
 #ifdef CONFIG_SCHED_CRITMONITOR
-      nxsched_critmon_csection(rtcb, true);
+          nxsched_critmon_csection(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-      sched_note_csection(rtcb, true);
+          sched_note_csection(rtcb, true);
 #endif
+        }
     }
 
   /* Return interrupt status */
@@ -492,7 +449,6 @@ inline_function irqstate_t enter_critical_section_nonirq(void)
 #ifdef CONFIG_SMP
 void leave_critical_section(irqstate_t flags)
 {
-  FAR struct tcb_s *rtcb;
   int cpu;
 
   /* If called from an interrupt handler, then just release the
@@ -525,7 +481,7 @@ void leave_critical_section(irqstate_t flags)
           DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
                       g_cpu_nestcount[cpu] == 1);
 
-          rtcb = current_task(cpu);
+          FAR struct tcb_s *rtcb = current_task(cpu);
           DEBUGASSERT(rtcb != NULL);
           DEBUGASSERT((g_cpu_irqset & (1 << cpu)) != 0);
 
@@ -539,68 +495,63 @@ void leave_critical_section(irqstate_t flags)
     }
   else
     {
-      leave_critical_section_nonirq(flags);
-    }
-}
+      FAR struct tcb_s *rtcb;
 
-inline_function void leave_critical_section_nonirq(irqstate_t flags)
-{
-  FAR struct tcb_s *rtcb;
-  int cpu;
+      /* Get the TCB of the currently executing task on this CPU (avoid
+       * using this_task() which can recurse.
+       */
 
-  DEBUGASSERT(OSINIT_TASK_READY());
-  DEBUGASSERT(!up_interrupt_context());
+      cpu = this_cpu();
+      rtcb = current_task(cpu);
+      DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
 
-  /* Get the TCB of the currently executing task on this CPU (avoid
-   * using this_task() which can recurse.
-   */
+      /* Normal tasking context. We need to coordinate with other
+       * tasks.
+       *
+       * Will we still have interrupts disabled after decrementing the
+       * count?
+       */
 
-  cpu = this_cpu();
-  rtcb = current_task(cpu);
-  DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
+      if (rtcb->irqcount > 1)
+        {
+          /* Yes... the spinlock should remain set */
 
-  /* Normal tasking context. We need to coordinate with other
-   * tasks.
-   *
-   * Will we still have interrupts disabled after decrementing the
-   * count?
-   */
-
-  if (rtcb->irqcount > 1)
-    {
-      /* Yes... the spinlock should remain set */
-
-      DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
-      rtcb->irqcount--;
-    }
-  else
-    {
-      /* No.. Note that we have left the critical section */
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
+          rtcb->irqcount--;
+        }
+      else
+        {
+          /* No.. Note that we have left the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-      nxsched_critmon_csection(rtcb, false);
+          nxsched_critmon_csection(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-      sched_note_csection(rtcb, false);
+          sched_note_csection(rtcb, false);
 #endif
 
-      /* Decrement our count on the lock. If all CPUs have
-       * released, then unlock the spinlock.
-       */
+          /* Decrement our count on the lock. If all CPUs have
+           * released, then unlock the spinlock.
           */
 
-      DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                  (g_cpu_irqset & (1 << cpu)) != 0);
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      (g_cpu_irqset & (1 << cpu)) != 0);
 
-      /* Now, possibly on return from a context switch, clear our
-       * count on the lock. If all CPUs have released the lock,
-       * then unlock the global IRQ spinlock.
-       */
+          /* Now, possibly on return from a context switch, clear our
+           * count on the lock. If all CPUs have released the lock,
+           * then unlock the global IRQ spinlock.
+           */
 
-      rtcb->irqcount = 0;
-      cpu_irqlock_clear();
+          rtcb->irqcount = 0;
+          cpu_irqlock_clear();
 
-      /* Have all CPUs released the lock? */
+          /* Have all CPUs released the lock? */
+        }
     }
 
+  /* Restore the previous interrupt state which may still be interrupts
+   * disabled (but we don't have a mechanism to verify that now)
+   */
+
   up_irq_restore(flags);
 }
@@ -614,35 +565,29 @@ void leave_critical_section(irqstate_t flags)
   if (!up_interrupt_context())
     {
-      leave_critical_section_nonirq(flags);
-    }
-}
+      FAR struct tcb_s *rtcb = this_task();
+      DEBUGASSERT(rtcb != NULL);
 
-inline_function void leave_critical_section_nonirq(irqstate_t flags)
-{
-  FAR struct tcb_s *rtcb = this_task();
+      /* Have we left entered the critical section? Or are we still
+       * nested.
+       */
 
-  DEBUGASSERT(OSINIT_TASK_READY());
-  DEBUGASSERT(!up_interrupt_context());
-  DEBUGASSERT(rtcb != NULL);
-
-  /* Have we left entered the critical section? Or are we still
-   * nested.
-   */
-
-  DEBUGASSERT(rtcb->irqcount > 0);
-  if (--rtcb->irqcount <= 0)
-    {
-      /* Note that we have left the critical section */
+      DEBUGASSERT(rtcb->irqcount > 0);
+      if (--rtcb->irqcount <= 0)
+        {
+          /* Note that we have left the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-      nxsched_critmon_csection(rtcb, false);
+          nxsched_critmon_csection(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-      sched_note_csection(rtcb, false);
+          sched_note_csection(rtcb, false);
 #endif
+        }
     }
 
+  /* Restore the previous interrupt state. */
+
   up_irq_restore(flags);
 }
 #endif
diff --git a/sched/mqueue/mq_receive.c b/sched/mqueue/mq_receive.c
index ef14880ec7..1c2c26e133 100644
--- a/sched/mqueue/mq_receive.c
+++ b/sched/mqueue/mq_receive.c
@@ -98,7 +98,7 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
    * because messages can be sent from interrupt level.
    */
 
-  flags = enter_critical_section_nonirq();
+  flags = enter_critical_section();
 
   /* Get the message from the message queue */
 
@@ -116,7 +116,7 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
       ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
     }
 
-  leave_critical_section_nonirq(flags);
+  leave_critical_section(flags);
 
   return ret;
 }
diff --git a/sched/mqueue/mq_timedreceive.c b/sched/mqueue/mq_timedreceive.c
index 08c242f7f8..37e2d39eff 100644
--- a/sched/mqueue/mq_timedreceive.c
+++ b/sched/mqueue/mq_timedreceive.c
@@ -158,7 +158,7 @@ file_mq_timedreceive_internal(FAR struct file *mq, FAR char *msg,
    * because messages can be sent from interrupt level.
    */
 
-  flags = enter_critical_section_nonirq();
+  flags = enter_critical_section();
 
   /* Check if the message queue is empty. If it is NOT empty, then we
    * will not need to start timer.
@@ -233,7 +233,7 @@ file_mq_timedreceive_internal(FAR struct file *mq, FAR char *msg,
   /* We can now restore interrupts */
 
 errout_in_critical_section:
-  leave_critical_section_nonirq(flags);
+  leave_critical_section(flags);
 
   return ret;
 }
diff --git a/sched/semaphore/sem_clockwait.c b/sched/semaphore/sem_clockwait.c
index f5fb894969..2bfc0479af 100644
--- a/sched/semaphore/sem_clockwait.c
+++ b/sched/semaphore/sem_clockwait.c
@@ -108,7 +108,7 @@ int nxsem_clockwait(FAR sem_t *sem, clockid_t clockid,
    * enabled while we are blocked waiting for the semaphore.
    */
 
-  flags = enter_critical_section_nonirq();
+  flags = enter_critical_section();
 
   /* Try to take the semaphore without waiting. */
 
@@ -166,7 +166,7 @@ int nxsem_clockwait(FAR sem_t *sem, clockid_t clockid,
   /* We can now restore interrupts and delete the watchdog */
 
 out:
-  leave_critical_section_nonirq(flags);
+  leave_critical_section(flags);
 
   return ret;
 }
diff --git a/sched/semaphore/sem_tickwait.c b/sched/semaphore/sem_tickwait.c
index d7ad1cdf45..76d189fd0d 100644
--- a/sched/semaphore/sem_tickwait.c
+++ b/sched/semaphore/sem_tickwait.c
@@ -82,7 +82,7 @@ int nxsem_tickwait(FAR sem_t *sem, uint32_t delay)
    * enabled while we are blocked waiting for the semaphore.
    */
 
-  flags = enter_critical_section_nonirq();
+  flags = enter_critical_section();
 
   /* Try to take the semaphore without waiting. */
 
@@ -120,7 +120,7 @@ int nxsem_tickwait(FAR sem_t *sem, uint32_t delay)
   /* We can now restore interrupts */
 
 out:
-  leave_critical_section_nonirq(flags);
+  leave_critical_section(flags);
 
   return ret;
 }
diff --git a/sched/semaphore/sem_wait.c b/sched/semaphore/sem_wait.c
index 9179290975..d515ce6ddd 100644
--- a/sched/semaphore/sem_wait.c
+++ b/sched/semaphore/sem_wait.c
@@ -86,7 +86,7 @@ int nxsem_wait(FAR sem_t *sem)
    * handler.
    */
 
-  flags = enter_critical_section_nonirq();
+  flags = enter_critical_section();
 
   /* Make sure we were supplied with a valid semaphore. */
 
@@ -99,7 +99,7 @@ int nxsem_wait(FAR sem_t *sem)
   ret = nxsem_protect_wait(sem);
   if (ret < 0)
     {
-      leave_critical_section_nonirq(flags);
+      leave_critical_section(flags);
       return ret;
     }
 
@@ -224,7 +224,7 @@ int nxsem_wait(FAR sem_t *sem)
 #endif
     }
 
-  leave_critical_section_nonirq(flags);
+  leave_critical_section(flags);
 
   return ret;
 }
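For reviewers, a minimal usage sketch (not part of the patch) of the calling pattern this change converges on: with the `*_nonirq` variants removed, callers simply bracket the protected region with `enter_critical_section()`/`leave_critical_section()`, which now handle the nesting and interrupt-context checks internally. The critical-section API and `irqstate_t` come from `<nuttx/irq.h>` as shown in the diff; `my_dev_s` and `my_dev_read()` are hypothetical names used only for illustration.

```c
/* Illustrative sketch only -- not part of this patch.  The struct and
 * function names here are hypothetical; only the critical-section API
 * is from the NuttX headers touched above.
 */

#include <nuttx/irq.h>

struct my_dev_s
{
  volatile int pending;   /* Shared with an interrupt handler */
};

static int my_dev_take_pending(struct my_dev_s *dev)
{
  irqstate_t flags;
  int value;

  /* Single entry point for both task and interrupt context after this
   * change; no separate *_nonirq call is needed.
   */

  flags = enter_critical_section();
  value = dev->pending;
  dev->pending = 0;
  leave_critical_section(flags);

  return value;
}
```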