2016-02-13 17:25:36 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* sched/irq/irq_csection.c
|
|
|
|
*
|
2018-02-05 20:12:36 +01:00
|
|
|
* Copyright (C) 2016-2018 Gregory Nutt. All rights reserved.
|
2016-02-13 17:25:36 +01:00
|
|
|
* Author: Gregory Nutt <gnutt@nuttx.org>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* 3. Neither the name NuttX nor the names of its contributors may be
|
|
|
|
* used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
|
|
|
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
|
|
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
2016-02-19 22:57:07 +01:00
|
|
|
#include <sys/types.h>
|
|
|
|
|
2016-05-18 16:21:28 +02:00
|
|
|
#include <nuttx/init.h>
|
2016-02-13 17:25:36 +01:00
|
|
|
#include <nuttx/spinlock.h>
|
2016-03-17 16:49:43 +01:00
|
|
|
#include <nuttx/sched_note.h>
|
2016-02-13 17:25:36 +01:00
|
|
|
#include <arch/irq.h>
|
|
|
|
|
|
|
|
#include "sched/sched.h"
|
|
|
|
#include "irq/irq.h"
|
|
|
|
|
2018-11-25 18:50:15 +01:00
|
|
|
#ifdef CONFIG_IRQCOUNT
|
2016-02-13 17:25:36 +01:00
|
|
|
|
|
|
|
/****************************************************************************
 * Public Data
 ****************************************************************************/

#ifdef CONFIG_SMP
/* This is the spinlock that enforces critical sections when interrupts are
 * disabled.  It is taken by the first CPU to enter the critical section
 * and released only when all CPUs have left it.
 */

volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;

/* Used to keep track of which CPU(s) hold the IRQ lock.  g_cpu_irqset is
 * a bitset with one bit per CPU; g_cpu_irqsetlock serializes modifications
 * to g_cpu_irqset (see spin_setbit()/spin_clrbit()).
 */

volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
volatile cpu_set_t g_cpu_irqset SP_SECTION;

/* Handles nested calls to enter_critical_section() from interrupt
 * handlers.  Indexed by CPU; non-zero means that CPU's interrupt handler
 * is inside the critical section.
 */

volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
#endif
|
2016-02-15 15:06:17 +01:00
|
|
|
|
2016-11-22 18:34:16 +01:00
|
|
|
/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: irq_waitlock
 *
 * Description:
 *   Spin to get g_cpu_irqlock, handling a known deadlock condition:
 *
 *   A deadlock may occur if enter_critical_section is called from an
 *   interrupt handler.  Suppose:
 *
 *   - CPUn is in a critical section and has the g_cpu_irqlock spinlock.
 *   - CPUm takes an interrupt and attempts to enter the critical section.
 *   - It spins waiting on g_cpu_irqlock with interrupts disabled.
 *   - CPUn calls up_cpu_pause() to pause operation on CPUm.  This will
 *     issue an inter-CPU interrupt to CPUm
 *   - But interrupts are disabled on CPUm so the up_cpu_pause() is never
 *     handled, causing the deadlock.
 *
 *   This same deadlock can occur in the normal tasking case:
 *
 *   - A task on CPUn enters a critical section and has the g_cpu_irqlock
 *     spinlock.
 *   - Another task on CPUm attempts to enter the critical section but has
 *     to wait, spinning to get g_cpu_irqlock with interrupts disabled.
 *   - The task on CPUn causes a new task to become ready-to-run and the
 *     scheduler selects CPUm.  CPUm is requested to pause via a pause
 *     interrupt.
 *   - But the task on CPUm is also attempting to enter the critical
 *     section.  Since it is spinning with interrupts disabled, CPUm cannot
 *     process the pending pause interrupt, causing the deadlock.
 *
 *   This function detects this deadlock condition while spinning with
 *   interrupts disabled.
 *
 * Input Parameters:
 *   cpu - The index of CPU that is trying to enter the critical section.
 *
 * Returned Value:
 *   True:  The g_cpu_irqlock spinlock has been taken.
 *   False: The g_cpu_irqlock spinlock has not been taken yet, but there is
 *          a pending pause interrupt request.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
static inline bool irq_waitlock(int cpu)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
  FAR struct tcb_s *tcb = current_task(cpu);

  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock(tcb, &g_cpu_irqlock);
#endif

  /* Duplicate the spin_lock() logic from spinlock.c, but adding the check
   * for the deadlock condition.
   */

  while (spin_trylock(&g_cpu_irqlock) == SP_LOCKED)
    {
      /* Is a pause request pending? */

      if (up_cpu_pausereq(cpu))
        {
          /* Yes.. some other CPU is requesting to pause this CPU!
           * Abort the wait and return false.
           */

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
          /* Notify that we are aborting the wait for the spinlock */

          sched_note_spinabort(tcb, &g_cpu_irqlock);
#endif

          return false;
        }

      SP_DSB();
    }

  /* We have g_cpu_irqlock! */

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
  /* Notify that we have the spinlock */

  sched_note_spinlocked(tcb, &g_cpu_irqlock);
#endif

  SP_DMB();
  return true;
}
#endif
|
|
|
|
|
2016-02-13 17:25:36 +01:00
|
|
|
/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: enter_critical_section
 *
 * Description:
 *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
 *   specific counter is incremented to indicate that the thread has IRQs
 *   disabled and to support nested calls to enter_critical_section().
 *
 * Returned Value:
 *   The interrupt state prior to disabling interrupts.  Pass this value
 *   to leave_critical_section() to restore it.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;
  irqstate_t ret;
  int cpu;

  /* Disable interrupts.
   *
   * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
   * architectures only support disabling interrupts on the local CPU.
   * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
   * unconditionally because we need to return valid interrupt status in any
   * event.
   * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
   * to prevent possible waits on the spinlock from interrupt handling on
   * the local CPU.
   */

try_again:
  ret = up_irq_save();

  /* Verify that the system has sufficiently initialized so that the task
   * lists are valid.
   */

  if (g_os_initstate >= OSINIT_TASKLISTS)
    {
      /* If called from an interrupt handler, then just take the spinlock.
       * If we are already in a critical section, this will lock the CPU
       * in the interrupt handler.  Sounds worse than it is.
       */

      if (up_interrupt_context())
        {
          /* We are in an interrupt handler.  How can this happen?
           *
           * 1. We were not in a critical section when the interrupt
           *    occurred.  In this case, the interrupt was entered with:
           *
           *    g_cpu_irqlock = SP_UNLOCKED.
           *    g_cpu_nestcount = 0
           *    All CPU bits in g_cpu_irqset should be zero
           *
           * 2. We were in a critical section and interrupts on this
           *    CPU were disabled -- this is an impossible case.
           *
           * 3. We were in critical section, but up_irq_save() only
           *    disabled local interrupts on a different CPU;
           *    Interrupts could still be enabled on this CPU.
           *
           *    g_cpu_irqlock = SP_LOCKED.
           *    g_cpu_nestcount = 0
           *    The bit in g_cpu_irqset for this CPU should be zero
           *
           * 4. An extension of 3 is that we may be re-entered numerous
           *    times from the same interrupt handler.  In that case:
           *
           *    g_cpu_irqlock = SP_LOCKED.
           *    g_cpu_nestcount > 0
           *    The bit in g_cpu_irqset for this CPU should be zero
           *
           * NOTE: However, the interrupt entry conditions can change due
           * to previous processing by the interrupt handler that may
           * instantiate a new thread that has irqcount > 0 and may then
           * set the bit in g_cpu_irqset and g_cpu_irqlock = SP_LOCKED
           */

          /* Handle nested calls to enter_critical_section() from the same
           * interrupt.
           */

          cpu = this_cpu();
          if (g_cpu_nestcount[cpu] > 0)
            {
              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          g_cpu_nestcount[cpu] < UINT8_MAX);
              g_cpu_nestcount[cpu]++;
            }

          /* This is the first call to enter_critical_section from the
           * interrupt handler.
           */

          else
            {
              /* Make sure that the g_cpu_irqlock() was not already set
               * by previous logic on this CPU that was executed by the
               * interrupt handler.  We know that the bit in g_cpu_irqset
               * for this CPU was zero on entry into the interrupt handler,
               * so if it is non-zero now then we know that was the case.
               */

              if ((g_cpu_irqset & (1 << cpu)) == 0)
                {
                  /* Wait until we can get the spinlock (meaning that we are
                   * no longer blocked by the critical section).
                   */

                  if (!irq_waitlock(cpu))
                    {
                      /* We are in a deadlock condition due to a pending
                       * pause request interrupt request.  Break the
                       * deadlock by handling the pause interrupt now.
                       */

                      DEBUGVERIFY(up_cpu_paused(cpu));
                    }
                }

              /* In any event, the nesting count is now one */

              g_cpu_nestcount[cpu] = 1;

              /* Also set the CPU bit so that other CPUs will be aware that this
               * CPU holds the critical section.
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);
            }
        }
      else
        {
          /* Normal tasking environment. */

          /* Get the TCB of the currently executing task on this CPU (avoid
           * using this_task() which can recurse.
           */

          cpu = this_cpu();
          rtcb = current_task(cpu);
          DEBUGASSERT(rtcb != NULL);

          /* Do we already have interrupts disabled? */

          if (rtcb->irqcount > 0)
            {
              /* Yes... make sure that the spinlock is set and increment the
               * IRQ lock count.
               *
               * NOTE: If irqcount > 0 then (1) we are in a critical section, and
               * (2) this CPU should hold the lock.
               */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          (g_cpu_irqset & (1 << this_cpu())) != 0 &&
                          rtcb->irqcount < INT16_MAX);
              rtcb->irqcount++;
            }
          else
            {
              /* If we get here with irqcount == 0, then we know that the
               * current task running on this CPU is not in a critical
               * section.  However other tasks on other CPUs may be in a
               * critical section.  If so, we must wait until they release
               * the spinlock.
               */

              DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);

              if (!irq_waitlock(cpu))
                {
                  /* We are in a deadlock condition due to a pending pause
                   * request interrupt.  Re-enable interrupts on this CPU
                   * and try again.  Briefly re-enabling interrupts should
                   * be sufficient to permit processing the pending pause
                   * request.
                   *
                   * NOTE: This should never happen on architectures like
                   * the Cortex-A; the inter-CPU interrupt (SGI) is not
                   * maskable.
                   */

                  up_irq_restore(ret);
                  goto try_again;
                }

              /* Then set the lock count to 1.
               *
               * Interrupt disables must follow a stacked order.  We
               * cannot allow other context switches to re-order the
               * enabling/disabling of interrupts.
               *
               * The scheduler accomplishes this by treating the irqcount
               * like lockcount:  Both will disable pre-emption.
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);
              rtcb->irqcount = 1;

              /* Note that we have entered the critical section */

#ifdef CONFIG_SCHED_CRITMONITOR
              sched_critmon_csection(rtcb, true);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
              sched_note_csection(rtcb, true);
#endif
            }
        }
    }

  /* Return interrupt status */

  return ret;
}
2018-11-25 18:50:15 +01:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
2016-03-22 00:08:07 +01:00
|
|
|
irqstate_t enter_critical_section(void)
|
|
|
|
{
|
2016-11-18 20:57:30 +01:00
|
|
|
irqstate_t ret;
|
|
|
|
|
|
|
|
/* Disable interrupts */
|
|
|
|
|
|
|
|
ret = up_irq_save();
|
|
|
|
|
2016-11-17 02:11:31 +01:00
|
|
|
/* Check if we were called from an interrupt handler and that the task
|
2016-05-18 16:21:28 +02:00
|
|
|
* lists have been initialized.
|
|
|
|
*/
|
2016-03-22 00:08:07 +01:00
|
|
|
|
2016-05-18 16:21:28 +02:00
|
|
|
if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS)
|
2016-03-22 00:08:07 +01:00
|
|
|
{
|
|
|
|
FAR struct tcb_s *rtcb = this_task();
|
|
|
|
DEBUGASSERT(rtcb != NULL);
|
|
|
|
|
2018-11-24 22:07:12 +01:00
|
|
|
/* Have we just entered the critical section? Or is this a nested
|
|
|
|
* call to enter_critical_section.
|
|
|
|
*/
|
|
|
|
|
|
|
|
DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < UINT16_MAX);
|
|
|
|
if (++rtcb->irqcount == 1)
|
|
|
|
{
|
|
|
|
/* Note that we have entered the critical section */
|
2016-03-22 00:08:07 +01:00
|
|
|
|
2018-11-24 17:32:45 +01:00
|
|
|
#ifdef CONFIG_SCHED_CRITMONITOR
|
2018-11-24 22:07:12 +01:00
|
|
|
sched_critmon_csection(rtcb, true);
|
2018-11-24 17:32:45 +01:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
|
2018-11-24 22:07:12 +01:00
|
|
|
sched_note_csection(rtcb, true);
|
2018-11-24 17:32:45 +01:00
|
|
|
#endif
|
2018-11-24 22:07:12 +01:00
|
|
|
}
|
2016-03-22 00:08:07 +01:00
|
|
|
}
|
|
|
|
|
2016-11-18 20:57:30 +01:00
|
|
|
/* Return interrupt status */
|
2016-03-22 00:08:07 +01:00
|
|
|
|
2016-11-18 20:57:30 +01:00
|
|
|
return ret;
|
2016-03-22 00:08:07 +01:00
|
|
|
}
|
|
|
|
#endif
|
2016-02-13 17:25:36 +01:00
|
|
|
|
|
|
|
/****************************************************************************
 * Name: leave_critical_section
 *
 * Description:
 *   Decrement the IRQ lock count and if it decrements to zero then release
 *   the spinlock.
 *
 * Input Parameters:
 *   flags - The interrupt state returned by the matching call to
 *           enter_critical_section().
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
void leave_critical_section(irqstate_t flags)
{
  int cpu;

  /* Verify that the system has sufficiently initialized so that the task
   * lists are valid.
   */

  if (g_os_initstate >= OSINIT_TASKLISTS)
    {
      /* If called from an interrupt handler, then just release the
       * spinlock.  The interrupt handling logic should already hold the
       * spinlock if enter_critical_section() has been called.  Unlocking
       * the spinlock will allow interrupt handlers on other CPUs to execute
       * again.
       */

      if (up_interrupt_context())
        {
          /* We are in an interrupt handler.  Check if the last call to
           * enter_critical_section() was nested.
           */

          cpu = this_cpu();
          if (g_cpu_nestcount[cpu] > 1)
            {
              /* Yes.. then just decrement the nesting count */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock));
              g_cpu_nestcount[cpu]--;
            }
          else
            {
              /* No, not nested.  Restore the g_cpu_irqset for this CPU
               * and release the spinlock (if necessary).
               */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          g_cpu_nestcount[cpu] == 1);

              FAR struct tcb_s *rtcb = current_task(cpu);
              DEBUGASSERT(rtcb != NULL);

              if (rtcb->irqcount <= 0)
                {
                  spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                              &g_cpu_irqlock);
                }

              g_cpu_nestcount[cpu] = 0;
            }
        }
      else
        {
          FAR struct tcb_s *rtcb;

          /* Get the TCB of the currently executing task on this CPU (avoid
           * using this_task() which can recurse.
           */

          cpu = this_cpu();
          rtcb = current_task(cpu);
          DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);

          /* Normal tasking context.  We need to coordinate with other
           * tasks.
           *
           * Will we still have interrupts disabled after decrementing the
           * count?
           */

          if (rtcb->irqcount > 1)
            {
              /* Yes... the spinlock should remain set */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock));
              rtcb->irqcount--;
            }
          else
            {
              /* No.. Note that we have left the critical section */

#ifdef CONFIG_SCHED_CRITMONITOR
              sched_critmon_csection(rtcb, false);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
              sched_note_csection(rtcb, false);
#endif

              /* Decrement our count on the lock.  If all CPUs have
               * released, then unlock the spinlock.
               */

              DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                          (g_cpu_irqset & (1 << cpu)) != 0);

              /* Check if releasing the lock held by this CPU will unlock the
               * critical section.
               */

              if ((g_cpu_irqset & ~(1 << cpu)) == 0)
                {
                  /* Yes.. Check if there are pending tasks and that pre-emption
                   * is also enabled.  This is necessary because we may have
                   * deferred the up_release_pending() call in sched_unlock()
                   * because we were within a critical section then.
                   */

                  if (g_pendingtasks.head != NULL && !sched_islocked_global())
                    {
                      /* Release any ready-to-run tasks that have collected
                       * in g_pendingtasks.  NOTE: This operation has a very
                       * high likelihood of causing this task to be switched
                       * out!
                       */

                      up_release_pending();
                    }
                }

              /* Now, possibly on return from a context switch, clear our
               * count on the lock.  If all CPUs have released the lock,
               * then unlock the global IRQ spinlock.
               */

              rtcb->irqcount = 0;
              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);

              /* Have all CPUs released the lock? */
            }
        }
    }

  /* Restore the previous interrupt state which may still be interrupts
   * disabled (but we don't have a mechanism to verify that now)
   */

  up_irq_restore(flags);
}
|
2018-11-24 17:32:45 +01:00
|
|
|
|
2018-11-25 18:50:15 +01:00
|
|
|
#else
|
2018-11-24 17:32:45 +01:00
|
|
|
|
2016-03-22 00:08:07 +01:00
|
|
|
void leave_critical_section(irqstate_t flags)
|
|
|
|
{
|
2016-05-18 16:21:28 +02:00
|
|
|
/* Check if we were called from an interrupt handler and that the tasks
|
|
|
|
* lists have been initialized.
|
|
|
|
*/
|
2016-03-22 00:08:07 +01:00
|
|
|
|
2016-05-18 16:21:28 +02:00
|
|
|
if (!up_interrupt_context() && g_os_initstate >= OSINIT_TASKLISTS)
|
2016-03-22 00:08:07 +01:00
|
|
|
{
|
|
|
|
FAR struct tcb_s *rtcb = this_task();
|
|
|
|
DEBUGASSERT(rtcb != NULL);
|
|
|
|
|
2018-11-24 22:07:12 +01:00
|
|
|
/* Have we left entered the critical section? Or are we still nested. */
|
|
|
|
|
|
|
|
DEBUGASSERT(rtcb->irqcount > 0);
|
|
|
|
if (--rtcb->irqcount <= 0)
|
|
|
|
{
|
|
|
|
/* Note that we have left the critical section */
|
2016-03-22 00:08:07 +01:00
|
|
|
|
2018-11-24 17:32:45 +01:00
|
|
|
#ifdef CONFIG_SCHED_CRITMONITOR
|
2018-11-24 22:07:12 +01:00
|
|
|
sched_critmon_csection(rtcb, false);
|
2018-11-24 17:32:45 +01:00
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
|
2018-11-24 22:07:12 +01:00
|
|
|
sched_note_csection(rtcb, false);
|
2018-11-24 17:32:45 +01:00
|
|
|
#endif
|
2018-11-24 22:07:12 +01:00
|
|
|
}
|
2016-03-22 00:08:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Restore the previous interrupt state. */
|
|
|
|
|
|
|
|
up_irq_restore(flags);
|
|
|
|
}
|
|
|
|
#endif
|
2016-02-13 17:25:36 +01:00
|
|
|
|
2016-12-29 15:17:10 +01:00
|
|
|
/****************************************************************************
 * Name: irq_cpu_locked
 *
 * Description:
 *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
 *   There is an interaction with pre-emption controls and IRQ locking:
 *   Even if the pre-emption is enabled, tasks will be forced to pend if
 *   the IRQ lock is also set UNLESS the CPU starting the task is the
 *   holder of the IRQ lock.
 *
 * Input Parameters:
 *   rtcb - Points to the blocked TCB that is ready-to-run
 *
 * Returned Value:
 *   true  - IRQs are locked by a different CPU.
 *   false - IRQs are unlocked OR if they are locked BUT this CPU
 *           is the holder of the lock.
 *
 *   Warning: These values are volatile and only valid at the instant that
 *   the CPU set was queried.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
bool irq_cpu_locked(int cpu)
{
  cpu_set_t irqset;

  /* g_cpu_irqset is not valid in early phases of initialization */

  if (g_os_initstate < OSINIT_OSREADY)
    {
      /* We are still single threaded.  In either state of g_cpu_irqlock,
       * the correct return value should always be false.
       */

      return false;
    }

#if defined(CONFIG_ARCH_HAVE_FETCHADD) && !defined(CONFIG_ARCH_GLOBAL_IRQDISABLE)
  /* If the global lockcount has been incremented then simply return true */

  if (g_global_lockcount > 0)
    {
      return true;
    }
#endif

  /* Test if g_cpu_irqlock is locked.  We don't really need to check
   * g_cpu_irqlock to do this, we can use g_cpu_irqset.
   *
   * Sample the g_cpu_irqset once.  That is an atomic operation.  All
   * subsequent operations will operate on the sampled cpu set.
   */

  irqset = (cpu_set_t)g_cpu_irqset;
  if (irqset != 0)
    {
      /* Some CPU holds the lock.  So g_cpu_irqlock should be locked.
       * Return false if the 'cpu' is the holder of the lock; return
       * true if g_cpu_irqlock is locked, but this CPU is not the
       * holder of the lock.
       */

      return ((irqset & (1 << cpu)) == 0);
    }

  /* No CPU holds the lock */

  else
    {
      /* In this case g_cpu_irqlock should be unlocked.  However, if
       * the lock was established in the interrupt handler AND there are
       * no bits set in g_cpu_irqset, that probably means only that
       * critical section was established from an interrupt handler.
       * Return false in either case.
       */

      return false;
    }
}
#endif
|
|
|
|
|
2018-11-25 18:50:15 +01:00
|
|
|
#endif /* CONFIG_IRQCOUNT */
|