Implement deferred IRQ locking. So far only for ARMv7-M.

Gregory Nutt 2016-12-23 07:55:41 -06:00
parent 5f9caad078
commit e6fff09ef8
7 changed files with 154 additions and 52 deletions

arch/arm/src/armv7-m/up_doirq.c

@@ -50,21 +50,7 @@
#include "up_arch.h"
#include "up_internal.h"
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Functions
- ****************************************************************************/
+#include "irq/irq.h"
/****************************************************************************
* Public Functions
@@ -107,15 +93,35 @@ uint32_t *up_doirq(int irq, uint32_t *regs)
* switch occurred during interrupt processing.
*/
+#ifdef CONFIG_SMP
+  /* In the SMP configuration, critical section management uses a "voting"
+   * algorithm, with the current task on each CPU casting its "vote" via
+   * the state of the TCB irqcount flag.  The irqcount for the current
+   * task on this CPU will be different if a context switch occurred.
+   */
+
+  if (regs != (uint32_t *)CURRENT_REGS)
+    {
+      /* A context switch has occurred, time for the current task on this
+       * CPU to cast its vote.
+       */
+
+      irq_restore_lock();
+    }
+#endif
/* Return the current state of CURRENT_REGS */
regs = (uint32_t *)CURRENT_REGS;
/* Restore the previous value of CURRENT_REGS. NULL would indicate that
* we are no longer in an interrupt handler. It will be non-NULL if we
- * are returning from a nested interrupt.
+ * are returning from a nested interrupt (which are NOT fully supported).
*/
CURRENT_REGS = savestate;
#endif
board_autoled_off(LED_INIRQ);
return regs;
}
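For readers new to the scheme, here is a minimal sketch of the "voting" idea. All *_sketch names are hypothetical illustrations, not NuttX APIs; in the real code, spin_setbit() and spin_clrbit() perform the equivalent updates to g_cpu_irqset and g_cpu_irqlock atomically, under the protection of g_cpu_irqsetlock.

#include <stdint.h>

static volatile uint8_t g_cpu_irqset_sketch;  /* One "vote" bit per CPU */
static volatile int g_cpu_irqlock_sketch;     /* 1 = critical section held */

/* Cast or withdraw one CPU's vote.  The lock is held while any CPU votes
 * and is released only when the last vote is withdrawn.  Unlike the real
 * implementation, this sketch is not atomic.
 */

static void irq_vote_sketch(int cpu, int holds_lock)
{
  if (holds_lock)
    {
      g_cpu_irqset_sketch |= (uint8_t)(1 << cpu);
      g_cpu_irqlock_sketch = 1;
    }
  else
    {
      g_cpu_irqset_sketch &= (uint8_t)~(1 << cpu);
      if (g_cpu_irqset_sketch == 0)
        {
          g_cpu_irqlock_sketch = 0;
        }
    }
}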

sched/irq/Make.defs

@@ -36,7 +36,7 @@
CSRCS += irq_initialize.c irq_attach.c irq_dispatch.c irq_unexpectedisr.c
ifeq ($(CONFIG_SMP),y)
-CSRCS += irq_csection.c
+CSRCS += irq_csection.c irq_restorelock.c
else ifeq ($(CONFIG_SCHED_INSTRUMENTATION_CSECTION),y)
CSRCS += irq_csection.c
endif

sched/irq/irq.h

@@ -106,6 +106,32 @@ void weak_function irq_initialize(void);
int irq_unexpected_isr(int irq, FAR void *context);
+/****************************************************************************
+ * Name: irq_restore_lock
+ *
+ * Description:
+ *   Restore the state of g_cpu_irqlock.  This function is called after a
+ *   context switch.  A consequence of the context switch is that the
+ *   global g_cpu_irqlock spinlock may need to change states.  However,
+ *   the actual realization of that change cannot occur until all context
+ *   switching operations have completed.  This function implements the
+ *   deferred setting of g_cpu_irqlock.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   g_cpu_irqlock is set upon entry.  It may or may not be set upon
+ *   return.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+void irq_restore_lock(void);
+#endif
#undef EXTERN
#ifdef __cplusplus
}

sched/irq/irq_csection.c

@@ -570,4 +570,4 @@ void leave_critical_section(irqstate_t flags)
}
#endif
-#endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION*/
+#endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION */

sched/irq/irq_restorelock.c

@@ -0,0 +1,95 @@
/****************************************************************************
* sched/irq/irq_restorelock.c
*
* Copyright (C) 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "sched/sched.h"
#include "irq/irq.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: irq_restore_lock
*
* Description:
*   Restore the state of g_cpu_irqlock.  This function is called after a
*   context switch.  A consequence of the context switch is that the
*   global g_cpu_irqlock spinlock may need to change states.  However,
*   the actual realization of that change cannot occur until all context
*   switching operations have completed.  This function implements the
*   deferred setting of g_cpu_irqlock.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
* Assumptions:
* g_cpu_irqlock is set upon entry. It may or may not be set upon return.
*
****************************************************************************/
void irq_restore_lock(void)
{
  FAR struct tcb_s *rtcb;
  int cpu;

  cpu  = this_cpu();
  rtcb = current_task(cpu);

  /* Adjust global IRQ controls.  If the irqcount of the newly running
   * task is greater than zero, then this task/CPU holds the IRQ lock.
   */

  if (rtcb->irqcount > 0)
    {
      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }
  else
    {
      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }
}
#endif /* CONFIG_SMP */

sched/sched/sched_addreadytorun.c

@@ -295,6 +295,11 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
/* Adjust global pre-emption controls. If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler
* lock.
+ *
+ * NOTE that the global IRQ controls cannot yet be changed. We
+ * must maintain the critical section until the full context
+ * switch is complete. irq_restore_lock() will perform that
+ * operation.
*/
if (btcb->lockcount > 0)
@@ -308,21 +313,6 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
&g_cpu_schedlock);
}
-  /* Adjust global IRQ controls.  If irqcount is greater than zero,
-   * then this task/this CPU holds the IRQ lock.
-   */
-
-  if (btcb->irqcount > 0)
-    {
-      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                  &g_cpu_irqlock);
-    }
-  else
-    {
-      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                  &g_cpu_irqlock);
-    }
/* If the following task is not locked to this CPU, then it must
* be moved to the g_readytorun list. Since it cannot be at the
* head of the list, we can do this without invoking any heavy
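The NOTE added above is the heart of this commit: the IRQ-lock vote may be applied only after the context switch has fully completed. A compilable sketch of the required ordering follows; every *_sketch name is a hypothetical stand-in, not the literal NuttX switch path.

static void update_ready_lists_sketch(void)
{
  /* Step 1: the ready-to-run lists and g_cpu_schedlock are adjusted
   * (sched_addreadytorun()/sched_removereadytorun()).  g_cpu_irqlock is
   * deliberately left untouched so that no other CPU can enter the
   * critical section while the switch is still in flight.
   */
}

static void swap_registers_sketch(void)
{
  /* Step 2: the CPU register state is exchanged; CURRENT_REGS changes. */
}

static void irq_restore_lock_sketch(void)
{
  /* Step 3: only now is it safe for the newly running task to cast its
   * irqcount vote, which is the job of irq_restore_lock() in this commit.
   */
}

static void context_switch_sketch(void)
{
  update_ready_lists_sketch();
  swap_registers_sketch();
  irq_restore_lock_sketch();  /* Deferred until the switch is complete */
}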

sched/sched/sched_removereadytorun.c

@@ -236,6 +236,10 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
/* Will pre-emption be disabled after the switch? If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler lock.
+ *
+ * NOTE that the global IRQ controls cannot yet be changed. We must
+ * maintain the critical section until the full context switch is
+ * complete. irq_restore_lock() will perform that operation.
*/
if (nxttcb->lockcount > 0)
@@ -253,25 +257,6 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
&g_cpu_schedlock);
}
-  /* Interrupts may be disabled after the switch.  If irqcount is greater
-   * than zero, then this task/this CPU holds the IRQ lock.
-   */
-
-  if (nxttcb->irqcount > 0)
-    {
-      /* Yes... make sure that scheduling logic knows about this */
-
-      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                  &g_cpu_irqlock);
-    }
-  else
-    {
-      /* No.. we may need to release our hold on the irq state. */
-
-      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                  &g_cpu_irqlock);
-    }
nxttcb->task_state = TSTATE_TASK_RUNNING;
/* All done, restart the other CPU (if it was paused). */