From e6fff09ef8c64ddf1a0418df3f55f350be7d73f0 Mon Sep 17 00:00:00 2001
From: Gregory Nutt <gnutt@nuttx.org>
Date: Fri, 23 Dec 2016 07:55:41 -0600
Subject: [PATCH] Implement deferred IRQ locking. So far only for ARMv7-M.

---
 arch/arm/src/armv7-m/up_doirq.c      | 38 ++++++-----
 sched/irq/Make.defs                  |  2 +-
 sched/irq/irq.h                      | 26 ++++++++
 sched/irq/irq_csection.c             |  2 +-
 sched/irq/irq_restorelock.c          | 95 ++++++++++++++++++++++++++++
 sched/sched/sched_addreadytorun.c    | 20 ++----
 sched/sched/sched_removereadytorun.c | 23 ++-----
 7 files changed, 154 insertions(+), 52 deletions(-)
 create mode 100644 sched/irq/irq_restorelock.c

diff --git a/arch/arm/src/armv7-m/up_doirq.c b/arch/arm/src/armv7-m/up_doirq.c
index b51c10e558..99d37f1564 100644
--- a/arch/arm/src/armv7-m/up_doirq.c
+++ b/arch/arm/src/armv7-m/up_doirq.c
@@ -50,21 +50,7 @@
 #include "up_arch.h"
 #include "up_internal.h"
 
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Data
- ****************************************************************************/
-
-/****************************************************************************
- * Private Functions
- ****************************************************************************/
+#include "irq/irq.h"
 
 /****************************************************************************
  * Public Functions
@@ -107,15 +93,35 @@ uint32_t *up_doirq(int irq, uint32_t *regs)
    * switch occurred during interrupt processing.
    */
 
+#ifdef CONFIG_SMP
+  /* In the SMP configuration, critical section management uses a "voting"
+   * algorithm, with the current task on each CPU casting its "vote" via
+   * the state of the TCB irqcount flag.  The irqcount for the current task
+   * on this CPU will be different if a context switch occurred.
+   */
+
+  if (regs != (uint32_t *)CURRENT_REGS)
+    {
+      /* A context switch has occurred; it is time for the current task on
+       * this CPU to cast its vote.
+       */
+
+      irq_restore_lock();
+    }
+#endif
+
+  /* Return the current state of CURRENT_REGS */
+
   regs = (uint32_t *)CURRENT_REGS;
 
   /* Restore the previous value of CURRENT_REGS.  NULL would indicate that
    * we are no longer in an interrupt handler.  It will be non-NULL if we
-   * are returning from a nested interrupt.
+   * are returning from a nested interrupt (NOT fully supported).
    */
 
   CURRENT_REGS = savestate;
 #endif
+
   board_autoled_off(LED_INIRQ);
   return regs;
 }
diff --git a/sched/irq/Make.defs b/sched/irq/Make.defs
index 9b5b264d66..b6178a6d3a 100644
--- a/sched/irq/Make.defs
+++ b/sched/irq/Make.defs
@@ -36,7 +36,7 @@
 CSRCS += irq_initialize.c irq_attach.c irq_dispatch.c irq_unexpectedisr.c
 
 ifeq ($(CONFIG_SMP),y)
-CSRCS += irq_csection.c
+CSRCS += irq_csection.c irq_restorelock.c
 else ifeq ($(CONFIG_SCHED_INSTRUMENTATION_CSECTION),y)
 CSRCS += irq_csection.c
 endif
diff --git a/sched/irq/irq.h b/sched/irq/irq.h
index 69b3d344b3..77b8740984 100644
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -106,6 +106,32 @@ void weak_function irq_initialize(void);
 
 int irq_unexpected_isr(int irq, FAR void *context);
 
+/****************************************************************************
+ * Name: irq_restore_lock
+ *
+ * Description:
+ *   Restore the state of g_cpu_irqlock.  This function is called after a
+ *   context switch.  A consequence of the context switch is that the
+ *   global g_cpu_irqlock spinlock may need to change states.  However, the
+ *   actual realization of that change cannot occur until all context
+ *   switching operations have completed.  This function implements the
+ *   deferred setting of g_cpu_irqlock.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   g_cpu_irqlock is set upon entry.  It may or may not be set upon return.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+void irq_restore_lock(void);
+#endif
+
 #undef EXTERN
 #ifdef __cplusplus
 }
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 935cbfd3af..c9f9f91c18 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -570,4 +570,4 @@ void leave_critical_section(irqstate_t flags)
 }
 #endif
 
-#endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION*/
+#endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION */
diff --git a/sched/irq/irq_restorelock.c b/sched/irq/irq_restorelock.c
new file mode 100644
index 0000000000..a798ff483d
--- /dev/null
+++ b/sched/irq/irq_restorelock.c
@@ -0,0 +1,95 @@
+/****************************************************************************
+ * sched/irq/irq_restorelock.c
+ *
+ * Copyright (C) 2016 Gregory Nutt. All rights reserved.
+ * Author: Gregory Nutt <gnutt@nuttx.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. Neither the name NuttX nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include "sched/sched.h"
+#include "irq/irq.h"
+
+#ifdef CONFIG_SMP
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: irq_restore_lock
+ *
+ * Description:
+ *   Restore the state of g_cpu_irqlock.  This function is called after a
+ *   context switch.  A consequence of the context switch is that the
+ *   global g_cpu_irqlock spinlock may need to change states.  However, the
+ *   actual realization of that change cannot occur until all context
+ *   switching operations have completed.  This function implements the
+ *   deferred setting of g_cpu_irqlock.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   g_cpu_irqlock is set upon entry.  It may or may not be set upon return.
+ *
+ ****************************************************************************/
+
+void irq_restore_lock(void)
+{
+  FAR struct tcb_s *rtcb;
+  int cpu;
+
+  cpu  = this_cpu();
+  rtcb = current_task(cpu);
+
+  /* Adjust global IRQ controls.  If the irqcount of the newly running
+   * task is greater than zero, then this task/CPU holds the IRQ lock.
+   */
+
+  if (rtcb->irqcount > 0)
+    {
+      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
+    }
+  else
+    {
+      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
+    }
+}
+
+#endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index 77b1492c53..b19cd41841 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -295,6 +295,11 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
       /* Adjust global pre-emption controls.  If the lockcount is
        * greater than zero, then this task/this CPU holds the scheduler
        * lock.
+       *
+       * NOTE that the global IRQ controls cannot yet be changed.  We
+       * must maintain the critical section until the full context
+       * switch is complete.  irq_restore_lock() will perform that
+       * operation.
        */
 
       if (btcb->lockcount > 0)
@@ -308,21 +313,6 @@
                       &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls.  If irqcount is greater than zero,
-       * then this task/this CPU holds the IRQ lock
-       */
-
-      if (btcb->irqcount > 0)
-        {
-          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-        }
-      else
-        {
-          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-        }
-
       /* If the following task is not locked to this CPU, then it must
        * be moved to the g_readytorun list.  Since it cannot be at the
        * head of the list, we can do this without invoking any heavy
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index 5b6b663d97..99a039aded 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -236,6 +236,10 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
 
       /* Will pre-emption be disabled after the switch?  If the lockcount is
        * greater than zero, then this task/this CPU holds the scheduler lock.
+       *
+       * NOTE that the global IRQ controls cannot yet be changed.  We must
+       * maintain the critical section until the full context switch is
+       * complete.  irq_restore_lock() will perform that operation.
        */
 
       if (nxttcb->lockcount > 0)
@@ -253,25 +257,6 @@
                       &g_cpu_schedlock);
         }
 
-      /* Interrupts may be disabled after the switch.  If irqcount is
-       * greater than zero, then this task/this CPU holds the IRQ lock.
-       */
-
-      if (nxttcb->irqcount > 0)
-        {
-          /* Yes... make sure that scheduling logic knows about this */
-
-          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-        }
-      else
-        {
-          /* No.. we may need to release our hold on the irq state. */
-
-          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
-        }
-
       nxttcb->task_state = TSTATE_TASK_RUNNING;
 
       /* All done, restart the other CPU (if it was paused). */
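
--
As a minimal, self-contained sketch of the "voting" algorithm described
above: each CPU sets or clears its bit in a shared bitset according to the
irqcount of the task it is now running, and the global IRQ lock is
considered held for as long as any bit remains set.  The names below
(g_demo_irqset, demo_restore_lock, demo_lock_is_held) are illustrative
only and are not NuttX interfaces; the actual implementation updates
g_cpu_irqset with spin_setbit()/spin_clrbit() under g_cpu_irqsetlock,
which also manage the g_cpu_irqlock transition atomically.

#include <stdatomic.h>
#include <stdbool.h>

/* One bit per CPU.  A set bit means that the task now running on that
 * CPU holds the IRQ lock (its TCB irqcount is greater than zero).
 */

static atomic_uint g_demo_irqset;

/* Cast this CPU's "vote" after a context switch.  This is the deferred
 * step:  it must not run until the context switch is fully complete.
 */

static void demo_restore_lock(int cpu, int irqcount)
{
  unsigned int bit = 1u << cpu;

  if (irqcount > 0)
    {
      atomic_fetch_or(&g_demo_irqset, bit);   /* Vote to hold the lock */
    }
  else
    {
      atomic_fetch_and(&g_demo_irqset, ~bit); /* Withdraw this CPU's vote */
    }
}

/* The global IRQ lock is held while any CPU's bit remains set */

static bool demo_lock_is_held(void)
{
  return atomic_load(&g_demo_irqset) != 0;
}

This mirrors the constraint noted in sched_addreadytorun() and
sched_removereadytorun():  the critical section must be maintained until
the full context switch is complete, so the vote is cast from up_doirq()
only after a context switch has been detected.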