From e3fe320e08f082779748522b98135669bc346ef2 Mon Sep 17 00:00:00 2001
From: Gregory Nutt
Date: Sat, 26 Nov 2016 08:47:03 -0600
Subject: [PATCH] SMP: Add support for linking spinlocks into a special,
 non-cached memory region.

---
 arch/arm/src/armv7-a/arm_cpupause.c      |  4 +-
 arch/sim/src/up_idle.c                   |  2 +-
 arch/sim/src/up_internal.h               |  4 +-
 arch/xtensa/src/common/xtensa_cpupause.c | 81 +++++++++++++++----
 .../src/esp32/esp32_intercpu_interrupt.c |  4 +-
 include/nuttx/spinlock.h                 | 23 ++++--
 sched/irq/irq.h                          |  6 +-
 sched/irq/irq_csection.c                 |  6 +-
 sched/sched/sched.h                      |  6 +-
 sched/sched/sched_lock.c                 |  6 +-
 sched/semaphore/spinlock.c               |  7 +-
 11 files changed, 109 insertions(+), 40 deletions(-)

diff --git a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c
index 1b5726aba9..8b8df44a73 100644
--- a/arch/arm/src/armv7-a/arm_cpupause.c
+++ b/arch/arm/src/armv7-a/arm_cpupause.c
@@ -69,8 +69,8 @@
  * so that it will be ready for the next pause operation.
  */
 
-static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
 
 /****************************************************************************
  * Public Functions
diff --git a/arch/sim/src/up_idle.c b/arch/sim/src/up_idle.c
index 2e218b319d..1912bf755c 100644
--- a/arch/sim/src/up_idle.c
+++ b/arch/sim/src/up_idle.c
@@ -105,7 +105,7 @@ void up_idle(void)
    * should not matter which, however.
    */
 
-  static volatile spinlock_t lock = SP_UNLOCKED;
+  static volatile spinlock_t lock SP_SECTION = SP_UNLOCKED;
 
   /* The one that gets the lock is the one that executes the IDLE operations */
 
diff --git a/arch/sim/src/up_internal.h b/arch/sim/src/up_internal.h
index 9facb343af..53f4e2cd42 100644
--- a/arch/sim/src/up_internal.h
+++ b/arch/sim/src/up_internal.h
@@ -214,8 +214,8 @@ extern volatile int g_uart_data_available;
  * so that it will be ready for the next pause operation.
  */
 
-volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
 #endif
 
 /****************************************************************************
diff --git a/arch/xtensa/src/common/xtensa_cpupause.c b/arch/xtensa/src/common/xtensa_cpupause.c
index 46bac69bbd..e310ad44da 100644
--- a/arch/xtensa/src/common/xtensa_cpupause.c
+++ b/arch/xtensa/src/common/xtensa_cpupause.c
@@ -54,39 +54,64 @@
  * Private Data
  ****************************************************************************/
 
-static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
 
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
 
 /****************************************************************************
- * Name: xtensa_pause_handler
+ * Name: up_cpu_pausereq
  *
  * Description:
- *   This is the handler for CPU_INTCODE_PAUSE CPU interrupt.  This
- *   implements up_cpu_pause() by performing the following operations:
- *
- *   1. The current task state at the head of the current assigned task
- *      list was saved when the interrupt was entered.
- *   2. This function simply waits on a spinlock, then returns.
- *   3. Upon return, the interrupt exit logic will restore the state of
- *      the new task at the head of the ready to run list.
+ *   Return true if a pause request is pending for this CPU.
  *
  * Input Parameters:
- *   None
+ *   cpu - The index of the CPU to be queried
  *
  * Returned Value:
- *   None
+ *   true  = a pause request is pending.
+ *   false = no pause request is pending.
  *
  ****************************************************************************/
 
-void xtensa_pause_handler(void)
+bool up_cpu_pausereq(int cpu)
+{
+  return spin_islocked(&g_cpu_paused[cpu]);
+}
+
+/****************************************************************************
+ * Name: up_cpu_paused
+ *
+ * Description:
+ *   Handle a pause request from another CPU.  Normally, this logic is
+ *   executed from the architecture-specific interrupt handling logic.
+ *   However, it is sometimes necessary to perform the pending pause
+ *   operation in other contexts where the interrupt cannot be taken in
+ *   order to avoid deadlocks.
+ *
+ *   This function performs the following operations:
+ *
+ *   1. It saves the current task state at the head of the current assigned
+ *      task list.
+ *   2. It waits on a spinlock, then
+ *   3. Returns from interrupt, restoring the state of the new task at the
+ *      head of the ready to run list.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused
+ *
+ * Returned Value:
+ *   On success, OK is returned.  Otherwise, a negated errno value
+ *   indicating the nature of the failure is returned.
+ *
+ ****************************************************************************/
+
+int up_cpu_paused(int cpu)
 {
   FAR struct tcb_s *otcb = this_task();
   FAR struct tcb_s *ntcb;
-  int cpu = up_cpu_index();
 
   /* Update scheduler parameters */
 
@@ -128,6 +153,32 @@ void xtensa_pause_handler(void)
   spin_unlock(&g_cpu_wait[cpu]);
 }
 
+/****************************************************************************
+ * Name: xtensa_pause_handler
+ *
+ * Description:
+ *   This is the handler for CPU_INTCODE_PAUSE CPU interrupt.  This
+ *   implements up_cpu_pause() by performing the following operations:
+ *
+ *   1. The current task state at the head of the current assigned task
+ *      list was saved when the interrupt was entered.
+ *   2. This function simply waits on a spinlock, then returns.
+ *   3. Upon return, the interrupt exit logic will restore the state of
+ *      the new task at the head of the ready to run list.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void xtensa_pause_handler(void)
+{
+  (void)up_cpu_paused(up_cpu_index());
+}
+
 /****************************************************************************
  * Name: up_cpu_pause
  *
diff --git a/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c b/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c
index 66ae7bac06..ccbe9de4e5 100644
--- a/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c
+++ b/arch/xtensa/src/esp32/esp32_intercpu_interrupt.c
@@ -59,11 +59,11 @@
 
 /* Single parameter passed with the inter-CPU interrupt */
 
-static volatile uint8_t g_intcode[CONFIG_SMP_NCPUS];
+static volatile uint8_t g_intcode[CONFIG_SMP_NCPUS] SP_SECTION;
 
 /* Spinlock protects parameter array */
 
-static volatile spinlock_t g_intercpu_spin[CONFIG_SMP_NCPUS] =
+static volatile spinlock_t g_intercpu_spin[CONFIG_SMP_NCPUS] SP_SECTION =
 {
   SP_UNLOCKED, SP_UNLOCKED
 };
diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h
index ee6bf65b64..afcc4a1853 100644
--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h
@@ -69,16 +69,29 @@
  *   DSB - Data syncrhonization barrier.
  */
 
-#define HAVE_DMB 1
-#ifndef SP_DMB
+#undef __SP_UNLOCK_FUNCTION
+#if !defined(SP_DMB)
 #  define SP_DMB()
-#  undef HAVE_DMB
+#else
+#  define __SP_UNLOCK_FUNCTION 1
 #endif
 
-#ifndef SP_DSB
+#if !defined(SP_DSB)
 #  define SP_DSB()
 #endif
 
+/* If the target CPU supports a data cache then it may be necessary to
+ * manage spinlocks in a special way, perhaps linking them all into a
+ * special non-cacheable memory region.
+ *
+ *   SP_SECTION - Special storage attributes may be required to force
+ *     spinlocks into a special, non-cacheable section.
+ */
+
+#if !defined(SP_SECTION)
+#  define SP_SECTION
+#endif
+
 /****************************************************************************
  * Public Types
  ****************************************************************************/
@@ -244,7 +257,7 @@ void spin_lockr(FAR struct spinlock_s *lock);
  *
  ****************************************************************************/
 
-#ifdef HAVE_DMB
+#ifdef __SP_UNLOCK_FUNCTION
 void spin_unlock(FAR volatile spinlock_t *lock);
 #else
 #  define spin_unlock(l)  do { *(l) = SP_UNLOCKED; } while (0)
diff --git a/sched/irq/irq.h b/sched/irq/irq.h
index 60d38443fb..69b3d344b3 100644
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -65,12 +65,12 @@ extern FAR xcpt_t g_irqvector[NR_IRQS];
  * disabled.
  */
 
-extern volatile spinlock_t g_cpu_irqlock;
+extern volatile spinlock_t g_cpu_irqlock SP_SECTION;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-extern volatile spinlock_t g_cpu_irqsetlock;
-extern volatile cpu_set_t g_cpu_irqset;
+extern volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
+extern volatile cpu_set_t g_cpu_irqset SP_SECTION;
 #endif
 
 /****************************************************************************
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 8c91852c1a..20660dd728 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -60,12 +60,12 @@
  * disabled.
  */
 
-volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
+volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-volatile spinlock_t g_cpu_irqsetlock;
-volatile cpu_set_t g_cpu_irqset;
+volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
+volatile cpu_set_t g_cpu_irqset SP_SECTION;
 #endif
 
 /****************************************************************************
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index 9e4bd64ba3..c8534e1399 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -353,12 +353,12 @@ extern volatile uint32_t g_cpuload_total;
  * least one CPU has pre-emption disabled.
  */
 
-extern volatile spinlock_t g_cpu_schedlock;
+extern volatile spinlock_t g_cpu_schedlock SP_SECTION;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-extern volatile spinlock_t g_cpu_locksetlock;
-extern volatile cpu_set_t g_cpu_lockset;
+extern volatile spinlock_t g_cpu_locksetlock SP_SECTION;
+extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
 
 #endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_lock.c b/sched/sched/sched_lock.c
index 36b53775d2..5f389eed57 100644
--- a/sched/sched/sched_lock.c
+++ b/sched/sched/sched_lock.c
@@ -109,12 +109,12 @@
  * least one CPU has pre-emption disabled.
  */
 
-volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
+volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-volatile spinlock_t g_cpu_locksetlock;
-volatile cpu_set_t g_cpu_lockset;
+volatile spinlock_t g_cpu_locksetlock SP_SECTION;
+volatile cpu_set_t g_cpu_lockset SP_SECTION;
 
 #endif /* CONFIG_SMP */
diff --git a/sched/semaphore/spinlock.c b/sched/semaphore/spinlock.c
index e03f14c72f..34a87126d2 100644
--- a/sched/semaphore/spinlock.c
+++ b/sched/semaphore/spinlock.c
@@ -144,7 +144,7 @@ void spin_lock(FAR volatile spinlock_t *lock)
  *
  ****************************************************************************/
 
-#ifdef HAVE_DMB
+#ifdef __SP_UNLOCK_FUNCTION
 void spin_unlock(FAR volatile spinlock_t *lock)
 {
   *lock = SP_UNLOCKED;
@@ -218,8 +218,11 @@ void spin_lockr(FAR struct spinlock_s *lock)
           up_irq_restore(flags);
           sched_yield();
           flags = up_irq_save();
+          SP_DSB();
         }
 
+      SP_DMB();
+
       /* Take one count on the lock */
 
       lock->sp_cpu = cpu;
@@ -238,8 +241,10 @@ void spin_lockr(FAR struct spinlock_s *lock)
 
   while (up_testset(&lock->sp_lock) == SP_LOCKED)
     {
       sched_yield();
+      SP_DSB();
     }
 
+  SP_DMB();
 #endif /* CONFIG_SMP */
 }
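
---

A minimal sketch of how an architecture with a data cache might use the new
SP_SECTION hook.  The GCC attribute syntax, the ".spinlock" section name, and
the NCACHE_RAM output region below are illustrative assumptions, not part of
this patch; SP_SECTION remains empty unless the port defines it before
include/nuttx/spinlock.h is processed.

/* In an architecture-specific header included before <nuttx/spinlock.h>,
 * assuming a GCC-compatible toolchain:
 */

#define SP_SECTION __attribute__((section(".spinlock")))

/* With that definition, a declaration such as
 *
 *   static volatile spinlock_t lock SP_SECTION = SP_UNLOCKED;
 *
 * places the lock in the .spinlock input section.  The board linker script
 * would then need to collect that section into a RAM region that the MMU or
 * MPU maps as non-cacheable, for example:
 *
 *   .spinlock (NOLOAD) :
 *     {
 *       *(.spinlock)
 *     } > NCACHE_RAM
 */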