SMP: Add support for linking spinlocks into a special, non-cached memory region.

Gregory Nutt 2016-11-26 08:47:03 -06:00
parent 1d06e786e1
commit e3fe320e08
11 changed files with 109 additions and 40 deletions
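For context: SP_SECTION is empty by default, but a port whose CPU has a data cache can define it so that every spinlock is collected into a dedicated section, which the linker script then places in non-cacheable memory. A minimal sketch of what such a port might provide (the attribute, section, and region names below are illustrative assumptions, not part of this commit):

/* Hypothetical architecture-specific header: collect all spinlocks in
 * a dedicated ".nocache" input section (the name is an assumption).
 */

#define SP_SECTION __attribute__((section(".nocache")))

/* Matching linker-script fragment (also illustrative), mapping that
 * section to a memory region which the MMU/MPU configures as
 * non-cacheable:
 *
 *   .nocache : { *(.nocache) } > NOCACHE_RAM
 */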

View File

@@ -69,8 +69,8 @@
* so that it will be ready for the next pause operation.
*/
-static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
/****************************************************************************
* Public Functions

View File

@@ -105,7 +105,7 @@ void up_idle(void)
* should not matter which, however.
*/
-static volatile spinlock_t lock = SP_UNLOCKED;
+static volatile spinlock_t lock SP_SECTION = SP_UNLOCKED;
/* The one that gets the lock is the one that executes the IDLE operations */

View File

@@ -214,8 +214,8 @@ extern volatile int g_uart_data_available;
* so that it will be ready for the next pause operation.
*/
-volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
#endif
/****************************************************************************

View File

@@ -54,39 +54,64 @@
* Private Data
****************************************************************************/
-static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
-static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
+static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
+static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
- * Name: xtensa_pause_handler
+ * Name: up_cpu_pausereq
*
* Description:
- *   This is the handler for CPU_INTCODE_PAUSE CPU interrupt.  This
- *   implements up_cpu_pause() by performing the following operations:
- *
- *   1. The current task state at the head of the current assigned task
- *      list was saved when the interrupt was entered.
- *   2. This function simply waits on a spinlock, then returns.
- *   3. Upon return, the interrupt exit logic will restore the state of
- *      the new task at the head of the ready to run list.
+ *   Return true if a pause request is pending for this CPU.
*
* Input Parameters:
- *   None
+ *   cpu - The index of the CPU to be queried
*
* Returned Value:
- *   None
+ *   true  = a pause request is pending.
+ *   false = no pause request is pending.
*
****************************************************************************/
-void xtensa_pause_handler(void)
+bool up_cpu_pausereq(int cpu)
{
+  return spin_islocked(&g_cpu_paused[cpu]);
+}
+/****************************************************************************
+ * Name: up_cpu_paused
+ *
+ * Description:
+ *   Handle a pause request from another CPU.  Normally, this logic is
+ *   executed from the architecture-specific interrupt handling logic
+ *   (here, xtensa_pause_handler()).  However, it is sometimes necessary
+ *   to perform the pending pause operation in other contexts where the
+ *   interrupt cannot be taken in order to avoid deadlocks.
+ *
+ *   This function performs the following operations:
+ *
+ *   1. It saves the current task state at the head of the current
+ *      assigned task list.
+ *   2. It waits on a spinlock, then
+ *   3. Returns from interrupt, restoring the state of the new task at
+ *      the head of the ready to run list.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused
+ *
+ * Returned Value:
+ *   On success, OK is returned.  Otherwise, a negated errno value
+ *   indicating the nature of the failure is returned.
+ *
+ ****************************************************************************/
+
+int up_cpu_paused(int cpu)
+{
FAR struct tcb_s *otcb = this_task();
FAR struct tcb_s *ntcb;
-  int cpu = up_cpu_index();
/* Update scheduler parameters */
@@ -128,6 +153,32 @@ void xtensa_pause_handler(void)
spin_unlock(&g_cpu_wait[cpu]);
}
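Splitting the query (up_cpu_pausereq()) from the operation (up_cpu_paused()) is what enables the deadlock avoidance described above: code that spins with interrupts disabled can notice a pending pause request and service it in-line, rather than waiting for an inter-CPU interrupt that can never be delivered. A hypothetical call site (the lock and loop are illustrative, not from this commit):

int cpu = up_cpu_index();

/* Spin for some other lock with interrupts masked.  If another CPU is
 * meanwhile trying to pause us, perform the pause operation in-line so
 * that neither CPU deadlocks waiting on the other.
 */

while (up_testset(&g_some_lock) == SP_LOCKED)
  {
    if (up_cpu_pausereq(cpu))
      {
        (void)up_cpu_paused(cpu);
      }
  }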
+/****************************************************************************
+ * Name: xtensa_pause_handler
+ *
+ * Description:
+ *   This is the handler for the CPU_INTCODE_PAUSE CPU interrupt.  This
+ *   implements up_cpu_pause() by performing the following operations:
+ *
+ *   1. The current task state at the head of the current assigned task
+ *      list was saved when the interrupt was entered.
+ *   2. This function simply waits on a spinlock, then returns.
+ *   3. Upon return, the interrupt exit logic will restore the state of
+ *      the new task at the head of the ready to run list.
+ *
+ * Input Parameters:
+ *   None
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void xtensa_pause_handler(void)
+{
+  (void)up_cpu_paused(up_cpu_index());
+}
/****************************************************************************
* Name: up_cpu_pause
*

View File

@@ -59,11 +59,11 @@
/* Single parameter passed with the inter-CPU interrupt */
-static volatile uint8_t g_intcode[CONFIG_SMP_NCPUS];
+static volatile uint8_t g_intcode[CONFIG_SMP_NCPUS] SP_SECTION;
/* Spinlock protects parameter array */
-static volatile spinlock_t g_intercpu_spin[CONFIG_SMP_NCPUS] =
+static volatile spinlock_t g_intercpu_spin[CONFIG_SMP_NCPUS] SP_SECTION =
{
SP_UNLOCKED, SP_UNLOCKED
};

View File

@@ -69,16 +69,29 @@
* DSB - Data synchronization barrier.
*/
-#define HAVE_DMB 1
-#ifndef SP_DMB
+#undef __SP_UNLOCK_FUNCTION
+#if !defined(SP_DMB)
# define SP_DMB()
-# undef HAVE_DMB
+#else
+# define __SP_UNLOCK_FUNCTION 1
#endif
-#ifndef SP_DSB
+#if !defined(SP_DSB)
# define SP_DSB()
#endif
+/* If the target CPU supports a data cache then it may be necessary to
+ * manage spinlocks in a special way, perhaps linking them all into a
+ * special non-cacheable memory region.
+ *
+ * SP_SECTION - Special storage attributes may be required to force
+ *              spinlocks into a special, non-cacheable section.
+ */
+#if !defined(SP_SECTION)
+# define SP_SECTION
+#endif
/****************************************************************************
* Public Types
****************************************************************************/
@@ -244,7 +257,7 @@ void spin_lockr(FAR struct spinlock_s *lock);
*
****************************************************************************/
-#ifdef HAVE_DMB
+#ifdef __SP_UNLOCK_FUNCTION
void spin_unlock(FAR volatile spinlock_t *lock);
#else
# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
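The net effect of the __SP_UNLOCK_FUNCTION logic above is that spin_unlock() is a real function only when the architecture supplies a real memory barrier; otherwise it collapses to a plain store. As a sketch, an ARM-style port might supply barriers like these (illustrative, not part of this commit):

/* Illustrative ARM-style barrier definitions.  Defining SP_DMB selects
 * the out-of-line spin_unlock() so that the releasing store is ordered
 * by a data memory barrier.
 */

#define SP_DSB() __asm__ __volatile__ ("dsb" : : : "memory")
#define SP_DMB() __asm__ __volatile__ ("dmb" : : : "memory")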

View File

@@ -65,12 +65,12 @@ extern FAR xcpt_t g_irqvector[NR_IRQS];
* disabled.
*/
-extern volatile spinlock_t g_cpu_irqlock;
+extern volatile spinlock_t g_cpu_irqlock SP_SECTION;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
-extern volatile spinlock_t g_cpu_irqsetlock;
-extern volatile cpu_set_t g_cpu_irqset;
+extern volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
+extern volatile cpu_set_t g_cpu_irqset SP_SECTION;
#endif
/****************************************************************************

View File

@@ -60,12 +60,12 @@
* disabled.
*/
-volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
+volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
-volatile spinlock_t g_cpu_irqsetlock;
-volatile cpu_set_t g_cpu_irqset;
+volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
+volatile cpu_set_t g_cpu_irqset SP_SECTION;
#endif
/****************************************************************************

View File

@@ -353,12 +353,12 @@ extern volatile uint32_t g_cpuload_total;
* least one CPU has pre-emption disabled.
*/
-extern volatile spinlock_t g_cpu_schedlock;
+extern volatile spinlock_t g_cpu_schedlock SP_SECTION;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
-extern volatile spinlock_t g_cpu_locksetlock;
-extern volatile cpu_set_t g_cpu_lockset;
+extern volatile spinlock_t g_cpu_locksetlock SP_SECTION;
+extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
#endif /* CONFIG_SMP */

View File

@@ -109,12 +109,12 @@
* least one CPU has pre-emption disabled.
*/
-volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
+volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
-volatile spinlock_t g_cpu_locksetlock;
-volatile cpu_set_t g_cpu_lockset;
+volatile spinlock_t g_cpu_locksetlock SP_SECTION;
+volatile cpu_set_t g_cpu_lockset SP_SECTION;
#endif /* CONFIG_SMP */

View File

@@ -144,7 +144,7 @@ void spin_lock(FAR volatile spinlock_t *lock)
*
****************************************************************************/
-#ifdef HAVE_DMB
+#ifdef __SP_UNLOCK_FUNCTION
void spin_unlock(FAR volatile spinlock_t *lock)
{
*lock = SP_UNLOCKED;
@@ -218,8 +218,11 @@ void spin_lockr(FAR struct spinlock_s *lock)
up_irq_restore(flags);
sched_yield();
flags = up_irq_save();
+      SP_DSB();
}
+  SP_DMB();
/* Take one count on the lock */
lock->sp_cpu = cpu;
@@ -238,8 +241,10 @@
while (up_testset(&lock->sp_lock) == SP_LOCKED)
{
sched_yield();
+      SP_DSB();
}
+  SP_DMB();
#endif /* CONFIG_SMP */
}
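Taken together, the barriers added in these hunks follow a single pattern: a DSB inside the busy-wait loop so that outstanding memory accesses complete before each re-test, and a DMB once the lock is held so that the critical section cannot be reordered ahead of the acquisition. Condensed into one function, the pattern looks like this (a sketch built from up_testset() and the macros in spinlock.h):

void spin_lock(FAR volatile spinlock_t *lock)
{
  while (up_testset(lock) == SP_LOCKED)
    {
      SP_DSB();  /* Complete outstanding accesses before re-testing */
    }

  SP_DMB();      /* Keep the critical section after the acquisition */
}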