spinlock: Remove SP_SECTION
Summary: - SP_SECTION was introduced to allocate spinlocks in a non-cacheable region, mainly for Cortex-A, to stabilize the NuttX SMP kernel - However, all spinlocks are now allocated in a cacheable area and work without any problems - So SP_SECTION should be removed to simplify the kernel code Impact: - None Testing: - Build test only Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
This commit is contained in:
parent
4a75b41b4d
commit
1b00e5d518
@ -55,8 +55,8 @@
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
|
||||
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
|
@ -40,8 +40,8 @@
|
||||
* Private Data
|
||||
****************************************************************************/
|
||||
|
||||
static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
|
||||
static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
|
||||
static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
|
@ -49,7 +49,7 @@
|
||||
****************************************************************************/
|
||||
|
||||
static volatile bool g_appcpu_started;
|
||||
static volatile spinlock_t g_appcpu_interlock SP_SECTION;
|
||||
static volatile spinlock_t g_appcpu_interlock;
|
||||
|
||||
/****************************************************************************
|
||||
* ROM function prototypes
|
||||
|
@ -81,18 +81,6 @@ typedef struct
|
||||
# define __SP_UNLOCK_FUNCTION 1
|
||||
#endif
|
||||
|
||||
/* If the target CPU supports a data cache then it may be necessary to
|
||||
* manage spinlocks in a special way, perhaps linking them all into a
|
||||
* special non-cacheable memory region.
|
||||
*
|
||||
* SP_SECTION - Special storage attributes may be required to force
|
||||
* spinlocks into a special, non-cacheable section.
|
||||
*/
|
||||
|
||||
#if !defined(SP_SECTION)
|
||||
# define SP_SECTION
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Public Function Prototypes
|
||||
****************************************************************************/
|
||||
|
@ -110,12 +110,12 @@ extern const irq_mapped_t g_irqmap[NR_IRQS];
|
||||
* disabled.
|
||||
*/
|
||||
|
||||
extern volatile spinlock_t g_cpu_irqlock SP_SECTION;
|
||||
extern volatile spinlock_t g_cpu_irqlock;
|
||||
|
||||
/* Used to keep track of which CPU(s) hold the IRQ lock. */
|
||||
|
||||
extern volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
|
||||
extern volatile cpu_set_t g_cpu_irqset SP_SECTION;
|
||||
extern volatile spinlock_t g_cpu_irqsetlock;
|
||||
extern volatile cpu_set_t g_cpu_irqset;
|
||||
|
||||
/* Handles nested calls to enter_critical section from interrupt handlers */
|
||||
|
||||
|
@ -45,12 +45,12 @@
|
||||
* disabled.
|
||||
*/
|
||||
|
||||
volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;
|
||||
volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
|
||||
|
||||
/* Used to keep track of which CPU(s) hold the IRQ lock. */
|
||||
|
||||
volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
|
||||
volatile cpu_set_t g_cpu_irqset SP_SECTION;
|
||||
volatile spinlock_t g_cpu_irqsetlock;
|
||||
volatile cpu_set_t g_cpu_irqset;
|
||||
|
||||
/* Handles nested calls to enter_critical section from interrupt handlers */
|
||||
|
||||
|
@ -38,7 +38,7 @@
|
||||
|
||||
/* Used for access control */
|
||||
|
||||
static volatile spinlock_t g_irq_spin SP_SECTION = SP_UNLOCKED;
|
||||
static volatile spinlock_t g_irq_spin = SP_UNLOCKED;
|
||||
|
||||
/* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
|
||||
|
||||
|
@ -337,16 +337,16 @@ extern volatile uint32_t g_cpuload_total;
|
||||
* least one CPU has pre-emption disabled.
|
||||
*/
|
||||
|
||||
extern volatile spinlock_t g_cpu_schedlock SP_SECTION;
|
||||
extern volatile spinlock_t g_cpu_schedlock;
|
||||
|
||||
/* Used to keep track of which CPU(s) hold the IRQ lock. */
|
||||
|
||||
extern volatile spinlock_t g_cpu_locksetlock SP_SECTION;
|
||||
extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
|
||||
extern volatile spinlock_t g_cpu_locksetlock;
|
||||
extern volatile cpu_set_t g_cpu_lockset;
|
||||
|
||||
/* Used to lock tasklist to prevent from concurrent access */
|
||||
|
||||
extern volatile spinlock_t g_cpu_tasklistlock SP_SECTION;
|
||||
extern volatile spinlock_t g_cpu_tasklistlock;
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -97,12 +97,12 @@
|
||||
* least one CPU has pre-emption disabled.
|
||||
*/
|
||||
|
||||
volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
|
||||
volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
|
||||
|
||||
/* Used to keep track of which CPU(s) hold the IRQ lock. */
|
||||
|
||||
volatile spinlock_t g_cpu_locksetlock SP_SECTION;
|
||||
volatile cpu_set_t g_cpu_lockset SP_SECTION;
|
||||
volatile spinlock_t g_cpu_locksetlock;
|
||||
volatile cpu_set_t g_cpu_lockset;
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user