spinlock: Remove SP_SECTION

Summary:
- SP_SECTION was introduced to allocate spinlocks in a non-cacheable
  region, mainly for Cortex-A, to stabilize the NuttX SMP kernel
- However, all spinlocks are now allocated in a cacheable area and
  work without any problems (see the sketch below)
- So SP_SECTION should be removed to simplify the kernel code

Impact:
- None

Testing:
- Build test only

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
Author:    Masayuki Ishikawa
Date:      2021-04-21 10:27:47 +09:00
Committer: Xiang Xiao
Parent:    4a75b41b4d
Commit:    1b00e5d518

9 changed files with 19 additions and 31 deletions
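
Why cacheable spinlocks are safe here: on a coherent SMP system the
hardware keeps every CPU's cached view of the lock word consistent, so
the atomic test-and-set primitive works on ordinary cacheable memory.
A minimal sketch of the busy-wait loop behind NuttX's spin_lock(),
assuming the standard up_testset() primitive and the SP_DSB()/SP_DMB()
barrier macros (details vary per architecture):

void spin_lock(FAR volatile spinlock_t *lock)
{
  /* up_testset() atomically reads the lock word and stores SP_LOCKED.
   * On ARM it is typically built on LDREX/STREX, which require only
   * that the caches be coherent, not that the memory be non-cacheable.
   */

  while (up_testset(lock) == SP_LOCKED)
    {
      SP_DSB();          /* Data synchronization barrier while spinning */
    }

  SP_DMB();              /* Keep critical-section accesses from moving up */
}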


@@ -55,8 +55,8 @@
  * so that it will be ready for the next pause operation.
  */
 
-static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
-static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
+static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
+static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions


@@ -40,8 +40,8 @@
  * Private Data
  ****************************************************************************/
 
-static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS] SP_SECTION;
-static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS] SP_SECTION;
+static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
+static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
 
 /****************************************************************************
  * Public Functions


@@ -49,7 +49,7 @@
  ****************************************************************************/
 
 static volatile bool g_appcpu_started;
-static volatile spinlock_t g_appcpu_interlock SP_SECTION;
+static volatile spinlock_t g_appcpu_interlock;
 
 /****************************************************************************
  * ROM function prototypes


@@ -81,18 +81,6 @@ typedef struct
 #  define __SP_UNLOCK_FUNCTION 1
 #endif
 
-/* If the target CPU supports a data cache then it may be necessary to
- * manage spinlocks in a special way, perhaps linking them all into a
- * special non-cacheable memory region.
- *
- *   SP_SECTION - Special storage attributes may be required to force
- *   spinlocks into a special, non-cacheable section.
- */
-
-#if !defined(SP_SECTION)
-#  define SP_SECTION
-#endif
-
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
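
Before this change the macro removed above defaulted to empty, and a
port that needed the non-cacheable placement could override it before
including the header. A hedged sketch of such an override, assuming a
GCC-style toolchain; the section name .nocache, the variable g_my_lock,
and the mapping of that section to a non-cacheable region by the board's
linker script are all hypothetical:

/* Hypothetical per-port override: collect all spinlocks into one
 * section that the linker script places in non-cacheable (e.g. Device
 * or Strongly-Ordered) memory.
 */

#define SP_SECTION __attribute__((section(".nocache")))

static volatile spinlock_t g_my_lock SP_SECTION;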


@@ -110,12 +110,12 @@ extern const irq_mapped_t g_irqmap[NR_IRQS];
  * disabled.
  */
 
-extern volatile spinlock_t g_cpu_irqlock SP_SECTION;
+extern volatile spinlock_t g_cpu_irqlock;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-extern volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
-extern volatile cpu_set_t g_cpu_irqset SP_SECTION;
+extern volatile spinlock_t g_cpu_irqsetlock;
+extern volatile cpu_set_t g_cpu_irqset;
 
 /* Handles nested calls to enter_critical section from interrupt handlers */


@@ -45,12 +45,12 @@
  * disabled.
  */
 
-volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;
+volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
-volatile cpu_set_t g_cpu_irqset SP_SECTION;
+volatile spinlock_t g_cpu_irqsetlock;
+volatile cpu_set_t g_cpu_irqset;
 
 /* Handles nested calls to enter_critical section from interrupt handlers */
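
The three variables above are always updated together: each CPU sets or
clears its bit in g_cpu_irqset under g_cpu_irqsetlock, and g_cpu_irqlock
is held whenever the set is non-empty. A simplified sketch using NuttX's
spin_setbit()/spin_clrbit() helpers; the real enter_critical_section()
adds nesting counts and CPU-pause handling:

/* Mark this CPU as a holder of the IRQ lock: spin_setbit() sets bit
 * 'cpu' in g_cpu_irqset under g_cpu_irqsetlock and locks g_cpu_irqlock
 * (the "OR" lock) while the set is non-empty.
 */

spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
            &g_cpu_irqlock);

/* ... code that must run with interrupts globally locked ... */

spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
            &g_cpu_irqlock);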


@@ -38,7 +38,7 @@
 /* Used for access control */
 
-static volatile spinlock_t g_irq_spin SP_SECTION = SP_UNLOCKED;
+static volatile spinlock_t g_irq_spin = SP_UNLOCKED;
 
 /* Handles nested calls to spin_lock_irqsave and spin_unlock_irqrestore */
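
For context, g_irq_spin backs the global form of this API. A hedged
usage sketch, assuming the variant of spin_lock_irqsave() in which a
NULL lock argument selects the global g_irq_spin and nested calls are
counted per CPU:

/* Disable local interrupts and take the global spinlock; restore in
 * reverse order.  NULL selects g_irq_spin shown above.
 */

irqstate_t flags = spin_lock_irqsave(NULL);

/* ... access the shared resource ... */

spin_unlock_irqrestore(NULL, flags);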


@@ -337,16 +337,16 @@ extern volatile uint32_t g_cpuload_total;
  * least one CPU has pre-emption disabled.
  */
 
-extern volatile spinlock_t g_cpu_schedlock SP_SECTION;
+extern volatile spinlock_t g_cpu_schedlock;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-extern volatile spinlock_t g_cpu_locksetlock SP_SECTION;
-extern volatile cpu_set_t g_cpu_lockset SP_SECTION;
+extern volatile spinlock_t g_cpu_locksetlock;
+extern volatile cpu_set_t g_cpu_lockset;
 
 /* Used to lock tasklist to prevent from concurrent access */
 
-extern volatile spinlock_t g_cpu_tasklistlock SP_SECTION;
+extern volatile spinlock_t g_cpu_tasklistlock;
 
 #endif /* CONFIG_SMP */


@@ -97,12 +97,12 @@
  * least one CPU has pre-emption disabled.
  */
 
-volatile spinlock_t g_cpu_schedlock SP_SECTION = SP_UNLOCKED;
+volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
 
 /* Used to keep track of which CPU(s) hold the IRQ lock. */
 
-volatile spinlock_t g_cpu_locksetlock SP_SECTION;
-volatile cpu_set_t g_cpu_lockset SP_SECTION;
+volatile spinlock_t g_cpu_locksetlock;
+volatile cpu_set_t g_cpu_lockset;
 
 #endif /* CONFIG_SMP */
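
A simplified sketch of how these are used on the SMP path of
sched_lock(): the first nested lock marks this CPU in g_cpu_lockset,
which in turn holds g_cpu_schedlock while any CPU has pre-emption
disabled. Here rtcb is the running task's TCB; the real code adds more
bookkeeping:

if (rtcb->lockcount++ == 0)
  {
    /* First-level lock: record this CPU and take the OR-lock */

    spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
                &g_cpu_schedlock);
  }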