sched: remove g_cpu_schedlock, g_cpu_irqsetlock, and g_cpu_locksetlock

We can use g_cpu_lockset to determine whether the scheduler is currently locked.
All accesses to and modifications of g_cpu_lockset, g_cpu_irqlock, and g_cpu_irqset
already take place inside the critical section, so we can operate on them directly
instead of going through the extra spinlocks (see the sketch below).
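
As a minimal sketch (not the literal patch): the helper name below is hypothetical,
and the extern declaration mirrors the scheduler-internal one in sched/sched/sched.h.
The point is only that, once the caller holds the critical section, plain
read-modify-write operations replace the old spin_setbit()/spin_clrbit() calls.

#include <nuttx/irq.h>    /* enter_critical_section(), leave_critical_section() */
#include <sched.h>        /* cpu_set_t (location assumed for this sketch) */

extern volatile cpu_set_t g_cpu_lockset;   /* scheduler-internal bitset */

static void sketch_lock_this_cpu(int cpu)  /* hypothetical helper */
{
  /* Inside the critical section no other CPU can race on these globals,
   * so direct bit operations are sufficient.
   */

  irqstate_t flags = enter_critical_section();

  g_cpu_lockset |= (1 << cpu);    /* was spin_setbit(&g_cpu_lockset, ...) */

  /* ... work with pre-emption locked on this CPU ... */

  g_cpu_lockset &= ~(1 << cpu);   /* was spin_clrbit(&g_cpu_lockset, ...) */

  leave_critical_section(flags);
}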

Test:
We can use qemu for testing.

Compile:
  make distclean -j20; ./tools/configure.sh -l qemu-armv8a:nsh_smp; make -j20

Run:
  qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
    -machine virt,virtualization=on,gic-version=3 -net none \
    -chardev stdio,id=con,mux=on -serial chardev:con \
    -mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5 <hujun5@xiaomi.com>
hujun5 2023-12-12 11:53:18 +08:00 committed by Masayuki Ishikawa
parent 769e65ef8e
commit f7843e2198
11 changed files with 52 additions and 315 deletions


@@ -372,17 +372,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
#endif
}
/* In an SMP configuration, the interrupt disable logic also
* involves spinlocks that are configured per the TCB irqcount
* field. This is logically equivalent to
* enter_critical_section(). The matching call to
* leave_critical_section() will be performed in
* arm_sigdeliver().
*/
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
/* RESUME the other CPU if it was PAUSED */
if (cpu != me)


@@ -70,6 +70,18 @@
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SMP
# define cpu_irqlock_clear() \
do \
{ \
g_cpu_irqset = 0; \
SP_DMB(); \
g_cpu_irqlock = SP_UNLOCKED; \
SP_DSB(); \
} \
while (0)
#endif
/****************************************************************************
* Public Types
****************************************************************************/


@@ -344,52 +344,6 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock);
# define spin_is_locked(l) (*(l) == SP_LOCKED)
#endif
/****************************************************************************
* Name: spin_setbit
*
* Description:
* Makes setting a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_LOCKED while holding setlock
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SPINLOCK
void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
#endif
/****************************************************************************
* Name: spin_clrbit
*
* Description:
* Makes clearing a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_UNLOCKED if all bits become cleared in set
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SPINLOCK
void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock);
#endif
/****************************************************************************
* Name: spin_initialize
*


@@ -36,6 +36,18 @@
#include "irq/irq.h"
#ifdef CONFIG_IRQCOUNT
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#ifdef CONFIG_SMP
# define cpu_irqlock_set(cpu) \
do \
{ \
g_cpu_irqset |= (1 << cpu); \
} \
while (0)
#endif
/****************************************************************************
* Public Data
@@ -50,7 +62,6 @@ volatile spinlock_t g_cpu_irqlock = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
volatile spinlock_t g_cpu_irqsetlock;
volatile cpu_set_t g_cpu_irqset;
/* Handles nested calls to enter_critical section from interrupt handlers */
@@ -277,17 +288,7 @@ try_again_in_irq:
DEBUGVERIFY(up_cpu_paused(cpu));
paused = true;
/* NOTE: As the result of up_cpu_paused(cpu), this CPU
* might set g_cpu_irqset in nxsched_resume_scheduler()
* However, another CPU might hold g_cpu_irqlock.
* To avoid this situation, releae g_cpu_irqlock first.
*/
if ((g_cpu_irqset & (1 << cpu)) != 0)
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
/* NOTE: Here, this CPU does not hold g_cpu_irqlock,
* so call irq_waitlock(cpu) to acquire g_cpu_irqlock.
@@ -295,22 +296,21 @@ try_again_in_irq:
goto try_again_in_irq;
}
cpu_irqlock_set(cpu);
}
/* In any event, the nesting count is now one */
g_cpu_nestcount[cpu] = 1;
/* Also set the CPU bit so that other CPUs will be aware that
* this CPU holds the critical section.
*/
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
if (paused)
{
up_cpu_paused_restore();
}
DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
(g_cpu_irqset & (1 << cpu)) != 0);
}
}
else
@@ -375,8 +375,7 @@ try_again_in_irq:
* like lockcount: Both will disable pre-emption.
*/
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_set(cpu);
rtcb->irqcount = 1;
/* Note that we have entered the critical section */
@@ -482,11 +481,11 @@ void leave_critical_section(irqstate_t flags)
FAR struct tcb_s *rtcb = current_task(cpu);
DEBUGASSERT(rtcb != NULL);
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) != 0);
if (rtcb->irqcount <= 0)
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();
}
g_cpu_nestcount[cpu] = 0;
@@ -541,8 +540,7 @@ void leave_critical_section(irqstate_t flags)
*/
rtcb->irqcount = 0;
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();
/* Have all CPUs released the lock? */
}
@@ -643,8 +641,7 @@ void restore_critical_section(void)
if ((g_cpu_irqset & (1 << me)) != 0)
{
spin_clrbit(&g_cpu_irqset, me, &g_cpu_irqsetlock,
&g_cpu_irqlock);
cpu_irqlock_clear();
}
}
}


@@ -277,50 +277,8 @@ extern volatile clock_t g_cpuload_total;
*/
#ifdef CONFIG_SMP
/* In the multiple CPU, SMP case, disabling context switches will not give a
* task exclusive access to the (multiple) CPU resources (at least without
* stopping the other CPUs): Even though pre-emption is disabled, other
* threads will still be executing on the other CPUS.
*
* There are additional rules for this multi-CPU case:
*
* 1. There is a global lock count 'g_cpu_lockset' that includes a bit for
* each CPU: If the bit is '1', then the corresponding CPU has the
* scheduler locked; if '0', then the CPU does not have the scheduler
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
* list transitions has 'lockcount' > 0. This might happen when
* sched_lock() is called, or after a context switch that changes the
* TCB at the head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
* after a context switch that changes the TCB at the head of the
* g_assignedtasks[cpu] list.
* 4. Modification of the global 'g_cpu_lockset' must be protected by a
* spinlock, 'g_cpu_schedlock'. That spinlock would be taken when
* sched_lock() is called, and released when sched_unlock() is called.
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits should
* be cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
* 5. Each the time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 5. Logic that currently uses the currently running tasks lockcount
* instead uses the global 'g_cpu_schedlock'. A value of SP_UNLOCKED
* means that no CPU has pre-emption disabled; SP_LOCKED means that at
* least one CPU has pre-emption disabled.
*/
extern volatile spinlock_t g_cpu_schedlock;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
extern volatile spinlock_t g_cpu_locksetlock;
extern volatile cpu_set_t g_cpu_lockset;
/* Used to lock tasklist to prevent from concurrent access */
@@ -400,7 +358,7 @@ FAR struct tcb_s *this_task(void) noinstrument_function;
int nxsched_select_cpu(cpu_set_t affinity);
int nxsched_pause_cpu(FAR struct tcb_s *tcb);
# define nxsched_islocked_global() spin_is_locked(&g_cpu_schedlock)
# define nxsched_islocked_global() (g_cpu_lockset != 0)
# define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
#else


@@ -205,7 +205,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* situation.
*/
me = this_cpu();
if ((nxsched_islocked_global()) &&
task_state != TSTATE_TASK_ASSIGNED)
{
@@ -238,6 +237,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
* will need to stop that CPU.
*/
me = this_cpu();
if (cpu != me)
{
DEBUGVERIFY(up_cpu_pause(cpu));
@@ -277,13 +277,11 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
if (btcb->lockcount > 0)
{
spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset |= (1 << cpu);
}
else
{
spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset &= ~(1 << cpu);
}
/* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ


@@ -58,50 +58,8 @@
*/
#ifdef CONFIG_SMP
/* In the multiple CPU, SMP case, disabling context switches will not give a
* task exclusive access to the (multiple) CPU resources (at least without
* stopping the other CPUs): Even though pre-emption is disabled, other
* threads will still be executing on the other CPUS.
*
* There are additional rules for this multi-CPU case:
*
* 1. There is a global lock count 'g_cpu_lockset' that includes a bit for
* each CPU: If the bit is '1', then the corresponding CPU has the
* scheduler locked; if '0', then the CPU does not have the scheduler
* locked.
* 2. Scheduling logic would set the bit associated with the cpu in
* 'g_cpu_lockset' when the TCB at the head of the g_assignedtasks[cpu]
* list transitions has 'lockcount' > 0. This might happen when
* sched_lock() is called, or after a context switch that changes the
* TCB at the head of the g_assignedtasks[cpu] list.
* 3. Similarly, the cpu bit in the global 'g_cpu_lockset' would be cleared
* when the TCB at the head of the g_assignedtasks[cpu] list has
* 'lockcount' == 0. This might happen when sched_unlock() is called, or
* after a context switch that changes the TCB at the head of the
* g_assignedtasks[cpu] list.
* 4. Modification of the global 'g_cpu_lockset' must be protected by a
* spinlock, 'g_cpu_schedlock'. That spinlock would be taken when
* sched_lock() is called, and released when sched_unlock() is called.
* This assures that the scheduler does enforce the critical section.
* NOTE: Because of this spinlock, there should never be more than one
* bit set in 'g_cpu_lockset'; attempts to set additional bits should
* cause the CPU to block on the spinlock. However, additional bits
* could get set in 'g_cpu_lockset' due to the context switches on the
* various CPUs.
* 5. Each time the head of a g_assignedtasks[] list changes and the
* scheduler modifies 'g_cpu_lockset', it must also set 'g_cpu_schedlock'
* depending on the new state of 'g_cpu_lockset'.
* 5. Logic that currently uses the currently running tasks lockcount
* instead uses the global 'g_cpu_schedlock'. A value of SP_UNLOCKED
* means that no CPU has pre-emption disabled; SP_LOCKED means that at
* least one CPU has pre-emption disabled.
*/
volatile spinlock_t g_cpu_schedlock = SP_UNLOCKED;
/* Used to keep track of which CPU(s) hold the IRQ lock. */
volatile spinlock_t g_cpu_locksetlock;
volatile cpu_set_t g_cpu_lockset;
#endif /* CONFIG_SMP */
@@ -133,6 +91,7 @@ volatile cpu_set_t g_cpu_lockset;
int sched_lock(void)
{
FAR struct tcb_s *rtcb;
int cpu;
/* If the CPU supports suppression of interprocessor interrupts, then
* simple disabling interrupts will provide sufficient protection for
@@ -157,6 +116,7 @@ int sched_lock(void)
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
flags = enter_critical_section();
cpu = this_cpu();
/* We must hold the lock on this CPU before we increment the lockcount
* for the first time. Holding the lock is sufficient to lockout
@@ -171,18 +131,17 @@ int sched_lock(void)
* locked (or we would not be executing!).
*/
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
DEBUGASSERT((g_cpu_lockset & (1 << cpu)) == 0);
g_cpu_lockset |= (1 << cpu);
}
else
{
/* If this thread already has the scheduler locked, then
* g_cpu_schedlock() should indicate that the scheduler is locked
* g_cpu_lockset should indicate that the scheduler is locked
* and g_cpu_lockset should include the bit setting for this CPU.
*/
DEBUGASSERT(spin_is_locked(&g_cpu_schedlock) &&
(g_cpu_lockset & (1 << this_cpu())) != 0);
DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
}
/* A counter is used to support locking. This allows nested lock


@@ -233,15 +233,13 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
{
/* Yes... make sure that scheduling logic knows about this */
spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset |= (1 << cpu);
}
else
{
/* No.. we may need to perform release our hold on the lock. */
spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset &= ~(1 << cpu);
}
/* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ


@@ -105,11 +105,9 @@ int sched_unlock(void)
* release our hold on the lock.
*/
DEBUGASSERT(spin_is_locked(&g_cpu_schedlock) &&
(g_cpu_lockset & (1 << cpu)) != 0);
DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset &= ~(1 << cpu);
/* Release any ready-to-run tasks that have collected in
* g_pendingtasks.
@@ -121,7 +119,7 @@ int sched_unlock(void)
/* In the SMP case, the tasks remains pend(1) if we are
* in a critical section, i.e., g_cpu_irqlock is locked by other
* CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
* g_cpu_schedlock is locked. In those cases, the release of the
* g_cpu_lockset is locked. In those cases, the release of the
* pending tasks must be deferred until those conditions are met.
*
* There are certain conditions that we must avoid by preventing


@@ -329,130 +329,6 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
SP_SEV();
}
/****************************************************************************
* Name: spin_setbit
*
* Description:
* Makes setting a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_LOCKED while holding setlock
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
cpu_set_t prev;
#endif
irqstate_t flags;
/* Disable local interrupts to prevent being re-entered from an interrupt
* on the same CPU. This may not effect interrupt behavior on other CPUs.
*/
flags = up_irq_save();
/* Then, get the 'setlock' spinlock */
spin_lock(setlock);
/* Then set the bit and mark the 'orlock' as locked */
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
prev = *set;
#endif
*set |= (1 << cpu);
*orlock = SP_LOCKED;
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
if (prev == 0)
{
/* Notify that we have locked the spinlock */
sched_note_spinlock(this_task(), orlock, NOTE_SPINLOCK_LOCKED);
}
#endif
/* Release the 'setlock' and restore local interrupts */
spin_unlock(setlock);
up_irq_restore(flags);
}
#endif
/****************************************************************************
* Name: spin_clrbit
*
* Description:
* Makes clearing a CPU bit in a bitset an atomic action
*
* Input Parameters:
* set - A reference to the bitset to set the CPU bit in
* cpu - The bit number to be set
* setlock - A reference to the lock protecting the set
* orlock - Will be set to SP_UNLOCKED if all bits become cleared in set
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
FAR volatile spinlock_t *setlock,
FAR volatile spinlock_t *orlock)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
cpu_set_t prev;
#endif
irqstate_t flags;
/* Disable local interrupts to prevent being re-entered from an interrupt
* on the same CPU. This may not effect interrupt behavior on other CPUs.
*/
flags = up_irq_save();
/* First, get the 'setlock' spinlock */
spin_lock(setlock);
/* Then clear the bit in the CPU set. Set/clear the 'orlock' depending
* upon the resulting state of the CPU set.
*/
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
prev = *set;
#endif
*set &= ~(1 << cpu);
*orlock = (*set != 0) ? SP_LOCKED : SP_UNLOCKED;
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
if (prev != 0 && *set == 0)
{
/* Notify that we have unlocked the spinlock */
sched_note_spinlock(this_task(), orlock, NOTE_SPINLOCK_UNLOCK);
}
#endif
/* Release the 'setlock' and restore local interrupts */
spin_unlock(setlock);
up_irq_restore(flags);
}
#endif
#ifdef CONFIG_RW_SPINLOCK
/****************************************************************************


@@ -138,8 +138,7 @@ int nxtask_exit(void)
#ifdef CONFIG_SMP
/* Make sure that the system knows about the locked state */
spin_setbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset |= (1 << cpu);
#endif
rtcb->task_state = TSTATE_TASK_READYTORUN;
@@ -181,8 +180,7 @@ int nxtask_exit(void)
{
/* Make sure that the system knows about the unlocked state */
spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
&g_cpu_schedlock);
g_cpu_lockset &= ~(1 << cpu);
}
#endif