sched: Fix sched_lock() logic for SMP

Summary:
- The sched_lock() logic differs from sched_unlock(): it guards the
  lockcount update with up_irq_save()/up_irq_restore() instead of a
  critical section
- sched_lock() should use enter_critical_section()/leave_critical_section(),
  just as sched_unlock() already does
- The code should also stay as simple as sched_unlock()
- This commit fixes these issues (see the sketch after this summary)
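
For context, the pattern this change adopts can be shown with a minimal,
self-contained sketch. This is not the NuttX code itself: the
critical-section functions below are stand-in stubs that only borrow the
NuttX names, and the lockcount lives in a plain global instead of the TCB.

/* Sketch of the symmetric lock/unlock pattern: both paths bracket the
 * lockcount update with a critical section.  Stand-in stubs, not NuttX.
 */

#include <assert.h>
#include <stdio.h>

typedef unsigned int irqstate_t;

static irqstate_t enter_critical_section(void)
{
  return 0;                  /* real code disables interrupts and, on SMP,
                              * locks out the other CPUs */
}

static void leave_critical_section(irqstate_t flags)
{
  (void)flags;               /* real code restores the saved state */
}

static int g_lockcount;      /* stands in for rtcb->lockcount */

int sketch_sched_lock(void)
{
  irqstate_t flags = enter_critical_section();

  assert(g_lockcount < 127); /* catch lockcount overflow */
  g_lockcount++;             /* preemption is now disabled */

  leave_critical_section(flags);
  return 0;
}

int sketch_sched_unlock(void)
{
  irqstate_t flags = enter_critical_section();

  assert(g_lockcount > 0);
  g_lockcount--;             /* preemption resumes when it reaches zero */

  leave_critical_section(flags);
  return 0;
}

int main(void)
{
  sketch_sched_lock();
  sketch_sched_unlock();
  printf("lockcount is back to %d\n", g_lockcount);
  return 0;
}

The point of the symmetry is that on SMP the real enter_critical_section()
also serializes the update against the other CPUs, which local interrupt
masking via up_irq_save() alone does not guarantee.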

Impact:
- Affects SMP only

Testing:
- Tested with spresense:wifi_smp (both NCPUS=2 and 3)
- Tested with lc823450-xgevk:rndis
- Tested with maix-bit:smp (QEMU)
- Tested with esp32-core:smp (QEMU)
- Tested with sabre-6quad:smp (QEMU)

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
Masayuki Ishikawa, 2020-10-15 10:03:25 +09:00 (committed by Brennan Ashton)
parent 0820549223
commit 46659d5a14

@@ -133,29 +133,20 @@ volatile cpu_set_t g_cpu_lockset SP_SECTION;
 int sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
-  irqstate_t flags;
-  int cpu;
 
   /* If the CPU supports suppression of interprocessor interrupts, then
    * simple disabling interrupts will provide sufficient protection for
    * the following operation.
    */
 
-  flags = up_irq_save();
-
-  cpu  = this_cpu();
-  rtcb = current_task(cpu);
+  rtcb = this_task();
 
   /* Check for some special cases: (1) rtcb may be NULL only during early
    * boot-up phases, and (2) sched_lock() should have no effect if called
    * from the interrupt level.
    */
 
-  if (rtcb == NULL || up_interrupt_context())
-    {
-      up_irq_restore(flags);
-    }
-  else
+  if (rtcb != NULL && !up_interrupt_context())
     {
       /* Catch attempts to increment the lockcount beyond the range of the
        * integer type.
@@ -163,6 +154,8 @@ int sched_lock(void)
 
       DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
 
+      irqstate_t flags = enter_critical_section();
+
       /* We must hold the lock on this CPU before we increment the lockcount
        * for the first time. Holding the lock is sufficient to lockout
        * context switching.
@@ -196,8 +189,6 @@ int sched_lock(void)
 
       rtcb->lockcount++;
 
-      up_irq_restore(flags);
-
 #if defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION) || \
     defined(CONFIG_SCHED_CRITMONITOR)
       /* Check if we just acquired the lock */
@@ -223,6 +214,8 @@ int sched_lock(void)
       nxsched_merge_prioritized((FAR dq_queue_t *)&g_readytorun,
                                 (FAR dq_queue_t *)&g_pendingtasks,
                                 TSTATE_TASK_PENDING);
+
+      leave_critical_section(flags);
     }
 
   return OK;
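
As a usage note (a hypothetical caller, not code from this commit): the
scheduler lock is a counter, so calls may nest, and preemption is only
re-enabled when the count drops back to zero. The prototypes normally come
from NuttX's <sched.h>.

#include <sched.h>   /* NuttX declares sched_lock()/sched_unlock() here */

void update_shared_state(void)
{
  sched_lock();      /* disable preemption; safe even if already locked */

  /* ... modify state shared with other tasks on any CPU ... */

  sched_unlock();    /* preemption resumes once every lock is paired */
}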