sched: replace sync pause with async pause for nxsched_suspend

reason:
In the kernel, we are planning to remove all occurrences of up_cpu_pause as one of the steps to
simplify the implementation of critical sections. The goal is to enable spin_lock_irqsave to
encapsulate critical sections, thereby facilitating the replacement of critical sections (big lock)
with smaller spin_lock_irqsave regions (small locks).

Configuring NuttX and compile:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make
Running with qemu
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic
-machine virt,virtualization=on,gic-version=3
-net none -chardev stdio,id=con,mux=on -serial chardev:con
-mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in:
hujun5 2024-09-29 21:15:18 +08:00 committed by Mateusz Szafoni
parent 28cfadb4b0
commit b52ad53cc1

View File

@@ -36,6 +36,57 @@
#include "sched/sched.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Private Type Declarations
****************************************************************************/
/* Argument block passed (via cookie pointer) from nxsched_suspend() to
 * nxsched_suspend_handler() through nxsched_smp_call_single().
 */

struct suspend_arg_s
{
pid_t pid;                  /* PID of the task to be suspended */
cpu_set_t saved_affinity;   /* Task's affinity mask before it was pinned
                             * to its current CPU; restored by the handler
                             * when need_restore is true */
uint16_t saved_flags;       /* Task's TCB flags before TCB_FLAG_CPU_LOCKED
                             * was forced on; restored by the handler when
                             * need_restore is true */
bool need_restore;          /* True: the caller modified affinity/flags to
                             * pin the task and the handler must restore
                             * the saved values before stopping the task */
};
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: nxsched_suspend_handler
 *
 * Description:
 *   SMP call handler used to suspend a task that is currently running on
 *   another CPU.  It executes on the CPU where the target task runs: it
 *   looks up the TCB by PID, optionally restores the affinity/flags that
 *   the caller temporarily modified to pin the task, then moves the task
 *   from the ready-to-run list to the stopped-task list.
 *
 * Input Parameters:
 *   cookie - Pointer to a struct suspend_arg_s describing the target task
 *            and any saved state to restore.
 *
 * Returned Value:
 *   Always returns OK (including when the target task no longer exists).
 *
 ****************************************************************************/

static int nxsched_suspend_handler(FAR void *cookie)
{
FAR struct suspend_arg_s *arg = cookie;
FAR struct tcb_s *tcb;
irqstate_t flags;

flags = enter_critical_section();
tcb = nxsched_get_tcb(arg->pid);

/* The task may have exited between the time the caller queued this SMP
 * call and the time it runs here, so re-validate the TCB.
 */

if (!tcb || tcb->task_state == TSTATE_TASK_INVALID ||
(tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0)
{
/* There is no TCB with this pid or, if there is, it is not a task. */

leave_critical_section(flags);
return OK;
}

/* Undo the temporary CPU pinning (affinity + TCB_FLAG_CPU_LOCKED) that
 * the caller applied to keep the task on this CPU until we got here.
 */

if (arg->need_restore)
{
tcb->affinity = arg->saved_affinity;
tcb->flags = arg->saved_flags;
}

/* Remove the task from the ready-to-run list and park it on the
 * stopped-task queue.  The 'false' argument indicates no context switch
 * decision is needed for the caller here.
 */

nxsched_remove_readytorun(tcb, false);
tcb->task_state = TSTATE_TASK_STOPPED;
dq_addlast((FAR dq_entry_t *)tcb, &g_stoppedtasks);
leave_critical_section(flags);
return OK;
}
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
@@ -94,18 +145,46 @@ void nxsched_suspend(FAR struct tcb_s *tcb)
/* Remove the tcb task from the ready-to-run list. */
switch_needed = nxsched_remove_readytorun(tcb, true);
/* Add the task to the specified blocked task list */
tcb->task_state = TSTATE_TASK_STOPPED;
dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks());
/* Now, perform the context switch if one is needed */
if (switch_needed)
#ifdef CONFIG_SMP
if (tcb->task_state == TSTATE_TASK_RUNNING && tcb->cpu != this_cpu())
{
up_switch_context(this_task(), rtcb);
struct suspend_arg_s arg;
if ((tcb->flags & TCB_FLAG_CPU_LOCKED) != 0)
{
arg.pid = tcb->pid;
arg.need_restore = false;
}
else
{
arg.pid = tcb->pid;
arg.saved_flags = tcb->flags;
arg.saved_affinity = tcb->affinity;
arg.need_restore = true;
tcb->flags |= TCB_FLAG_CPU_LOCKED;
CPU_SET(tcb->cpu, &tcb->affinity);
}
nxsched_smp_call_single(tcb->cpu, nxsched_suspend_handler,
&arg, true);
}
else
#endif
{
switch_needed = nxsched_remove_readytorun(tcb, true);
/* Add the task to the specified blocked task list */
tcb->task_state = TSTATE_TASK_STOPPED;
dq_addlast((FAR dq_entry_t *)tcb, list_stoppedtasks());
/* Now, perform the context switch if one is needed */
if (switch_needed)
{
up_switch_context(this_task(), rtcb);
}
}
}