From 28cfadb4b010b5955485d57a10cc1a72b82c3d13 Mon Sep 17 00:00:00 2001
From: hujun5
Date: Fri, 16 Aug 2024 16:46:20 +0800
Subject: [PATCH] sched: replace sync pause with async pause for setpriority

reason:
In the kernel, we plan to remove all occurrences of up_cpu_pause as one
of the steps toward simplifying the implementation of critical sections.
The goal is to let spin_lock_irqsave encapsulate critical sections,
facilitating the replacement of the critical section (big lock) with
smaller spin_lock_irqsave (small lock) sections.

Configuring and compiling NuttX:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make

Running with QEMU:
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
  -machine virt,virtualization=on,gic-version=3 -net none \
  -chardev stdio,id=con,mux=on -serial chardev:con \
  -mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5
---
 sched/sched/sched_setpriority.c | 76 +++++++++++++++++++++++++++++++++
 1 file changed, 76 insertions(+)

diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index 7a3d5bfd33..fe788c8270 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -37,10 +37,58 @@
 #include "irq/irq.h"
 #include "sched/sched.h"
 
+/****************************************************************************
+ * Private Type Declarations
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+struct reprioritize_arg_s
+{
+  pid_t pid;
+  cpu_set_t saved_affinity;
+  uint16_t saved_flags;
+  int sched_priority;
+  bool need_restore;
+};
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
 
+static int reprioritize_handler(FAR void *cookie)
+{
+  FAR struct reprioritize_arg_s *arg = cookie;
+  FAR struct tcb_s *rtcb = this_task();
+  FAR struct tcb_s *tcb;
+  irqstate_t flags;
+
+  flags = enter_critical_section();
+
+  tcb = nxsched_get_tcb(arg->pid);
+
+  if (!tcb || tcb->task_state == TSTATE_TASK_INVALID ||
+      (tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0)
+    {
+      leave_critical_section(flags);
+      return OK;
+    }
+
+  if (arg->need_restore)
+    {
+      tcb->affinity = arg->saved_affinity;
+      tcb->flags = arg->saved_flags;
+    }
+
+  if (nxsched_reprioritize_rtr(tcb, arg->sched_priority))
+    {
+      up_switch_context(this_task(), rtcb);
+    }
+
+  leave_critical_section(flags);
+  return OK;
+}
+#endif
+
 /****************************************************************************
  * Name: nxsched_nexttcb
  *
@@ -175,6 +223,34 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb,
     {
       /* A context switch will occur. */
 
+#ifdef CONFIG_SMP
+      if (tcb->cpu != this_cpu() &&
+          tcb->task_state == TSTATE_TASK_RUNNING)
+        {
+          struct reprioritize_arg_s arg;
+
+          if ((tcb->flags & TCB_FLAG_CPU_LOCKED) != 0)
+            {
+              arg.pid = tcb->pid;
+              arg.need_restore = false;
+            }
+          else
+            {
+              arg.pid = tcb->pid;
+              arg.saved_flags = tcb->flags;
+              arg.saved_affinity = tcb->affinity;
+              arg.need_restore = true;
+
+              tcb->flags |= TCB_FLAG_CPU_LOCKED;
+              CPU_SET(tcb->cpu, &tcb->affinity);
+            }
+
+          arg.sched_priority = sched_priority;
+          nxsched_smp_call_single(tcb->cpu, reprioritize_handler,
+                                  &arg, true);
+        }
+      else
+#endif
       if (nxsched_reprioritize_rtr(tcb, sched_priority))
         {
          up_switch_context(this_task(), rtcb);
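
Note: below is a minimal test sketch, not part of the patch, showing one
way to exercise the new cross-CPU path. Before posting the SMP call, the
patch pins the target task to its current CPU (TCB_FLAG_CPU_LOCKED plus
CPU_SET on tcb->affinity) so it cannot migrate before
reprioritize_handler runs; the handler then restores the saved
flags/affinity. Everything in the sketch is illustrative only: the app
name, priorities, stack size, and the assumption that the worker lands
on CPU 1 while the main task runs on another CPU. It assumes the
qemu-armv8a:nsh_smp configuration shown above.

/* test_repri.c -- hypothetical NSH test app, not part of this patch.
 * Raises the priority of a task spinning on another CPU, which with
 * this patch goes through nxsched_smp_call_single() rather than
 * up_cpu_pause().
 */

#include <nuttx/config.h>

#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool g_stop;

/* Spin so the task stays in TSTATE_TASK_RUNNING on its CPU */

static int busy_worker(int argc, char *argv[])
{
  while (!g_stop)
    {
    }

  return 0;
}

int main(int argc, char *argv[])
{
  struct sched_param param;
  cpu_set_t set;
  pid_t pid;
  int ret;

  /* Create a low-priority worker (stack size is arbitrary here) */

  pid = task_create("busy", 100, 4096, busy_worker, NULL);
  if (pid < 0)
    {
      printf("task_create failed: %d\n", pid);
      return 1;
    }

  /* Pin the worker to CPU 1 so the reprioritization below must cross
   * CPUs (assuming this task is running on a different CPU).
   */

  CPU_ZERO(&set);
  CPU_SET(1, &set);
  sched_setaffinity(pid, sizeof(set), &set);

  sleep(1);  /* Give the worker time to start spinning on CPU 1 */

  /* With this patch, the priority change is delivered to CPU 1 via
   * nxsched_smp_call_single()/reprioritize_handler().
   */

  param.sched_priority = 150;
  ret = sched_setparam(pid, &param);
  printf("sched_setparam returned %d\n", ret);

  g_stop = true;
  return 0;
}

Since the patch passes true as the last argument to
nxsched_smp_call_single(), the caller still appears to wait for the
handler to complete; the difference is that the remote CPU services an
ordinary SMP call inside its own critical section instead of being
stopped wholesale with up_cpu_pause().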