From ed998c08c41699112dc294e3785dfdedcd88f935 Mon Sep 17 00:00:00 2001
From: hujun5
Date: Wed, 10 Jan 2024 16:03:08 +0800
Subject: [PATCH] sched: change the SMP scheduling policy from synchronous to asynchronous

reason:
Currently, if we need to schedule a task onto another CPU, we have to
completely halt that CPU, manipulate its scheduling linked list, and
then resume its operation.  This process is both time-consuming and
unnecessary, and while it lasts both the current CPU and the target
CPU are forced into a busy loop.

The improved strategy is to simply send a cross-core interrupt to the
target CPU: the current CPU continues to run while the target CPU
responds to the interrupt, so neither CPU has to busy-wait anymore.

Signed-off-by: hujun5
---
 arch/arm/src/armv7-a/arm_cpupause.c        |  32 ++++-
 arch/arm/src/armv7-r/arm_cpupause.c        |  32 ++++-
 arch/arm/src/cxd56xx/cxd56_cpupause.c      |   2 +
 arch/arm/src/lc823450/lc823450_cpupause.c  |   2 +
 arch/arm/src/rp2040/rp2040_cpupause.c      |   2 +
 arch/arm/src/sam34/sam4cm_cpupause.c       |   2 +
 arch/arm64/src/common/arm64_cpupause.c     |  54 +++++---
 arch/arm64/src/common/arm64_gic.h          |   2 +-
 arch/arm64/src/common/arm64_gicv2.c        |   5 +-
 arch/arm64/src/common/arm64_gicv3.c        |   4 +-
 arch/risc-v/src/common/riscv_cpupause.c    |  35 +++++
 arch/sim/src/sim/sim_smpsignal.c           |  39 +++++-
 arch/sparc/src/s698pm/s698pm_cpupause.c    |   7 +
 arch/x86_64/src/intel64/intel64_cpupause.c |   6 +-
 arch/xtensa/src/common/xtensa_cpupause.c   |  63 ++++++---
 include/nuttx/arch.h                       |  25 +++-
 sched/init/nx_start.c                      |   1 +
 sched/sched/CMakeLists.txt                 |   3 +-
 sched/sched/Make.defs                      |   2 +-
 sched/sched/queue.h                        |  10 ++
 sched/sched/sched.h                        |   9 ++
 sched/sched/sched_addreadytorun.c          |  37 +++--
 sched/sched/sched_process_delivered.c      | 154 +++++++++++++++++++++
 sched/sched/sched_removereadytorun.c       |  14 ++
 sched/task/task_restart.c                  |  12 +-
 25 files changed, 482 insertions(+), 72 deletions(-)
 create mode 100644 sched/sched/sched_process_delivered.c

diff --git a/arch/arm/src/armv7-a/arm_cpupause.c b/arch/arm/src/armv7-a/arm_cpupause.c
index b36c6de7aa..0a25b9af5d 100644
--- a/arch/arm/src/armv7-a/arm_cpupause.c
+++ b/arch/arm/src/armv7-a/arm_cpupause.c
@@ -256,6 +256,34 @@ int arm_pause_handler(int irq, void *context, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
+  return OK;
+}
+
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   pause task execution on the CPU
+ *   check whether there are tasks delivered to specified cpu
+ *   and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -303,9 +331,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/armv7-r/arm_cpupause.c b/arch/arm/src/armv7-r/arm_cpupause.c index a466d963eb..f68f418216 100644 --- a/arch/arm/src/armv7-r/arm_cpupause.c +++ b/arch/arm/src/armv7-r/arm_cpupause.c @@ -256,6 +256,34 @@ int arm_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -303,9 +331,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ - - arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/arm/src/cxd56xx/cxd56_cpupause.c b/arch/arm/src/cxd56xx/cxd56_cpupause.c index 3857728f45..42807545bf 100644 --- a/arch/arm/src/cxd56xx/cxd56_cpupause.c +++ b/arch/arm/src/cxd56xx/cxd56_cpupause.c @@ -353,6 +353,8 @@ int arm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return ret; } diff --git a/arch/arm/src/lc823450/lc823450_cpupause.c b/arch/arm/src/lc823450/lc823450_cpupause.c index c75abbae17..b4249b36d9 100644 --- a/arch/arm/src/lc823450/lc823450_cpupause.c +++ b/arch/arm/src/lc823450/lc823450_cpupause.c @@ -268,6 +268,8 @@ int lc823450_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm/src/rp2040/rp2040_cpupause.c b/arch/arm/src/rp2040/rp2040_cpupause.c index b5a4e8f820..d71a3c5f65 100644 --- a/arch/arm/src/rp2040/rp2040_cpupause.c +++ b/arch/arm/src/rp2040/rp2040_cpupause.c @@ -324,6 +324,8 @@ int arm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm/src/sam34/sam4cm_cpupause.c b/arch/arm/src/sam34/sam4cm_cpupause.c index 4abd9068de..2ebc5ea2d9 100644 --- a/arch/arm/src/sam34/sam4cm_cpupause.c +++ b/arch/arm/src/sam34/sam4cm_cpupause.c @@ -259,6 +259,8 @@ int arm_pause_handler(int irq, void *c, void *arg) return up_cpu_paused(cpu); } + nxsched_process_delivered(cpu); + return OK; } diff --git a/arch/arm64/src/common/arm64_cpupause.c b/arch/arm64/src/common/arm64_cpupause.c index 870917ca15..f6579ba3b7 100644 --- 
a/arch/arm64/src/common/arm64_cpupause.c +++ b/arch/arm64/src/common/arm64_cpupause.c @@ -259,6 +259,36 @@ int arm64_pause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + nxsched_process_delivered(cpu); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Execute SGI2 */ + + arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); + return OK; } @@ -284,8 +314,6 @@ int arm64_pause_handler(int irq, void *context, void *arg) int up_cpu_pause(int cpu) { - int ret; - DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu()); #ifdef CONFIG_SCHED_INSTRUMENTATION @@ -308,23 +336,13 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Execute SGI2 */ + up_cpu_pause_async(cpu); - ret = arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu)); - if (ret < 0) - { - /* What happened? Unlock the g_cpu_wait spinlock */ + /* Wait for the other CPU to unlock g_cpu_paused meaning that + * it is fully paused and ready for up_cpu_resume(); + */ - spin_unlock(&g_cpu_wait[cpu]); - } - else - { - /* Wait for the other CPU to unlock g_cpu_paused meaning that - * it is fully paused and ready for up_cpu_resume(); - */ - - spin_lock(&g_cpu_paused[cpu]); - } + spin_lock(&g_cpu_paused[cpu]); spin_unlock(&g_cpu_paused[cpu]); @@ -333,7 +351,7 @@ int up_cpu_pause(int cpu) * called. g_cpu_paused will be unlocked in any case. */ - return ret; + return OK; } /**************************************************************************** diff --git a/arch/arm64/src/common/arm64_gic.h b/arch/arm64/src/common/arm64_gic.h index cce6eebf3b..5b81e6ebde 100644 --- a/arch/arm64/src/common/arm64_gic.h +++ b/arch/arm64/src/common/arm64_gic.h @@ -317,7 +317,7 @@ int arm64_gic_irq_trigger(unsigned int intid, uint32_t flags); uint64_t * arm64_decodeirq(uint64_t *regs); -int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); +void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list); #ifdef CONFIG_SMP diff --git a/arch/arm64/src/common/arm64_gicv2.c b/arch/arm64/src/common/arm64_gicv2.c index 33a1ced320..3f019bfd9a 100644 --- a/arch/arm64/src/common/arm64_gicv2.c +++ b/arch/arm64/src/common/arm64_gicv2.c @@ -1474,14 +1474,13 @@ void arm64_gic_secondary_init(void) * cpuset - The set of CPUs to receive the SGI * * Returned Value: - * OK is always returned at present. 
+ * None * ****************************************************************************/ -int arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset) +void arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset) { arm_cpu_sgi(sgi, cpuset); - return 0; } # ifdef CONFIG_SMP diff --git a/arch/arm64/src/common/arm64_gicv3.c b/arch/arm64/src/common/arm64_gicv3.c index 5512ab4c5d..59b300cbf2 100644 --- a/arch/arm64/src/common/arm64_gicv3.c +++ b/arch/arm64/src/common/arm64_gicv3.c @@ -408,7 +408,7 @@ static int arm64_gic_send_sgi(unsigned int sgi_id, uint64_t target_aff, return 0; } -int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) +void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) { uint64_t pre_cluster_id = UINT64_MAX; uint64_t curr_cluster_id; @@ -437,8 +437,6 @@ int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list) } arm64_gic_send_sgi(sgi_id, pre_cluster_id, tlist); - - return 0; } /* Wake up GIC redistributor. diff --git a/arch/risc-v/src/common/riscv_cpupause.c b/arch/risc-v/src/common/riscv_cpupause.c index e883c73a70..148706ca44 100644 --- a/arch/risc-v/src/common/riscv_cpupause.c +++ b/arch/risc-v/src/common/riscv_cpupause.c @@ -227,6 +227,7 @@ int up_cpu_paused_restore(void) int riscv_pause_handler(int irq, void *c, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); nxsched_smp_call_handler(irq, c, arg); @@ -258,6 +259,40 @@ int riscv_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + riscv_savecontext(tcb); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + riscv_restorecontext(tcb); + + return OK; +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Execute Pause IRQ to CPU(cpu) */ + + riscv_ipi_send(cpu); + return OK; } diff --git a/arch/sim/src/sim/sim_smpsignal.c b/arch/sim/src/sim/sim_smpsignal.c index fd06cb2b97..140adff834 100644 --- a/arch/sim/src/sim/sim_smpsignal.c +++ b/arch/sim/src/sim/sim_smpsignal.c @@ -74,6 +74,7 @@ static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS]; static int sim_cpupause_handler(int irq, void *context, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); /* Check for false alarms. Such false could occur as a consequence of @@ -100,6 +101,12 @@ static int sim_cpupause_handler(int irq, void *context, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + sim_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sim_restorestate(tcb->xcp.regs); + return OK; } @@ -339,6 +346,34 @@ int sim_init_ipi(int irq) return irq_attach(irq, sim_cpupause_handler, NULL); } +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. 
+ * + * Returned Value: + * Zero on success; a negated errno value on failure. + * + * Assumptions: + * Called from within a critical section; + * + ****************************************************************************/ + +inline_function int up_cpu_pause_async(int cpu) +{ + /* Generate IRQ for CPU(cpu) */ + + host_send_ipi(cpu); + + return OK; +} + /**************************************************************************** * Name: up_cpu_pause * @@ -381,9 +416,7 @@ int up_cpu_pause(int cpu) spin_lock(&g_cpu_wait[cpu]); spin_lock(&g_cpu_paused[cpu]); - /* Generate IRQ for CPU(cpu) */ - - host_send_ipi(cpu); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/sparc/src/s698pm/s698pm_cpupause.c b/arch/sparc/src/s698pm/s698pm_cpupause.c index 4071ba624d..225ef9ec24 100644 --- a/arch/sparc/src/s698pm/s698pm_cpupause.c +++ b/arch/sparc/src/s698pm/s698pm_cpupause.c @@ -226,6 +226,7 @@ int up_cpu_paused_restore(void) int s698pm_pause_handler(int irq, void *c, void *arg) { + struct tcb_s *tcb; int cpu = this_cpu(); nxsched_smp_call_handler(irq, c, arg); @@ -257,6 +258,12 @@ int s698pm_pause_handler(int irq, void *c, void *arg) leave_critical_section(flags); } + tcb = current_task(cpu); + sparc_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + sparc_restorestate(tcb->xcp.regs); + return OK; } diff --git a/arch/x86_64/src/intel64/intel64_cpupause.c b/arch/x86_64/src/intel64/intel64_cpupause.c index 9a543a2ddd..44e3220c2a 100644 --- a/arch/x86_64/src/intel64/intel64_cpupause.c +++ b/arch/x86_64/src/intel64/intel64_cpupause.c @@ -265,7 +265,7 @@ int up_pause_handler(int irq, void *c, void *arg) } /**************************************************************************** - * Name: up_cpu_async_pause + * Name: up_cpu_pause_async * * Description: * pause task execution on the CPU @@ -283,7 +283,7 @@ int up_pause_handler(int irq, void *c, void *arg) * ****************************************************************************/ -inline_function int up_cpu_async_pause(int cpu) +inline_function int up_cpu_pause_async(int cpu) { cpu_set_t cpuset; @@ -362,7 +362,7 @@ int up_cpu_pause(int cpu) /* Execute Pause IRQ to CPU(cpu) */ - up_cpu_async_pause(cpu); + up_cpu_pause_async(cpu); /* Wait for the other CPU to unlock g_cpu_paused meaning that * it is fully paused and ready for up_cpu_resume(); diff --git a/arch/xtensa/src/common/xtensa_cpupause.c b/arch/xtensa/src/common/xtensa_cpupause.c index edc9b8bd85..37f749c44f 100644 --- a/arch/xtensa/src/common/xtensa_cpupause.c +++ b/arch/xtensa/src/common/xtensa_cpupause.c @@ -218,6 +218,7 @@ int up_cpu_paused_restore(void) void xtensa_pause_handler(void) { + struct tcb_s *tcb; int cpu = this_cpu(); /* Check for false alarms. Such false could occur as a consequence of @@ -242,6 +243,40 @@ void xtensa_pause_handler(void) leave_critical_section(flags); } + + tcb = current_task(cpu); + xtensa_savestate(tcb->xcp.regs); + nxsched_process_delivered(cpu); + tcb = current_task(cpu); + xtensa_restorestate(tcb->xcp.regs); +} + +/**************************************************************************** + * Name: up_cpu_pause_async + * + * Description: + * pause task execution on the CPU + * check whether there are tasks delivered to specified cpu + * and try to run them. + * + * Input Parameters: + * cpu - The index of the CPU to be paused. + * + * Returned Value: + * Zero on success; a negated errno value on failure. 
+ *
+ * Assumptions:
+ *   Called from within a critical section;
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  /* Execute the intercpu interrupt */
+
+  xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
+
+  return OK;
 }
 
 /****************************************************************************
@@ -291,8 +326,6 @@ void up_send_smp_call(cpu_set_t cpuset)
 
 int up_cpu_pause(int cpu)
 {
-  int ret;
-
 #ifdef CONFIG_SCHED_INSTRUMENTATION
   /* Notify of the pause event */
 
@@ -315,23 +348,13 @@ int up_cpu_pause(int cpu)
   spin_lock(&g_cpu_wait[cpu]);
   spin_lock(&g_cpu_paused[cpu]);
 
-  /* Execute the intercpu interrupt */
+  up_cpu_pause_async(cpu);
 
-  ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
-  if (ret < 0)
-    {
-      /* What happened? Unlock the g_cpu_wait spinlock */
+  /* Wait for the other CPU to unlock g_cpu_paused meaning that
+   * it is fully paused and ready for up_cpu_resume();
+   */
 
-      spin_unlock(&g_cpu_wait[cpu]);
-    }
-  else
-    {
-      /* Wait for the other CPU to unlock g_cpu_paused meaning that
-       * it is fully paused and ready for up_cpu_resume();
-       */
-
-      spin_lock(&g_cpu_paused[cpu]);
-    }
+  spin_lock(&g_cpu_paused[cpu]);
 
   spin_unlock(&g_cpu_paused[cpu]);
 
@@ -340,7 +363,7 @@ int up_cpu_pause(int cpu)
    * called.  g_cpu_paused will be unlocked in any case.
    */
 
-  return ret;
+  return OK;
 }
 
 /****************************************************************************
@@ -351,8 +374,8 @@ int up_cpu_pause(int cpu)
  *   state of the task at the head of the g_assignedtasks[cpu] list, and
  *   resume normal tasking.
  *
- * This function is called after up_cpu_pause in order to resume operation
- * of the CPU after modifying its g_assignedtasks[cpu] list.
+ * This function is called after up_cpu_pause in order to resume operation of
+ * the CPU after modifying its g_assignedtasks[cpu] list.
  *
  * Input Parameters:
  *   cpu - The index of the CPU being re-started.
diff --git a/include/nuttx/arch.h b/include/nuttx/arch.h
index 23754dea03..64d68203be 100644
--- a/include/nuttx/arch.h
+++ b/include/nuttx/arch.h
@@ -2317,6 +2317,29 @@ int up_cpu_start(int cpu);
 int up_cpu_pause(int cpu);
 #endif
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   pause task execution on the CPU
+ *   check whether there are tasks delivered to specified cpu
+ *   and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section;
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+int up_cpu_pause_async(int cpu);
+#endif
+
 /****************************************************************************
  * Name: up_cpu_pausereq
  *
@@ -2418,7 +2441,7 @@ int up_cpu_paused_restore(void);
  *   state of the task at the head of the g_assignedtasks[cpu] list, and
  *   resume normal tasking.
  *
- * This function is called after up_cpu_pause in order ot resume operation
+ * This function is called after up_cpu_pause in order to resume operation
  * of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
diff --git a/sched/init/nx_start.c b/sched/init/nx_start.c
index 4bf9969d4c..027b805e2c 100644
--- a/sched/init/nx_start.c
+++ b/sched/init/nx_start.c
@@ -126,6 +126,7 @@ dq_queue_t g_readytorun;
 
 #ifdef CONFIG_SMP
 dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
+FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS];
 #endif
 
 /* g_running_tasks[] holds a references to the running task for each cpu.
diff --git a/sched/sched/CMakeLists.txt b/sched/sched/CMakeLists.txt
index ca4ad08b03..29bf9c1f59 100644
--- a/sched/sched/CMakeLists.txt
+++ b/sched/sched/CMakeLists.txt
@@ -62,7 +62,8 @@ if(CONFIG_SMP)
     sched_cpupause.c
     sched_getcpu.c
     sched_getaffinity.c
-    sched_setaffinity.c)
+    sched_setaffinity.c
+    sched_process_delivered.c)
 endif()
 
 if(CONFIG_SIG_SIGSTOP_ACTION)
diff --git a/sched/sched/Make.defs b/sched/sched/Make.defs
index 1103128bc9..b6ca4a7131 100644
--- a/sched/sched/Make.defs
+++ b/sched/sched/Make.defs
@@ -37,7 +37,7 @@ CSRCS += sched_reprioritize.c
 endif
 
 ifeq ($(CONFIG_SMP),y)
-CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c
+CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c sched_process_delivered.c
 CSRCS += sched_getaffinity.c sched_setaffinity.c
 endif
 
diff --git a/sched/sched/queue.h b/sched/sched/queue.h
index c91a37de7a..320638b198 100644
--- a/sched/sched/queue.h
+++ b/sched/sched/queue.h
@@ -63,4 +63,14 @@
     } \
   while (0)
 
+#define dq_insert_mid(prev, mid, next) \
+  do \
+    { \
+      mid->flink = next; \
+      mid->blink = prev; \
+      prev->flink = mid; \
+      next->blink = mid; \
+    } \
+  while (0)
+
 #endif /* __INCLUDE_NUTTX_QUEUE_H_ */
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index a68353a9b0..26600cae45 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -190,6 +190,14 @@ extern dq_queue_t g_readytorun;
 extern dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
 #endif
 
+/* g_delivertasks[] records a TCB that needs to be handed to another CPU
+ * for scheduling.  A NULL entry means that no TCB is waiting to be
+ * processed on that CPU; a non-NULL entry is the TCB that the CPU still
+ * has to process.
+ */
+
+extern FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS];
+
 /* g_running_tasks[] holds a references to the running task for each cpu.
  * It is valid only when up_interrupt_context() returns true.
  */
@@ -397,6 +405,7 @@ static inline_function FAR struct tcb_s *this_task(void)
 
 int  nxsched_select_cpu(cpu_set_t affinity);
 int  nxsched_pause_cpu(FAR struct tcb_s *tcb);
+void nxsched_process_delivered(int cpu);
 
 #  define nxsched_islocked_global() (g_cpu_lockset != 0)
 #  define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index 4c760b7fa1..ff481ee669 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -222,13 +222,38 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
   else /* (task_state == TSTATE_TASK_RUNNING) */
     {
       /* If we are modifying some assigned task list other than our own, we
-       * will need to stop that CPU.
+       * will need to deliver the task to that CPU.
        */
 
       me = this_cpu();
       if (cpu != me)
         {
-          DEBUGVERIFY(up_cpu_pause(cpu));
+          if (g_delivertasks[cpu] == NULL)
+            {
+              g_delivertasks[cpu] = btcb;
+              btcb->cpu = cpu;
+              btcb->task_state = TSTATE_TASK_ASSIGNED;
+              up_cpu_pause_async(cpu);
+            }
+          else
+            {
+              rtcb = g_delivertasks[cpu];
+              if (rtcb->sched_priority < btcb->sched_priority)
+                {
+                  g_delivertasks[cpu] = btcb;
+                  btcb->cpu = cpu;
+                  btcb->task_state = TSTATE_TASK_ASSIGNED;
+                  nxsched_add_prioritized(rtcb, &g_readytorun);
+                  rtcb->task_state = TSTATE_TASK_READYTORUN;
+                }
+              else
+                {
+                  nxsched_add_prioritized(btcb, &g_readytorun);
+                  btcb->task_state = TSTATE_TASK_READYTORUN;
+                }
+            }
+
+          return false;
         }
 
       tasklist = &g_assignedtasks[cpu];
@@ -258,14 +283,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
         {
           g_cpu_lockset |= (1 << cpu);
         }
-
-      /* All done, restart the other CPU (if it was paused). */
-
-      if (cpu != me)
-        {
-          DEBUGVERIFY(up_cpu_resume(cpu));
-          doswitch = false;
-        }
     }
 
   return doswitch;
diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c
new file mode 100644
index 0000000000..574e89e0ed
--- /dev/null
+++ b/sched/sched/sched_process_delivered.c
@@ -0,0 +1,154 @@
+/****************************************************************************
+ * sched/sched/sched_process_delivered.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdbool.h>
+#include <sched.h>
+
+#include <nuttx/arch.h>
+
+#include "irq/irq.h"
+#include "sched/sched.h"
+#include "sched/queue.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: nxsched_process_delivered
+ *
+ * Description:
+ *   This function is used to process the tcb in g_delivertasks.
+ *   1. We lock directly instead of using enter_critical_section()
+ *      in order to save processing time
+ *   2. If a task with a higher priority is already running, the
+ *      higher-priority task will continue to run
+ *   3. If the scheduler lock is held, the task is placed in g_pendingtasks
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU that has a task delivered to it
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ * - The caller must be in interrupt context
+ * - The current CPU must not be locked
+ *
+ ****************************************************************************/
+
+void nxsched_process_delivered(int cpu)
+{
+  FAR dq_queue_t *tasklist;
+  FAR struct tcb_s *next;
+  FAR struct tcb_s *prev;
+  FAR struct tcb_s *btcb = NULL;
+  FAR struct tcb_s *tcb;
+
+  DEBUGASSERT(g_cpu_nestcount[cpu] == 0);
+  DEBUGASSERT(up_interrupt_context());
+
+  if ((g_cpu_irqset & (1 << cpu)) == 0)
+    {
+      while (!spin_trylock_wo_note(&g_cpu_irqlock))
+        {
+          if (up_cpu_pausereq(cpu))
+            {
+              up_cpu_paused(cpu);
+            }
+        }
+
+      g_cpu_irqset |= (1 << cpu);
+    }
+
+  if (g_delivertasks[cpu] == NULL)
+    {
+      tcb = current_task(cpu);
+      if (tcb->irqcount <= 0)
+        {
+          cpu_irqlock_clear();
+        }
+
+      return;
+    }
+
+  if (nxsched_islocked_global())
+    {
+      btcb = g_delivertasks[cpu];
+      g_delivertasks[cpu] = NULL;
+      nxsched_add_prioritized(btcb, &g_pendingtasks);
+      btcb->task_state = TSTATE_TASK_PENDING;
+      tcb = current_task(cpu);
+      if (tcb->irqcount <= 0)
+        {
+          cpu_irqlock_clear();
+        }
+
+      return;
+    }
+
+  btcb = g_delivertasks[cpu];
+  tasklist = &g_assignedtasks[cpu];
+
+  for (next = (FAR struct tcb_s *)tasklist->head;
+       (next && btcb->sched_priority <= next->sched_priority);
+       next = next->flink);
+
+  prev = next->blink;
+  if (prev == NULL)
+    {
+      /* Special case: Insert at the head of the list */
+
+      dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);
+      btcb->cpu = cpu;
+      btcb->task_state = TSTATE_TASK_RUNNING;
+
+      DEBUGASSERT(btcb->flink != NULL);
+      DEBUGASSERT(next == btcb->flink);
+      next->task_state = TSTATE_TASK_ASSIGNED;
+
+      if (btcb->lockcount > 0)
+        {
+          g_cpu_lockset |= (1 << cpu);
+        }
+    }
+  else
+    {
+      /* Insert in the middle of the list */
+
+      dq_insert_mid(prev, btcb, next);
+      btcb->cpu = cpu;
+      btcb->task_state = TSTATE_TASK_ASSIGNED;
+    }
+
+  g_delivertasks[cpu] = NULL;
+  tcb = current_task(cpu);
+
+  if (tcb->irqcount <= 0)
+    {
+      cpu_irqlock_clear();
+    }
+}
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index 923ec59d87..e9b26a68d8 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -324,6 +324,19 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
   else
     {
       FAR dq_queue_t *tasklist;
+      int i;
+
+      /* If tcb is in g_delivertasks[i], remove it from there */
+
+      for (i = 0; i < CONFIG_SMP_NCPUS; i++)
+        {
+          if (tcb == g_delivertasks[i])
+            {
+              g_delivertasks[i] = NULL;
+              tcb->task_state = TSTATE_TASK_INVALID;
+              goto finish;
+            }
+        }
 
       tasklist = TLIST_HEAD(tcb, tcb->cpu);
 
@@ -341,6 +354,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
       tcb->task_state = TSTATE_TASK_INVALID;
     }
 
+finish:
   if (list_pendingtasks()->head && merge)
     {
       doswitch |= nxsched_merge_pending();
diff --git a/sched/task/task_restart.c b/sched/task/task_restart.c
index f9c70e345a..51d8020d08 100644
--- a/sched/task/task_restart.c
+++ b/sched/task/task_restart.c
@@ -136,12 +136,20 @@ static int nxtask_restart(pid_t pid)
    */
 
 #ifdef CONFIG_SMP
-  tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu);
+  if ((FAR struct tcb_s *)tcb == g_delivertasks[tcb->cmn.cpu])
+    {
+      g_delivertasks[tcb->cmn.cpu] = NULL;
+    }
+  else
+    {
+      tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu);
+      dq_rem((FAR dq_entry_t *)tcb, tasklist);
+    }
 #else
   tasklist = TLIST_HEAD(&tcb->cmn);
+  dq_rem((FAR dq_entry_t *)tcb, tasklist);
 #endif
 
-  dq_rem((FAR dq_entry_t *)tcb, tasklist);
   tcb->cmn.task_state = TSTATE_TASK_INVALID;
 
   /* Deallocate anything left in the TCB's signal queues */
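--

Appendix (reviewer sketches, not part of the patch).  The freestanding C
below restates the scheme for reference.  In this first sketch every
primitive (spinlock_t, spin_lock/spin_unlock, ipi_send) is a simplified
stand-in, not the NuttX implementation; only the control flow contrast
between the old and new schemes is the point.

/* Illustration only: stub primitives, not the NuttX API */

#include <stdbool.h>

#define NCPUS 4

typedef volatile bool spinlock_t;

static spinlock_t g_cpu_wait[NCPUS];
static spinlock_t g_cpu_paused[NCPUS];

static void spin_lock(spinlock_t *l)
{
  /* The real version busy-waits until *l is released, then takes it */

  *l = true;
}

static void spin_unlock(spinlock_t *l)
{
  *l = false;
}

static void ipi_send(int cpu)
{
  (void)cpu;                       /* raise an SGI/IPI on the target CPU */
}

/* Old, synchronous flow: the sender stalls until the target CPU has
 * fully paused, edits the target's run queue, then resumes the target.
 * Both CPUs busy-wait for the whole handshake.
 */

void schedule_on_cpu_sync(int cpu)
{
  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);
  ipi_send(cpu);
  spin_lock(&g_cpu_paused[cpu]);   /* blocks until the target acks */
  spin_unlock(&g_cpu_paused[cpu]);

  /* ... manipulate g_assignedtasks[cpu], then resume the target ... */

  spin_unlock(&g_cpu_wait[cpu]);
}

/* New, asynchronous flow: post the TCB in the per-CPU mailbox and fire
 * a one-way IPI; the sender never waits.  The target inserts the task
 * itself from its pause handler via nxsched_process_delivered().
 */

void schedule_on_cpu_async(int cpu)
{
  /* g_delivertasks[cpu] = btcb;  -- single-slot mailbox, see the patch */

  ipi_send(cpu);                   /* i.e. up_cpu_pause_async(cpu) */
}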
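The heart of the sender-side change in nxsched_add_readytorun() is the
priority arbitration on the single delivery slot.  A condensed sketch,
assuming a reduced tcb_t and a hypothetical readd_to_readytorun()
helper in place of nxsched_add_prioritized() plus the task-state
bookkeeping:

/* Illustration only: tcb_t and readd_to_readytorun() are stand-ins */

typedef struct tcb_s
{
  int sched_priority;
} tcb_t;

extern tcb_t *g_delivertasks[];                /* one delivery slot per CPU */
extern int    up_cpu_pause_async(int cpu);     /* one-way IPI (from the patch) */
extern void   readd_to_readytorun(tcb_t *tcb); /* hypothetical helper */

void deliver_task(tcb_t *btcb, int cpu)
{
  tcb_t *pending = g_delivertasks[cpu];

  if (pending == NULL)
    {
      /* Slot is free: post the task and poke the target CPU */

      g_delivertasks[cpu] = btcb;
      up_cpu_pause_async(cpu);
    }
  else if (pending->sched_priority < btcb->sched_priority)
    {
      /* The new task outranks the pending delivery: replace it and put
       * the displaced task back on the global ready-to-run list.  No
       * new IPI is needed; one is already in flight for this slot.
       */

      g_delivertasks[cpu] = btcb;
      readd_to_readytorun(pending);
    }
  else
    {
      /* Lower (or equal) priority: just queue it globally */

      readd_to_readytorun(btcb);
    }
}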
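On the receiving side, the pause handler drains the mailbox.  The
sketch below only mirrors the branch structure of
nxsched_process_delivered(); every helper named here is a descriptive
stand-in (the patch does this with g_cpu_irqlock,
nxsched_islocked_global(), dq_addfirst_nonempty() and dq_insert_mid()):

/* Illustration only: control-flow sketch, all helpers are stand-ins */

typedef struct tcb_s tcb_t;

extern tcb_t *g_delivertasks[];
extern void   take_irq_lock(int cpu);
extern void   drop_irq_lock_if_unneeded(int cpu);
extern int    scheduler_is_locked(void);
extern int    outranks_current(tcb_t *tcb, int cpu);
extern void   add_to_pending(tcb_t *tcb);
extern void   insert_at_head(tcb_t *tcb, int cpu);
extern void   insert_by_priority(tcb_t *tcb, int cpu);

void process_delivered(int cpu)
{
  tcb_t *btcb;

  take_irq_lock(cpu);                /* the g_cpu_irqlock dance in the patch */

  btcb = g_delivertasks[cpu];
  if (btcb == NULL)
    {
      drop_irq_lock_if_unneeded(cpu);
      return;                        /* spurious IPI: nothing was delivered */
    }

  g_delivertasks[cpu] = NULL;

  if (scheduler_is_locked())
    {
      add_to_pending(btcb);          /* defer until sched_unlock() */
    }
  else if (outranks_current(btcb, cpu))
    {
      insert_at_head(btcb, cpu);     /* preempts: becomes TSTATE_TASK_RUNNING */
    }
  else
    {
      insert_by_priority(btcb, cpu); /* queued as TSTATE_TASK_ASSIGNED */
    }

  drop_irq_lock_if_unneeded(cpu);
}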
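Several ports (RISC-V, sim, SPARC, Xtensa) bracket that call with a
context save/restore so that, if the delivered task preempts the
interrupted one, the interrupt returns into the new task rather than
the old one.  The pattern, shown with the RISC-V names used by the
patch (the declarations below are sketched, not the real prototypes):

/* The handler pattern from the patch; declarations sketched for
 * self-containment.
 */

struct tcb_s;                                   /* opaque for this sketch */

extern struct tcb_s *current_task(int cpu);
extern void riscv_savecontext(struct tcb_s *tcb);
extern void riscv_restorecontext(struct tcb_s *tcb);
extern void nxsched_process_delivered(int cpu);

void handle_pause_ipi(int cpu)
{
  struct tcb_s *tcb = current_task(cpu);

  riscv_savecontext(tcb);            /* checkpoint the interrupted task */
  nxsched_process_delivered(cpu);    /* may change the head of the run queue */
  tcb = current_task(cpu);           /* re-read: may now be a different task */
  riscv_restorecontext(tcb);         /* return into whichever task is current */
}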
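Finally, the new dq_insert_mid() macro is an O(1) splice between two
known neighbors of a doubly linked list; the caller guarantees that
both neighbors exist.  A standalone demonstration with a simplified
node type (the extra parentheses around the macro arguments are an
editorial hardening, not in the patch):

#include <assert.h>

struct node
{
  struct node *flink;               /* forward link */
  struct node *blink;               /* backward link */
};

/* Same splice as the patch's dq_insert_mid(prev, mid, next) */

#define dq_insert_mid(prev, mid, next) \
  do \
    { \
      (mid)->flink = (next); \
      (mid)->blink = (prev); \
      (prev)->flink = (mid); \
      (next)->blink = (mid); \
    } \
  while (0)

int main(void)
{
  struct node a = { 0 }, b = { 0 }, c = { 0 };

  /* Start with a <-> c, then splice b between them */

  a.flink = &c;
  c.blink = &a;

  dq_insert_mid(&a, &b, &c);

  assert(a.flink == &b && b.blink == &a);
  assert(b.flink == &c && c.blink == &b);
  return 0;
}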