sched: change the SMP scheduling policy from synchronous to asynchronous

reason: Currently, to schedule a task onto another CPU we must completely halt that CPU, manipulate its scheduling linked list, and then resume it. This process is both time-consuming and unnecessary: for the whole sequence, the current CPU and the target CPU are each stuck in a busyloop. The improved strategy is to simply send a cross-core interrupt to the target CPU: the current CPU keeps running while the target CPU handles the interrupt, so the guaranteed busyloop is eliminated.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
This commit is contained in: parent cc9d42804b, commit ed998c08c4
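For illustration, here is a minimal standalone sketch (C11) contrasting the old synchronous pause with the new fire-and-forget IPI. All names here (send_ipi(), g_target_paused) are illustrative stand-ins, not NuttX APIs:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int g_target_paused;      /* models g_cpu_paused[cpu] */

static void send_ipi(int cpu)           /* stand-in for arm_cpu_sgi() & co. */
{
  printf("IPI -> CPU%d\n", cpu);
}

/* Old policy: the caller busy-waits until the target CPU acknowledges the
 * pause before it may touch that CPU's scheduling list.
 */

static void pause_sync(int cpu)
{
  send_ipi(cpu);
  while (!atomic_load(&g_target_paused))
    {
      /* Busyloop on the calling CPU; the target busyloops too while held */
    }
}

/* New policy: raise the IPI and keep running; the target CPU picks up the
 * delivered task inside its interrupt handler.
 */

static void pause_async(int cpu)
{
  send_ipi(cpu);                        /* returns immediately; no busyloop */
}

int main(void)
{
  atomic_store(&g_target_paused, 1);    /* pretend the target already acked */
  pause_sync(1);                        /* old: returns only after the ack */
  pause_async(2);                       /* new: returns at once */
  return 0;
}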
@@ -256,6 +256,34 @@ int arm_pause_handler(int irq, void *context, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
   return OK;
 }
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));
+
+  return OK;
+}
+
@@ -303,9 +331,7 @@ int up_cpu_pause(int cpu)
   spin_lock(&g_cpu_wait[cpu]);
   spin_lock(&g_cpu_paused[cpu]);
 
-  /* Execute SGI2 */
-
-  arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));
+  up_cpu_pause_async(cpu);
 
   /* Wait for the other CPU to unlock g_cpu_paused meaning that
    * it is fully paused and ready for up_cpu_resume();
@@ -353,6 +353,8 @@ int arm_pause_handler(int irq, void *c, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
   return ret;
 }
@@ -268,6 +268,8 @@ int lc823450_pause_handler(int irq, void *c, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
   return OK;
 }
@@ -324,6 +324,8 @@ int arm_pause_handler(int irq, void *c, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
   return OK;
 }
@@ -259,6 +259,8 @@ int arm_pause_handler(int irq, void *c, void *arg)
       return up_cpu_paused(cpu);
     }
 
+  nxsched_process_delivered(cpu);
+
   return OK;
 }
@@ -259,6 +259,36 @@ int arm64_pause_handler(int irq, void *context, void *arg)
       leave_critical_section(flags);
     }
 
+  nxsched_process_delivered(cpu);
+
   return OK;
 }
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  /* Execute SGI2 */
+
+  arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));
+
+  return OK;
+}
+
@@ -284,8 +314,6 @@ int arm64_pause_handler(int irq, void *context, void *arg)
 
 int up_cpu_pause(int cpu)
 {
-  int ret;
-
   DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
 
 #ifdef CONFIG_SCHED_INSTRUMENTATION
@@ -308,23 +336,13 @@ int up_cpu_pause(int cpu)
   spin_lock(&g_cpu_wait[cpu]);
   spin_lock(&g_cpu_paused[cpu]);
 
-  /* Execute SGI2 */
-
-  ret = arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));
-  if (ret < 0)
-    {
-      /* What happened?  Unlock the g_cpu_wait spinlock */
-
-      spin_unlock(&g_cpu_wait[cpu]);
-    }
-  else
-    {
-      /* Wait for the other CPU to unlock g_cpu_paused meaning that
-       * it is fully paused and ready for up_cpu_resume();
-       */
-
-      spin_lock(&g_cpu_paused[cpu]);
-    }
+  up_cpu_pause_async(cpu);
+
+  /* Wait for the other CPU to unlock g_cpu_paused meaning that
+   * it is fully paused and ready for up_cpu_resume();
+   */
+
+  spin_lock(&g_cpu_paused[cpu]);
 
   spin_unlock(&g_cpu_paused[cpu]);
 
@@ -333,7 +351,7 @@ int up_cpu_pause(int cpu)
    * called. g_cpu_paused will be unlocked in any case.
    */
 
-  return ret;
+  return OK;
 }
 
 /****************************************************************************
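Note that up_cpu_pause() still blocks, but it now layers on the async primitive: it raises the IPI via up_cpu_pause_async() and then blocks on g_cpu_paused until the target releases it. Below is a runnable toy model of that handshake, using pthreads and an atomic flag in place of the NuttX spinlocks; all names are illustrative assumptions, not kernel APIs:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int g_paused_flag = 1;   /* models the held g_cpu_paused[cpu] */

static void *target_cpu(void *arg)     /* stands in for the IPI handler */
{
  (void)arg;

  /* ... the real handler would run nxsched_process_delivered() here ... */

  atomic_store(&g_paused_flag, 0);     /* "unlock": target is fully paused */
  return NULL;
}

int main(void)
{
  pthread_t t;

  pthread_create(&t, NULL, target_cpu, NULL);  /* models up_cpu_pause_async */

  while (atomic_load(&g_paused_flag))          /* models spin_lock(&g_cpu_paused) */
    {
    }

  puts("target acknowledged the pause");
  pthread_join(t, NULL);
  return 0;
}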
@@ -317,7 +317,7 @@ int arm64_gic_irq_trigger(unsigned int intid, uint32_t flags);
 
 uint64_t * arm64_decodeirq(uint64_t *regs);
 
-int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list);
+void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list);
 
 #ifdef CONFIG_SMP
@@ -1474,14 +1474,13 @@ void arm64_gic_secondary_init(void)
  *   cpuset - The set of CPUs to receive the SGI
  *
  * Returned Value:
- *   OK is always returned at present.
+ *   None
  *
  ****************************************************************************/
 
-int arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset)
+void arm64_gic_raise_sgi(unsigned int sgi, uint16_t cpuset)
 {
   arm_cpu_sgi(sgi, cpuset);
-  return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -408,7 +408,7 @@ static int arm64_gic_send_sgi(unsigned int sgi_id, uint64_t target_aff,
   return 0;
 }
 
-int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list)
+void arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list)
 {
   uint64_t pre_cluster_id = UINT64_MAX;
   uint64_t curr_cluster_id;
@@ -437,8 +437,6 @@ int arm64_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list)
     }
 
   arm64_gic_send_sgi(sgi_id, pre_cluster_id, tlist);
-
-  return 0;
 }
 
 /* Wake up GIC redistributor.
@@ -227,6 +227,7 @@ int up_cpu_paused_restore(void)
 
 int riscv_pause_handler(int irq, void *c, void *arg)
 {
+  struct tcb_s *tcb;
   int cpu = this_cpu();
 
   nxsched_smp_call_handler(irq, c, arg);
@@ -258,6 +259,40 @@ int riscv_pause_handler(int irq, void *c, void *arg)
       leave_critical_section(flags);
     }
 
+  tcb = current_task(cpu);
+  riscv_savecontext(tcb);
+  nxsched_process_delivered(cpu);
+  tcb = current_task(cpu);
+  riscv_restorecontext(tcb);
+
   return OK;
 }
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  /* Execute Pause IRQ to CPU(cpu) */
+
+  riscv_ipi_send(cpu);
+
+  return OK;
+}
+
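The handler-side pattern added here on risc-v (and repeated below for sim, sparc, and xtensa) brackets nxsched_process_delivered() with a context save and restore, because the delivered task may displace the interrupted one. A hedged standalone model of that shape, in which save_context(), restore_context(), and the delivery step are simplified stand-ins:

#include <stdio.h>

struct tcb_s { const char *name; };

static struct tcb_s g_task_a = { "A" };
static struct tcb_s g_task_b = { "B" };
static struct tcb_s *g_current = &g_task_a;

static struct tcb_s *current_task(int cpu) { (void)cpu; return g_current; }
static void save_context(struct tcb_s *t)    { printf("save %s\n", t->name); }
static void restore_context(struct tcb_s *t) { printf("restore %s\n", t->name); }

static void process_delivered_model(int cpu) /* may switch the current task */
{
  (void)cpu;
  g_current = &g_task_b;                     /* the delivered task wins */
}

int main(void)
{
  struct tcb_s *tcb = current_task(0);

  save_context(tcb);              /* snapshot the interrupted task */
  process_delivered_model(0);     /* models nxsched_process_delivered(cpu) */
  tcb = current_task(0);          /* re-read: the list head may have changed */
  restore_context(tcb);           /* resume whichever task is now current */
  return 0;
}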
@@ -74,6 +74,7 @@ static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
 
 static int sim_cpupause_handler(int irq, void *context, void *arg)
 {
+  struct tcb_s *tcb;
   int cpu = this_cpu();
 
   /* Check for false alarms.  Such false could occur as a consequence of
@@ -100,6 +101,12 @@ static int sim_cpupause_handler(int irq, void *context, void *arg)
       leave_critical_section(flags);
     }
 
+  tcb = current_task(cpu);
+  sim_savestate(tcb->xcp.regs);
+  nxsched_process_delivered(cpu);
+  tcb = current_task(cpu);
+  sim_restorestate(tcb->xcp.regs);
+
   return OK;
 }
@@ -339,6 +346,34 @@ int sim_init_ipi(int irq)
   return irq_attach(irq, sim_cpupause_handler, NULL);
 }
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  /* Generate IRQ for CPU(cpu) */
+
+  host_send_ipi(cpu);
+
+  return OK;
+}
+
 /****************************************************************************
  * Name: up_cpu_pause
  *
@@ -381,9 +416,7 @@ int up_cpu_pause(int cpu)
   spin_lock(&g_cpu_wait[cpu]);
   spin_lock(&g_cpu_paused[cpu]);
 
-  /* Generate IRQ for CPU(cpu) */
-
-  host_send_ipi(cpu);
+  up_cpu_pause_async(cpu);
 
   /* Wait for the other CPU to unlock g_cpu_paused meaning that
    * it is fully paused and ready for up_cpu_resume();
@@ -226,6 +226,7 @@ int up_cpu_paused_restore(void)
 
 int s698pm_pause_handler(int irq, void *c, void *arg)
 {
+  struct tcb_s *tcb;
   int cpu = this_cpu();
 
   nxsched_smp_call_handler(irq, c, arg);
@@ -257,6 +258,12 @@ int s698pm_pause_handler(int irq, void *c, void *arg)
       leave_critical_section(flags);
     }
 
+  tcb = current_task(cpu);
+  sparc_savestate(tcb->xcp.regs);
+  nxsched_process_delivered(cpu);
+  tcb = current_task(cpu);
+  sparc_restorestate(tcb->xcp.regs);
+
   return OK;
 }
@@ -265,7 +265,7 @@ int up_pause_handler(int irq, void *c, void *arg)
 }
 
 /****************************************************************************
- * Name: up_cpu_async_pause
+ * Name: up_cpu_pause_async
  *
  * Description:
  *   pause task execution on the CPU
@@ -283,7 +283,7 @@ int up_pause_handler(int irq, void *c, void *arg)
  *
  ****************************************************************************/
 
-inline_function int up_cpu_async_pause(int cpu)
+inline_function int up_cpu_pause_async(int cpu)
 {
   cpu_set_t cpuset;
 
@@ -362,7 +362,7 @@ int up_cpu_pause(int cpu)
 
   /* Execute Pause IRQ to CPU(cpu) */
 
-  up_cpu_async_pause(cpu);
+  up_cpu_pause_async(cpu);
 
   /* Wait for the other CPU to unlock g_cpu_paused meaning that
    * it is fully paused and ready for up_cpu_resume();
@@ -218,6 +218,7 @@ int up_cpu_paused_restore(void)
 
 void xtensa_pause_handler(void)
 {
+  struct tcb_s *tcb;
   int cpu = this_cpu();
 
   /* Check for false alarms.  Such false could occur as a consequence of
@@ -242,6 +243,40 @@ void xtensa_pause_handler(void)
 
       leave_critical_section(flags);
     }
 
+  tcb = current_task(cpu);
+  xtensa_savestate(tcb->xcp.regs);
+  nxsched_process_delivered(cpu);
+  tcb = current_task(cpu);
+  xtensa_restorestate(tcb->xcp.regs);
 }
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+inline_function int up_cpu_pause_async(int cpu)
+{
+  /* Execute the intercpu interrupt */
+
+  xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
+
+  return OK;
+}
+
 /****************************************************************************
@@ -291,8 +326,6 @@ void up_send_smp_call(cpu_set_t cpuset)
 
 int up_cpu_pause(int cpu)
 {
-  int ret;
-
 #ifdef CONFIG_SCHED_INSTRUMENTATION
   /* Notify of the pause event */
 
@@ -315,23 +348,13 @@ int up_cpu_pause(int cpu)
   spin_lock(&g_cpu_wait[cpu]);
   spin_lock(&g_cpu_paused[cpu]);
 
-  /* Execute the intercpu interrupt */
-
-  ret = xtensa_intercpu_interrupt(cpu, CPU_INTCODE_PAUSE);
-  if (ret < 0)
-    {
-      /* What happened?  Unlock the g_cpu_wait spinlock */
-
-      spin_unlock(&g_cpu_wait[cpu]);
-    }
-  else
-    {
-      /* Wait for the other CPU to unlock g_cpu_paused meaning that
-       * it is fully paused and ready for up_cpu_resume();
-       */
-
-      spin_lock(&g_cpu_paused[cpu]);
-    }
+  up_cpu_pause_async(cpu);
+
+  /* Wait for the other CPU to unlock g_cpu_paused meaning that
+   * it is fully paused and ready for up_cpu_resume();
+   */
+
+  spin_lock(&g_cpu_paused[cpu]);
 
   spin_unlock(&g_cpu_paused[cpu]);
 
@@ -340,7 +363,7 @@ int up_cpu_pause(int cpu)
    * called. g_cpu_paused will be unlocked in any case.
    */
 
-  return ret;
+  return OK;
 }
 
 /****************************************************************************
@@ -351,8 +374,8 @@ int up_cpu_pause(int cpu)
  *   state of the task at the head of the g_assignedtasks[cpu] list, and
  *   resume normal tasking.
  *
- *   This function is called after up_cpu_pause in order to resume operation
- *   of the CPU after modifying its g_assignedtasks[cpu] list.
+ *   This function is called after up_cpu_pause in order to resume operation of
+ *   the CPU after modifying its g_assignedtasks[cpu] list.
  *
  * Input Parameters:
  *   cpu - The index of the CPU being re-started.
@@ -2317,6 +2317,29 @@ int up_cpu_start(int cpu);
 int up_cpu_pause(int cpu);
 #endif
 
+/****************************************************************************
+ * Name: up_cpu_pause_async
+ *
+ * Description:
+ *   Pause task execution on the CPU: check whether there are tasks
+ *   delivered to the specified CPU and try to run them.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU to be paused.
+ *
+ * Returned Value:
+ *   Zero on success; a negated errno value on failure.
+ *
+ * Assumptions:
+ *   Called from within a critical section.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+int up_cpu_pause_async(int cpu);
+#endif
+
 /****************************************************************************
  * Name: up_cpu_pausereq
  *
@@ -2418,7 +2441,7 @@ int up_cpu_paused_restore(void);
  *   state of the task at the head of the g_assignedtasks[cpu] list, and
  *   resume normal tasking.
  *
- *   This function is called after up_cpu_pause in order ot resume operation
+ *   This function is called after up_cpu_pause in order to resume operation
  *   of the CPU after modifying its g_assignedtasks[cpu] list.
  *
  * Input Parameters:
@@ -126,6 +126,7 @@ dq_queue_t g_readytorun;
 
 #ifdef CONFIG_SMP
 dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
+FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS];
 #endif
 
 /* g_running_tasks[] holds a references to the running task for each cpu.
@@ -62,7 +62,8 @@ if(CONFIG_SMP)
     sched_cpupause.c
     sched_getcpu.c
     sched_getaffinity.c
-    sched_setaffinity.c)
+    sched_setaffinity.c
+    sched_process_delivered.c)
 endif()
 
 if(CONFIG_SIG_SIGSTOP_ACTION)
@@ -37,7 +37,7 @@ CSRCS += sched_reprioritize.c
 endif
 
 ifeq ($(CONFIG_SMP),y)
-CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c
+CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c sched_process_delivered.c
 CSRCS += sched_getaffinity.c sched_setaffinity.c
 endif
@@ -63,4 +63,14 @@
     } \
   while (0)
 
+#define dq_insert_mid(pre, mid, next) \
+  do \
+    { \
+      mid->flink  = next; \
+      mid->blink  = pre; \
+      pre->flink  = mid; \
+      next->blink = mid; \
+    } \
+  while (0)
+
 #endif /* __INCLUDE_NUTTX_QUEUE_H_ */
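The new dq_insert_mid() splices a node between two known neighbors with four pointer updates and no list traversal. A small self-contained demo of the same splice over a simplified node type (not the NuttX dq_entry_t):

#include <stdio.h>

struct node
{
  struct node *flink;   /* forward link, as in NuttX dq_entry_t */
  struct node *blink;   /* backward link */
  int val;
};

static void insert_mid(struct node *pre, struct node *mid, struct node *next)
{
  mid->flink  = next;   /* the same four updates dq_insert_mid() performs */
  mid->blink  = pre;
  pre->flink  = mid;
  next->blink = mid;
}

int main(void)
{
  struct node a = { 0 }, b = { 0 }, c = { 0 };

  a.val = 1; b.val = 2; c.val = 3;
  a.flink = &c; c.blink = &a;           /* list: a <-> c */
  insert_mid(&a, &b, &c);               /* list: a <-> b <-> c */

  for (struct node *n = &a; n != NULL; n = n->flink)
    {
      printf("%d ", n->val);            /* prints: 1 2 3 */
    }

  printf("\n");
  return 0;
}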
@@ -190,6 +190,14 @@ extern dq_queue_t g_readytorun;
 extern dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
 #endif
 
+/* g_delivertasks[] records, per CPU, a tcb that needs to be passed to
+ * that CPU for scheduling.  A NULL entry means there is no tcb waiting
+ * to be processed; a non-NULL entry is the tcb awaiting delivery.
+ */
+
+extern FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS];
+
 /* g_running_tasks[] holds a references to the running task for each cpu.
  * It is valid only when up_interrupt_context() returns true.
  */
@@ -397,6 +405,7 @@ static inline_function FAR struct tcb_s *this_task(void)
 
 int nxsched_select_cpu(cpu_set_t affinity);
 int nxsched_pause_cpu(FAR struct tcb_s *tcb);
+void nxsched_process_delivered(int cpu);
 
 #  define nxsched_islocked_global() (g_cpu_lockset != 0)
 #  define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
@@ -222,13 +222,38 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
   else /* (task_state == TSTATE_TASK_RUNNING) */
     {
       /* If we are modifying some assigned task list other than our own, we
-       * will need to stop that CPU.
+       * will need to switch that CPU.
        */
 
       me = this_cpu();
       if (cpu != me)
         {
-          DEBUGVERIFY(up_cpu_pause(cpu));
+          if (g_delivertasks[cpu] == NULL)
+            {
+              g_delivertasks[cpu] = btcb;
+              btcb->cpu = cpu;
+              btcb->task_state = TSTATE_TASK_ASSIGNED;
+              up_cpu_pause_async(cpu);
+            }
+          else
+            {
+              rtcb = g_delivertasks[cpu];
+              if (rtcb->sched_priority < btcb->sched_priority)
+                {
+                  g_delivertasks[cpu] = btcb;
+                  btcb->cpu = cpu;
+                  btcb->task_state = TSTATE_TASK_ASSIGNED;
+                  nxsched_add_prioritized(rtcb, &g_readytorun);
+                  rtcb->task_state = TSTATE_TASK_READYTORUN;
+                }
+              else
+                {
+                  nxsched_add_prioritized(btcb, &g_readytorun);
+                  btcb->task_state = TSTATE_TASK_READYTORUN;
+                }
+            }
+
+          return false;
         }
 
       tasklist = &g_assignedtasks[cpu];
@@ -258,14 +283,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
         {
          g_cpu_lockset |= (1 << cpu);
         }
-
-      /* All done, restart the other CPU (if it was paused). */
-
-      if (cpu != me)
-        {
-          DEBUGVERIFY(up_cpu_resume(cpu));
-          doswitch = false;
-        }
     }
 
   return doswitch;
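Each CPU has a single delivery slot, so nxsched_add_readytorun() arbitrates by priority: the higher-priority task takes (or keeps) the slot and the loser is re-queued on g_readytorun. A standalone sketch of that arbitration, assuming one slot and simplified types (deliver() and back_to_readytorun() are hypothetical names):

#include <stddef.h>
#include <stdio.h>

struct tcb { int prio; const char *name; };

static struct tcb *g_slot;              /* models g_delivertasks[cpu] */

static void back_to_readytorun(struct tcb *t)
{
  printf("%s -> g_readytorun\n", t->name);  /* nxsched_add_prioritized stand-in */
}

static void deliver(struct tcb *t)
{
  if (g_slot == NULL)
    {
      g_slot = t;                       /* slot free: deliver and raise IPI */
    }
  else if (g_slot->prio < t->prio)
    {
      back_to_readytorun(g_slot);       /* evict the lower-priority occupant */
      g_slot = t;
    }
  else
    {
      back_to_readytorun(t);            /* keep the current occupant */
    }
}

int main(void)
{
  struct tcb low  = { 10, "low"  };
  struct tcb high = { 50, "high" };

  deliver(&low);
  deliver(&high);                       /* evicts "low" back to the ready list */
  printf("slot holds: %s\n", g_slot->name);
  return 0;
}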
sched/sched/sched_process_delivered.c (new file, 154 lines)
@@ -0,0 +1,154 @@
+/****************************************************************************
+ * sched/sched/sched_process_delivered.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdbool.h>
+#include <assert.h>
+
+#include <nuttx/queue.h>
+
+#include "irq/irq.h"
+#include "sched/sched.h"
+#include "sched/queue.h"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: nxsched_process_delivered
+ *
+ * Description:
+ *   This function processes the tcb waiting in g_delivertasks.
+ *   1. We use direct locking instead of enter_critical_section
+ *      to save processing time.
+ *   2. If there is a higher-priority task, we still run the
+ *      higher-priority task.
+ *   3. If the schedule lock is on, the task is placed in g_pendingtasks.
+ *
+ * Input Parameters:
+ *   cpu - The index of the CPU whose delivered task is processed.
+ *
+ * Returned Value:
+ *   None
+ *
+ * Assumptions:
+ *   - The caller must be in an interrupt context.
+ *   - The current CPU must not be locked.
+ *
+ ****************************************************************************/
+
+void nxsched_process_delivered(int cpu)
+{
+  FAR dq_queue_t *tasklist;
+  FAR struct tcb_s *next;
+  FAR struct tcb_s *prev;
+  FAR struct tcb_s *btcb = NULL;
+  FAR struct tcb_s *tcb;
+
+  DEBUGASSERT(g_cpu_nestcount[cpu] == 0);
+  DEBUGASSERT(up_interrupt_context());
+
+  if ((g_cpu_irqset & (1 << cpu)) == 0)
+    {
+      while (!spin_trylock_wo_note(&g_cpu_irqlock))
+        {
+          if (up_cpu_pausereq(cpu))
+            {
+              up_cpu_paused(cpu);
+            }
+        }
+
+      g_cpu_irqset |= (1 << cpu);
+    }
+
+  if (g_delivertasks[cpu] == NULL)
+    {
+      tcb = current_task(cpu);
+      if (tcb->irqcount <= 0)
+        {
+          cpu_irqlock_clear();
+        }
+
+      return;
+    }
+
+  if (nxsched_islocked_global())
+    {
+      btcb = g_delivertasks[cpu];
+      g_delivertasks[cpu] = NULL;
+      nxsched_add_prioritized(btcb, &g_pendingtasks);
+      btcb->task_state = TSTATE_TASK_PENDING;
+      tcb = current_task(cpu);
+      if (tcb->irqcount <= 0)
+        {
+          cpu_irqlock_clear();
+        }
+
+      return;
+    }
+
+  btcb = g_delivertasks[cpu];
+  tasklist = &g_assignedtasks[cpu];
+
+  /* Find the first entry with a priority below that of the delivered tcb */
+
+  for (next = (FAR struct tcb_s *)tasklist->head;
+       (next && btcb->sched_priority <= next->sched_priority);
+       next = next->flink);
+
+  prev = next->blink;
+  if (prev == NULL)
+    {
+      /* Special case:  Insert at the head of the list */
+
+      dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);
+      btcb->cpu = cpu;
+      btcb->task_state = TSTATE_TASK_RUNNING;
+
+      DEBUGASSERT(btcb->flink != NULL);
+      DEBUGASSERT(next == btcb->flink);
+      next->task_state = TSTATE_TASK_ASSIGNED;
+
+      if (btcb->lockcount > 0)
+        {
+          g_cpu_lockset |= (1 << cpu);
+        }
+    }
+  else
+    {
+      /* Insert in the middle of the list */
+
+      dq_insert_mid(prev, btcb, next);
+      btcb->cpu = cpu;
+      btcb->task_state = TSTATE_TASK_ASSIGNED;
+    }
+
+  g_delivertasks[cpu] = NULL;
+  tcb = current_task(cpu);
+
+  if (tcb->irqcount <= 0)
+    {
+      cpu_irqlock_clear();
+    }
+}
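On the receiving side, nxsched_process_delivered() makes a three-way decision: empty slot, nothing to do; scheduler locked, park the delivered task on g_pendingtasks; otherwise insert it into g_assignedtasks[cpu] by priority. A compact, hedged model of just that decision tree, with simplified stand-in states and globals:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum state { RUNNING, ASSIGNED, PENDING };

struct tcb { int prio; enum state st; };

static struct tcb *g_deliver;           /* models g_delivertasks[cpu] */
static bool g_sched_locked;             /* models nxsched_islocked_global() */

static void process_delivered(void)
{
  if (g_deliver == NULL)
    {
      return;                           /* nothing was delivered */
    }

  if (g_sched_locked)
    {
      g_deliver->st = PENDING;          /* scheduler locked: park the task */
      g_deliver = NULL;
      return;
    }

  g_deliver->st = ASSIGNED;             /* normal case: queue it on this CPU */
  g_deliver = NULL;
}

int main(void)
{
  struct tcb t = { 100, RUNNING };

  g_deliver = &t;
  g_sched_locked = false;
  process_delivered();
  printf("state=%d (1 == ASSIGNED)\n", (int)t.st);
  return 0;
}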
@@ -324,6 +324,19 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
   else
     {
       FAR dq_queue_t *tasklist;
+      int i;
+
+      /* If tcb is in g_delivertasks[i], clear that slot */
+
+      for (i = 0; i < CONFIG_SMP_NCPUS; i++)
+        {
+          if (tcb == g_delivertasks[i])
+            {
+              g_delivertasks[i] = NULL;
+              tcb->task_state = TSTATE_TASK_INVALID;
+              goto finish;
+            }
+        }
 
       tasklist = TLIST_HEAD(tcb, tcb->cpu);
 
@@ -341,6 +354,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
       tcb->task_state = TSTATE_TASK_INVALID;
     }
 
+finish:
   if (list_pendingtasks()->head && merge)
     {
       doswitch |= nxsched_merge_pending();
@@ -136,12 +136,20 @@ static int nxtask_restart(pid_t pid)
    */
 
 #ifdef CONFIG_SMP
-  tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu);
+  if ((FAR struct tcb_s *)tcb == g_delivertasks[tcb->cmn.cpu])
+    {
+      g_delivertasks[tcb->cmn.cpu] = NULL;
+    }
+  else
+    {
+      tasklist = TLIST_HEAD(&tcb->cmn, tcb->cmn.cpu);
+      dq_rem((FAR dq_entry_t *)tcb, tasklist);
+    }
 #else
   tasklist = TLIST_HEAD(&tcb->cmn);
+  dq_rem((FAR dq_entry_t *)tcb, tasklist);
 #endif
-
-  dq_rem((FAR dq_entry_t *)tcb, tasklist);
+
   tcb->cmn.task_state = TSTATE_TASK_INVALID;
 
   /* Deallocate anything left in the TCB's signal queues */