arch: remove up_cpu_pause up_cpu_resume up_cpu_paused up_cpu_pausereq
Reason: remove the "sync pause" mechanism and decouple the critical section from its dependency on enabling interrupts; after that we can go on to implement "schedlock + spinlock".

Changelist:
1. Modify the critical-section implementation so that it no longer enables interrupts or handles synchronous pause events.
2. Attach GIC_SMP_CPUCALL to the pause handler so that the arch interfaces up_cpu_paused_restore and up_cpu_paused_save can be removed.
3. Completely remove up_cpu_pause, up_cpu_resume, up_cpu_paused, and up_cpu_pausereq.
4. Rename up_cpu_pause_async to up_send_cpu_sgi.

Signed-off-by: hujun5 <hujun5@xiaomi.com>
parent 6392d5a6b3
commit d54bc8a9f8
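For orientation, here is a minimal sketch of the direction this change takes: the requesting CPU no longer performs the synchronous g_cpu_wait/g_cpu_paused handshake removed below, it simply raises a software-generated interrupt (SGI) and lets the target CPU run the attached SMP-call handler. The wrapper name up_send_cpu_sgi comes from the changelist above; its exact body and the final handler wiring are assumptions for illustration, not code contained in this commit.

/* Illustrative sketch only -- assumed shape of the replacement path. */

inline_function int up_send_cpu_sgi(int cpu)
{
  /* Fire-and-forget IPI toward 'cpu'; no spinning handshake. */

  arm_cpu_sgi(GIC_SMP_CPUCALL, (1 << cpu));
  return OK;
}

/* Cross-CPU work is then delivered through the handler attached at GIC
 * initialization time, e.g.:
 *
 *   DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL));
 */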
@@ -42,172 +42,10 @@
 * Private Data
 ****************************************************************************/

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows:
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m] and
 *    (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
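Read together with the comment above, the removed handshake can be condensed into one place. The following is an illustration only, assuming the spin_lock()/spin_unlock() semantics used throughout this file; the g_cpu_resumed handshake and instrumentation visible in the full functions below are omitted for brevity.

/* Condensed sketch of the removed pause handshake (not literal code). */

/* CPUn, the requester -- compare up_cpu_pause()/up_cpu_resume() below. */

static void pause_modify_resume_sketch(int m)
{
  spin_lock(&g_cpu_wait[m]);              /* will park CPUm in its handler  */
  spin_lock(&g_cpu_paused[m]);            /* "a pause request is pending"   */

  arm_cpu_sgi(GIC_SMP_CPUPAUSE, 1 << m);  /* interrupt CPUm                 */

  spin_lock(&g_cpu_paused[m]);            /* blocks until CPUm acknowledges */
  spin_unlock(&g_cpu_paused[m]);

  /* ... CPUn may now safely modify g_assignedtasks[m] ... */

  spin_unlock(&g_cpu_wait[m]);            /* up_cpu_resume(): release CPUm  */
}

/* CPUm, the target -- compare up_cpu_paused() below, run from the SGI. */

static void paused_in_handler_sketch(int m)
{
  spin_unlock(&g_cpu_paused[m]);          /* acknowledge "I am paused"      */
  spin_lock(&g_cpu_wait[m]);              /* spin until CPUn resumes us     */
  spin_unlock(&g_cpu_wait[m]);            /* ready for the next pause       */
}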

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation.  However, it is sometimes necessary to perform the
 *   pending pause operation in other contexts where the interrupt cannot be
 *   taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation.  However, it is sometimes necessary to perform the
 *   pending pause operation in other contexts where the interrupt cannot be
 *   taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the
   * requesting CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  /* Wait for the spinlock to be released.  The requesting CPU will release
   * the spinlock when the CPU is resumed.
   */

  spin_lock(&g_cpu_wait[cpu]);

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: arm_pause_async_handler
 *

@@ -236,57 +74,6 @@ int arm_pause_async_handler(int irq, void *context, void *arg)
  return OK;
}

/****************************************************************************
 * Name: arm_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int arm_pause_handler(int irq, void *context, void *arg)
{
  int cpu = this_cpu();

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock-breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused().  If the pause event has
   * already been processed then g_cpu_paused[cpu] will not be locked.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * has finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  return OK;
}
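The deadlock-breaking path referenced in the NOTE above did not live in this file; it sat in the scheduler's critical-section code. The following is a rough paraphrase of that behavior under assumed names (spin_trylock on g_cpu_irqlock stands in for the real sched/irq implementation, which differs in detail), shown only to clarify why the handler can see an already-serviced request.

/* Paraphrase (illustration only) of the old enter_critical_section()
 * spin on g_cpu_irqlock, which is the behavior the NOTE relies on.
 */

static void irqlock_spin_sketch(int cpu)
{
  while (!spin_trylock(&g_cpu_irqlock))
    {
      if (up_cpu_pausereq(cpu))
        {
          up_cpu_paused(cpu);   /* break the pause-vs-irqlock deadlock */
        }
    }
}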

/****************************************************************************
 * Name: up_cpu_pause_async
 *

@@ -313,113 +100,4 @@ inline_function int up_cpu_pause_async(int cpu)
  return OK;
}

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait; this just means that
   * the other CPU still hasn't finished responding to the previous resume
   * request.
   */

  DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));

  /* Wait for the other CPU to unlock g_cpu_paused, meaning that
   * it is fully paused and ready for up_cpu_resume();
   */

  spin_lock(&g_cpu_paused[cpu]);
  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}
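For completeness, this is the intended caller-side sequence of the two functions removed here. The caller shown is hypothetical; the scheduler code that actually performed this is not part of this hunk.

/* Hypothetical caller on CPUn that wants to edit CPUm's assigned-task list. */

static void modify_other_cpu_tasks_sketch(int m)
{
  up_cpu_pause(m);        /* CPUm parks in its SGI handler (spinning)       */

  /* ... it is now safe to manipulate g_assignedtasks[m] ... */

  up_cpu_resume(m);       /* CPUm returns from interrupt and resumes the
                           * (possibly different) task at the list head     */
}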

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order to resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
              !spin_is_locked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

#endif /* CONFIG_SMP */
@@ -219,11 +219,9 @@ void arm_gic0_initialize(void)
  /* Attach SGI interrupt handlers.  This attaches the handler to all CPUs. */

  DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC,
                         arm_pause_async_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL,
                         nxsched_smp_call_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL));
#endif

  arm_gic_dump("Exit arm_gic0_initialize", true, 0);
@@ -835,30 +835,6 @@ uint32_t *arm_decodefiq(uint32_t *regs);
int arm_start_handler(int irq, void *context, void *arg);
#endif

/****************************************************************************
 * Name: arm_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

#ifdef CONFIG_SMP
int arm_pause_handler(int irq, void *context, void *arg);
#endif

/****************************************************************************
 * Name: arm_pause_async_handler
 *
@ -42,221 +42,10 @@
|
||||
* Private Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
|
||||
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
|
||||
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Release the g_cpu_paused spinlock to synchronize with the
|
||||
* requesting CPU.
|
||||
*/
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
/* Wait for the spinlock to be released. The requesting CPU will release
|
||||
* the spinlock when the CPU is resumed.
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_handler
|
||||
*
|
||||
* Description:
|
||||
* This is the handler for SGI2. It performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* Standard interrupt handling
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int arm_pause_handler(int irq, void *context, void *arg)
|
||||
{
|
||||
/* Check for false alarms. Such false alarms could occur as a consequence of
|
||||
* some deadlock breaking logic that might have already serviced the SG2
|
||||
* interrupt by calling up_cpu_paused(). If the pause event has already
|
||||
* been processed then g_cpu_paused[cpu] will not be locked.
|
||||
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_async_handler
|
||||
*
|
||||
@ -311,113 +100,4 @@ inline_function int up_cpu_pause_async(int cpu)
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the SGI2
|
||||
* handler from returning until up_cpu_resume() is called; g_cpu_paused
|
||||
* is a handshake that will prevent this function from returning until
|
||||
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
arm_cpu_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
|
||||
* spinning on g_cpu_wait and will not continue until g_cpu_resume() is
|
||||
* called. g_cpu_paused will be unlocked in any case.
|
||||
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause in order to resume operation of
|
||||
* the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -160,11 +160,9 @@ void arm_gic0_initialize(void)
|
||||
/* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */
|
||||
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUSTART, arm_start_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm_pause_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC,
|
||||
arm_pause_async_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL,
|
||||
nxsched_smp_call_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL, nxsched_smp_call_handler, NULL));
|
||||
#endif
|
||||
|
||||
arm_gic_dump("Exit arm_gic0_initialize", true, 0);
|
||||
|
@ -805,30 +805,6 @@ uint32_t *arm_decodeirq(uint32_t *regs);
|
||||
int arm_start_handler(int irq, void *context, void *arg);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_handler
|
||||
*
|
||||
* Description:
|
||||
* This is the handler for SGI2. It performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* Standard interrupt handling
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int arm_pause_handler(int irq, void *context, void *arg);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_async_handler
|
||||
*
|
||||
|
@ -335,32 +335,7 @@ int arm_gic_raise_sgi(unsigned int sgi_id, uint16_t target_list);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_handler
|
||||
*
|
||||
* Description:
|
||||
* This is the handler for SGI2. It performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* Standard interrupt handling
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int arm_pause_handler(int irq, void *context, void *arg);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int arm_pause_async_handler(int irq, void *context, void *arg);
|
||||
#endif
|
||||
|
||||
void arm_gic_secondary_init(void);
|
||||
|
||||
#endif
|
||||
|
@ -567,7 +567,6 @@ static void gicv3_dist_init(void)
|
||||
#ifdef CONFIG_SMP
|
||||
/* Attach SGI interrupt handlers. This attaches the handler to all CPUs. */
|
||||
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC,
|
||||
arm64_pause_async_handler, NULL));
|
||||
DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL,
|
||||
@ -815,7 +814,7 @@ static void arm_gic_init(void)
|
||||
gicv3_cpuif_init();
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
up_enable_irq(GIC_SMP_CPUPAUSE);
|
||||
up_enable_irq(GIC_SMP_CPUCALL);
|
||||
up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC);
|
||||
#endif
|
||||
}
|
||||
|
@ -56,24 +56,6 @@
|
||||
* Private Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
|
||||
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
|
||||
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
static volatile int g_irq_to_handle[CONFIG_SMP_NCPUS][2];
|
||||
|
||||
/****************************************************************************
|
||||
@ -109,14 +91,6 @@ static bool handle_irqreq(int cpu)
|
||||
|
||||
if (irqreq)
|
||||
{
|
||||
/* Unlock the spinlock first */
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Then wait for the spinlock to be released */
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Clear g_irq_to_handle[cpu][i] */
|
||||
|
||||
g_irq_to_handle[cpu][i] = 0;
|
||||
@ -130,9 +104,6 @@ static bool handle_irqreq(int cpu)
|
||||
up_disable_irq(irqreq);
|
||||
}
|
||||
|
||||
/* Finally unlock the spinlock */
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
handled = true;
|
||||
|
||||
break;
|
||||
@ -146,151 +117,6 @@ static bool handle_irqreq(int cpu)
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* First, check if this IPI is a request to enable/disable an IRQ */
|
||||
|
||||
if (handle_irqreq(cpu))
|
||||
{
|
||||
return OK;
|
||||
}
|
||||
|
||||
/* Wait for the spinlock to be released */
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: arm_pause_handler
|
||||
*
|
||||
@ -310,6 +136,8 @@ int arm_pause_handler(int irq, void *c, void *arg)
|
||||
int cpu = this_cpu();
|
||||
int ret = OK;
|
||||
|
||||
handle_irqreq(cpu);
|
||||
|
||||
nxsched_smp_call_handler(irq, c, arg);
|
||||
|
||||
DPRINTF("cpu%d will be paused\n", cpu);
|
||||
@ -318,41 +146,6 @@ int arm_pause_handler(int irq, void *c, void *arg)
|
||||
|
||||
putreg32(0, CXD56_CPU_P2_INT + (4 * cpu));
|
||||
|
||||
/* Check for false alarms. Such false alarms could occur as a consequence of
|
||||
* some deadlock breaking logic that might have already serviced the SG2
|
||||
* interrupt by calling up_cpu_paused.
|
||||
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() would call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: Normally, we do not call up_cpu_paused() here because
 * the above enter_critical_section() would call up_cpu_paused()
 * inside because the caller holds a critical section.
 * However, cxd56's remote IRQ control logic also uses this handler
 * and a caller might not take a critical section to avoid a deadlock
 * during up_enable_irq() and up_disable_irq().  This is allowed
 * because the IRQ control logic does not interact with the scheduler.
 * This means that if the request was not handled above, we need
 * to call up_cpu_paused() here again.
 */
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
ret = up_cpu_paused(cpu);
|
||||
}
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
nxsched_process_delivered(cpu);
|
||||
|
||||
return ret;
|
||||
@ -411,120 +204,6 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
DPRINTF("cpu=%d\n", cpu);
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the
|
||||
* handler from returning until up_cpu_resume() is called; g_cpu_paused
|
||||
* is a handshake that will prevent this function from returning until
|
||||
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Generate IRQ for CPU(cpu) */
|
||||
|
||||
up_cpu_pause_async(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
|
||||
* spinning on g_cpu_wait and will not continue until g_cpu_resume() is
|
||||
* called. g_cpu_paused will be unlocked in any case.
|
||||
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause in order to resume operation of
|
||||
* the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
DPRINTF("cpu=%d\n", cpu);
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_send_irqreq()
|
||||
*
|
||||
@ -546,11 +225,6 @@ void up_send_irqreq(int idx, int irq, int cpu)
|
||||
{
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Wait for the spinlocks to be released */
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Set irq for the cpu */
|
||||
|
||||
g_irq_to_handle[cpu][idx] = irq;
|
||||
@ -558,21 +232,6 @@ void up_send_irqreq(int idx, int irq, int cpu)
|
||||
/* Generate IRQ for CPU(cpu) */
|
||||
|
||||
putreg32(1, CXD56_CPU_P2_INT + (4 * cpu));
|
||||
|
||||
/* Wait for the handler is executed on cpu */
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Finally unlock the spinlock to proceed the handler */
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -52,166 +52,10 @@
|
||||
* Public Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
|
||||
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
|
||||
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
|
||||
* However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Wait for the spinlock to be released */
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: lc823450_pause_handler
|
||||
*
|
||||
@ -245,29 +89,6 @@ int lc823450_pause_handler(int irq, void *c, void *arg)
|
||||
putreg32(IPICLR_INTISR1_CLR_1, IPICLR);
|
||||
}
|
||||
|
||||
/* Check for false alarms. Such false alarms could occur as a consequence of
|
||||
* some deadlock breaking logic that might have already serviced the SG2
|
||||
* interrupt by calling up_cpu_paused.
|
||||
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
nxsched_process_delivered(cpu);
|
||||
|
||||
return OK;
|
||||
@ -332,119 +153,3 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
up_cpu_pause_async(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
DPRINTF("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the SGI2
|
||||
* handler from returning until up_cpu_resume() is called; g_cpu_paused
|
||||
* is a handshake that will prevent this function from returning until
|
||||
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Execute Pause IRQ to CPU(cpu) */
|
||||
|
||||
up_cpu_pause_async(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
|
||||
* spinning on g_cpu_wait and will not continue until g_cpu_resume() is
|
||||
* called. g_cpu_paused will be unlocked in any case.
|
||||
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause in order to resume operation of
|
||||
* the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
DPRINTF("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
@ -63,7 +63,7 @@
|
||||
* Public Data
|
||||
****************************************************************************/
|
||||
|
||||
extern volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Private Functions
|
||||
|
@ -50,28 +50,6 @@
|
||||
#define DPRINTF(fmt, args...) do {} while (0)
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Private Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
|
||||
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
|
||||
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
/****************************************************************************
 * Name: rp2040_handle_irqreq
 *
@@ -88,14 +66,6 @@ static void rp2040_handle_irqreq(int irqreq)
{
  DEBUGASSERT(this_cpu() == 0);

  /* Unlock the spinlock first */

  spin_unlock(&g_cpu_paused[0]);

  /* Then wait for the spinlock to be released */

  spin_lock(&g_cpu_wait[0]);

  if (irqreq > 0)
    {
      up_enable_irq(irqreq);
@@ -104,154 +74,12 @@
    {
      up_disable_irq(-irqreq);
    }

  /* Finally unlock the spinlock */

  spin_unlock(&g_cpu_wait[0]);
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the requesting
   * CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_lock(&g_cpu_wait[cpu]);

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: arm_pause_handler
 *
@@ -301,29 +129,6 @@ int arm_pause_handler(int irq, void *c, void *arg)

  DPRINTF("cpu%d will be paused\n", cpu);

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  nxsched_process_delivered(cpu);

  return OK;
@@ -384,122 +189,6 @@ void up_send_smp_call(cpu_set_t cpuset)
    }
}

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  DPRINTF("cpu=%d\n", cpu);

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait, this just means that
   * the other CPU still hasn't finished responding to the previous resume
   * request.
   */

  DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  DEBUGASSERT(cpu != this_cpu());

  /* Generate IRQ for CPU(cpu) */

  up_cpu_pause_async(cpu);

  /* Wait for the other CPU to unlock g_cpu_paused meaning that
   * it is fully paused and ready for up_cpu_resume();
   */

  spin_lock(&g_cpu_paused[cpu]);
  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order to resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DPRINTF("cpu=%d\n", cpu);

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
              !spin_is_locked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}
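The pair above is what the scheduler uses to edit another CPU's assigned task list safely. A minimal caller-side sketch follows; edit_other_cpu_tasks_sketch() and modify_assigned_list() are hypothetical names used only for illustration, and error handling is reduced to a single check.

/* Hypothetical stand-in for whatever change the caller needs to make to
 * g_assignedtasks[cpu] while that CPU is parked.
 */

extern void modify_assigned_list(int cpu);

static void edit_other_cpu_tasks_sketch(int cpu)
{
  int ret = up_cpu_pause(cpu);    /* Park 'cpu' in its pause handler       */
  if (ret == OK)
    {
      modify_assigned_list(cpu);  /* Safe: that CPU cannot run tasks now   */
      up_cpu_resume(cpu);         /* Let the parked handler return         */
    }
}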
/****************************************************************************
 * Name: rp2040_send_irqreq()
 *
@@ -517,31 +206,11 @@ int up_cpu_resume(int cpu)

void rp2040_send_irqreq(int irqreq)
{
  /* Wait for the spinlocks to be released */

  spin_lock(&g_cpu_wait[0]);
  spin_lock(&g_cpu_paused[0]);

  /* Send IRQ number to Core #0 */

  while (!(getreg32(RP2040_SIO_FIFO_ST) & RP2040_SIO_FIFO_ST_RDY))
    ;
  putreg32(irqreq, RP2040_SIO_FIFO_WR);

  /* Wait until the handler has been executed on that cpu */

  spin_lock(&g_cpu_paused[0]);
  spin_unlock(&g_cpu_paused[0]);

  /* Finally unlock the spinlock so that the handler can proceed */

  spin_unlock(&g_cpu_wait[0]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[0]);

  spin_unlock(&g_cpu_resumed[0]);
}

#endif /* CONFIG_SMP */
@@ -54,166 +54,10 @@
 * Private Data
 ****************************************************************************/

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m]
 *    and (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the requesting
   * CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_lock(&g_cpu_wait[cpu]);

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: arm_pause_handler
 *
@@ -249,16 +93,6 @@ int arm_pause_handler(int irq, void *c, void *arg)
      putreg32(0x1, SAM_IPC0_ICCR);
    }

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused.
   */

  if (spin_is_locked(&g_cpu_paused[cpu]))
    {
      return up_cpu_paused(cpu);
    }

  nxsched_process_delivered(cpu);

  return OK;
@@ -326,121 +160,4 @@ void up_send_smp_call(cpu_set_t cpuset)
    }
}

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  DPRINTF("cpu=%d\n", cpu);

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the
   * interrupt handler from returning until up_cpu_resume() is called;
   * g_cpu_paused is a handshake that will prevent this function from
   * returning until the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait, this just means that
   * the other CPU still hasn't finished responding to the previous resume
   * request.
   */

  DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute Pause IRQ to CPU(cpu) */

  /* Set IPC Interrupt (IRQ0) (write-only) */

  up_cpu_pause_async(cpu);

  /* Wait for the other CPU to unlock g_cpu_paused meaning that
   * it is fully paused and ready for up_cpu_resume();
   */

  spin_lock(&g_cpu_paused[cpu]);
  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order to resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DPRINTF("cpu=%d\n", cpu);

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
              !spin_is_locked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

#endif /* CONFIG_SMP */
@@ -41,176 +41,10 @@
 * Private Data
 ****************************************************************************/

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m]
 *    and (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the
   * requesting CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  /* Wait for the spinlock to be released.  The requesting CPU will release
   * the spinlock when the CPU is resumed.
   */

  spin_lock(&g_cpu_wait[cpu]);

  /* This CPU has been resumed.  Restore the exception context of the TCB at
   * the (new) head of the assigned task list.
   */

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  UNUSED(tcb);

  return OK;
}

/****************************************************************************
 * Name: arm64_pause_async_handler
 *
@@ -240,57 +74,6 @@ int arm64_pause_async_handler(int irq, void *context, void *arg)
  return OK;
}

/****************************************************************************
 * Name: arm64_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int arm64_pause_handler(int irq, void *context, void *arg)
{
  int cpu = this_cpu();

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused().  If the pause event has
   * already been processed then g_cpu_paused[cpu] will not be locked.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  return OK;
}

/****************************************************************************
 * Name: up_cpu_pause_async
 *
@@ -318,113 +101,3 @@ inline_function int up_cpu_pause_async(int cpu)

  return OK;
}

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait, this just means that
   * the other CPU still hasn't finished responding to the previous resume
   * request.
   */

  DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  arm64_gic_raise_sgi(GIC_SMP_CPUPAUSE, (1 << cpu));

  /* Wait for the other CPU to unlock g_cpu_paused meaning that
   * it is fully paused and ready for up_cpu_resume();
   */

  spin_lock(&g_cpu_paused[cpu]);

  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order to resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
              !spin_is_locked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}
@@ -328,28 +328,6 @@ int arm64_gic_v2m_initialize(void);

#ifdef CONFIG_SMP

/****************************************************************************
 * Name: arm64_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int arm64_pause_handler(int irq, void *context, void *arg);

/****************************************************************************
 * Name: arm64_pause_async_handler
 *

@@ -864,7 +864,6 @@ static void arm_gic0_initialize(void)
#ifdef CONFIG_SMP
  /* Attach SGI interrupt handlers.  This attaches the handler to all CPUs. */

  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC,
                         arm64_pause_async_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL,

@@ -653,7 +653,6 @@ static void gicv3_dist_init(void)
#ifdef CONFIG_SMP
  /* Attach SGI interrupt handlers.  This attaches the handler to all CPUs. */

  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE, arm64_pause_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUPAUSE_ASYNC,
                         arm64_pause_async_handler, NULL));
  DEBUGVERIFY(irq_attach(GIC_SMP_CPUCALL,
@@ -953,7 +952,6 @@ static void arm64_gic_init(void)
  gicv3_cpuif_init();

#ifdef CONFIG_SMP
  up_enable_irq(GIC_SMP_CPUPAUSE);
  up_enable_irq(GIC_SMP_CPUPAUSE_ASYNC);
  up_enable_irq(GIC_SMP_CPUCALL);
#endif
@@ -43,174 +43,10 @@
 * Public Data
 ****************************************************************************/

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m]
 *    and (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  /* Save the current context at current_regs into the TCB at the head
   * of the assigned task list for this CPU.
   */

  riscv_savecontext(tcb);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the requesting
   * CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_lock(&g_cpu_wait[cpu]);

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  /* Then switch contexts.  Any necessary address environment changes
   * will be made when the interrupt returns.
   */

  riscv_restorecontext(tcb);

  return OK;
}
/****************************************************************************
 * Name: riscv_pause_handler
 *
@@ -236,29 +72,6 @@ int riscv_pause_handler(int irq, void *c, void *arg)

  riscv_ipi_clear(cpu);

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  tcb = current_task(cpu);
  riscv_savecontext(tcb);
  nxsched_process_delivered(cpu);
@@ -321,118 +134,3 @@ void up_send_smp_call(cpu_set_t cpuset)
    }
}

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  sinfo("cpu=%d\n", cpu);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait, this just means that
   * the other CPU still hasn't finished responding to the previous resume
   * request.
   */

  DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute Pause IRQ to CPU(cpu) */

  riscv_ipi_send(cpu);

  /* Wait for the other CPU to unlock g_cpu_paused meaning that
   * it is fully paused and ready for up_cpu_resume();
   */

  spin_lock(&g_cpu_paused[cpu]);

  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return g_cpu_wait will be locked, the other CPU will be
   * spinning on g_cpu_wait and will not continue until up_cpu_resume() is
   * called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause in order to resume operation
 *   of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  sinfo("cpu=%d\n", cpu);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
              !spin_is_locked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}
@@ -33,28 +33,32 @@
#include "sched/sched.h"
#include "sim_internal.h"

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m]
 *    and (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: sim_smp_call_handler
 *
 * Description:
 *   This is the handler for SMP_CALL.
 *
 ****************************************************************************/

static int sim_smp_call_handler(int irq, void *context, void *arg)
{
  struct tcb_s *tcb;
  int cpu = this_cpu();

  tcb = current_task(cpu);
  sim_savestate(tcb->xcp.regs);
  nxsched_smp_call_handler(irq, context, arg);
  tcb = current_task(cpu);
  sim_restorestate(tcb->xcp.regs);

  return OK;
}

/****************************************************************************
 * Name: sim_cpupause_handler
 *
@@ -77,30 +81,6 @@ static int sim_cpupause_handler(int irq, void *context, void *arg)
  struct tcb_s *tcb;
  int cpu = this_cpu();

  /* Check for false alarms.  Such false alarms could occur as a consequence
   * of some deadlock breaking logic that might have already serviced the
   * SGI2 interrupt by calling up_cpu_paused().  If the pause event has
   * already been processed then g_cpu_paused[cpu] will not be locked.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process a pause request to break a deadlock
       * because the caller held a critical section.  Once up_cpu_paused()
       * finished, the caller will proceed and release the g_cpu_irqlock.
       * Then this CPU will acquire g_cpu_irqlock in the function.
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  tcb = current_task(cpu);
  sim_savestate(tcb->xcp.regs);
  nxsched_process_delivered(cpu);
@@ -114,152 +94,6 @@ static int sim_cpupause_handler(int irq, void *context, void *arg)
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_is_locked(&g_cpu_paused[cpu]);
}

/****************************************************************************
 * Name: up_cpu_paused_save
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_save(void)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  /* Save the current context at current_regs into the TCB at the head
   * of the assigned task list for this CPU.
   */

  sim_savestate(tcb->xcp.regs);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-specific
 *   implementation of up_cpu_pause().  However, it is sometimes necessary
 *   to perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  /* Release the g_cpu_paused spinlock to synchronize with the requesting
   * CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);

  spin_lock(&g_cpu_wait[cpu]);

  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

/****************************************************************************
 * Name: up_cpu_paused_restore
 *
 * Description:
 *   Restore the state of the CPU after it was paused via up_cpu_pause(),
 *   and resume normal tasking.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value indicating
 *   the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused_restore(void)
{
  struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  /* Then switch contexts.  Any necessary address environment changes
   * will be made when the interrupt returns.
   */

  sim_restorestate(tcb->xcp.regs);

  return OK;
}

/****************************************************************************
 * Name: host_cpu_started
 *
@ -370,115 +204,6 @@ inline_function int up_cpu_pause_async(int cpu)
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Take both spinlocks.  The g_cpu_wait spinlock will prevent the
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
up_cpu_pause_async(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called.  g_cpu_paused will be unlocked in any case.
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause() in order to resume
* operation of the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
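For context, the classic caller of this pause/resume pair is the scheduler itself; a condensed sketch of that usage, modelled on the nxsched_remove_readytorun() hunk further down in this change, looks roughly like this:

#include <nuttx/arch.h>
#include "sched/sched.h"

/* Stop the CPU that 'tcb' is running on, edit its assigned-task list while
 * that CPU is parked in its pause handler, then let it continue.  This is
 * exactly the pattern that removing the synchronous pause makes unnecessary.
 */

static void remove_remote_running_task(FAR struct tcb_s *tcb)
{
  up_cpu_pause(tcb->cpu);       /* Target CPU spins inside its IRQ handler */
  nxsched_remove_running(tcb);  /* Safe: that CPU is not scheduling now */
  up_cpu_resume(tcb->cpu);      /* Unblocks the handler on that CPU */
}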
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/****************************************************************************
|
||||
* Name: sim_init_func_call_ipi
|
||||
*
|
||||
@ -495,7 +220,7 @@ int up_cpu_resume(int cpu)
|
||||
int sim_init_func_call_ipi(int irq)
|
||||
{
|
||||
up_enable_irq(irq);
|
||||
return irq_attach(irq, nxsched_smp_call_handler, NULL);
|
||||
return irq_attach(irq, sim_smp_call_handler, NULL);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
@ -516,4 +241,3 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
host_send_func_call_ipi(cpu);
|
||||
}
|
||||
}
|
||||
#endif
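After this change the only cross-CPU notification an architecture has to provide is the asynchronous IPI; a minimal sketch of the notification side, using only the up_send_smp_call() interface shown in the hunk above (the helper name notify_cpu() is made up for the example):

#include <sched.h>
#include <nuttx/arch.h>

/* Ask CPU 'cpu' to run its SMP-call handler.  The work itself is queued by
 * the generic nxsched SMP-call layer before the IPI is raised.
 */

static void notify_cpu(int cpu)
{
  cpu_set_t cpuset;

  CPU_ZERO(&cpuset);
  CPU_SET(cpu, &cpuset);
  up_send_smp_call(cpuset);   /* Raises the IPI; the handler runs on 'cpu' */
}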
|
||||
|
@ -42,174 +42,10 @@
|
||||
* Public Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/

volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
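A condensed sketch of the protocol the comment above describes, with the two sides written out as plain functions (illustrative only; the real code lives in up_cpu_pause() and up_cpu_paused() below):

/* CPUn side: request that CPUm parks itself and wait for the handshake */

static void pause_request(int m)
{
  spin_lock(&g_cpu_wait[m]);      /* Will hold CPUm inside its handler */
  spin_lock(&g_cpu_paused[m]);    /* Handshake lock, released by CPUm */

  /* ... raise the pause IPI on CPUm here ... */

  spin_lock(&g_cpu_paused[m]);    /* Blocks until CPUm acknowledges */
  spin_unlock(&g_cpu_paused[m]);
}

/* CPUm side, from its interrupt handler: acknowledge the request, then spin
 * until up_cpu_resume() releases g_cpu_wait[m].
 */

static void pause_acknowledge(int m)
{
  spin_unlock(&g_cpu_paused[m]);  /* Unblocks CPUn */
  spin_lock(&g_cpu_wait[m]);      /* Parked here until resumed */
  spin_unlock(&g_cpu_wait[m]);    /* Ready for the next pause request */
}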
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
/* Save the current context at current_regs into the TCB at the head
|
||||
* of the assigned task list for this CPU.
|
||||
*/
|
||||
|
||||
sparc_savestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Release the g_cpu_paused spinlock to synchronize with the requesting CPU */
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
/* Then switch contexts. Any necessary address environment changes
|
||||
* will be made when the interrupt returns.
|
||||
*/
|
||||
|
||||
sparc_restorestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: s698pm_pause_handler
|
||||
*
|
||||
@ -229,37 +65,13 @@ int s698pm_pause_handler(int irq, void *c, void *arg)
|
||||
struct tcb_s *tcb;
|
||||
int cpu = this_cpu();
|
||||
|
||||
nxsched_smp_call_handler(irq, c, arg);
|
||||
|
||||
/* Clear IPI (Inter-Processor-Interrupt) */
|
||||
|
||||
putreg32(1 << S698PM_IPI_VECTOR, S698PM_IRQREG_ICLEAR);
|
||||
|
||||
/* Check for false alarms.  Such false alarms could occur as a consequence
* of some deadlock breaking logic that might have already serviced the SG2
* interrupt by calling up_cpu_paused.
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
tcb = current_task(cpu);
|
||||
sparc_savestate(tcb->xcp.regs);
|
||||
nxsched_smp_call_handler(irq, c, arg);
|
||||
nxsched_process_delivered(cpu);
|
||||
tcb = current_task(cpu);
|
||||
sparc_restorestate(tcb->xcp.regs);
|
||||
@ -322,119 +134,3 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
up_cpu_pause_async(cpu);
|
||||
}
|
||||
}
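The handler shape that survives this change is visible in the s698pm hunk above; restated as a stand-alone sketch (using the same SPARC register save/restore helpers seen in that hunk):

int pause_handler_sketch(int irq, void *c, void *arg)
{
  int cpu = this_cpu();
  struct tcb_s *tcb = current_task(cpu);

  sparc_savestate(tcb->xcp.regs);         /* Save the interrupted context */
  nxsched_smp_call_handler(irq, c, arg);  /* Run any queued cross-CPU calls */
  nxsched_process_delivered(cpu);         /* Adopt tasks delivered to this CPU */

  tcb = current_task(cpu);                /* May have changed above */
  sparc_restorestate(tcb->xcp.regs);
  return OK;
}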
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Execute Pause IRQ to CPU(cpu) */
|
||||
|
||||
up_cpu_pause_async(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called.  g_cpu_paused will be unlocked in any case.
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause() in order to resume
* operation of the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
@ -44,180 +44,10 @@
|
||||
* Public Data
|
||||
****************************************************************************/
|
||||
|
||||
/* These spinlocks are used in the SMP configuration in order to implement
|
||||
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
|
||||
*
|
||||
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
|
||||
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
|
||||
* 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
|
||||
*
|
||||
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
|
||||
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
|
||||
* so that it will be ready for the next pause operation.
|
||||
*/
|
||||
|
||||
volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
/* Save the current context at current_regs into the TCB at the head
|
||||
* of the assigned task list for this CPU.
|
||||
*/
|
||||
|
||||
x86_64_savestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Release the g_cpu_paused spinlock to synchronize with the
|
||||
* requesting CPU.
|
||||
*/
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
/* Wait for the spinlock to be released. The requesting CPU will release
|
||||
* the spinlock when the CPU is resumed.
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
/* Then switch contexts. Any necessary address environment changes
|
||||
* will be made when the interrupt returns.
|
||||
*/
|
||||
|
||||
x86_64_restorestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_pause_handler
|
||||
*
|
||||
@ -234,32 +64,14 @@ int up_cpu_paused_restore(void)
|
||||
|
||||
int up_pause_handler(int irq, void *c, void *arg)
|
||||
{
|
||||
struct tcb_s *tcb;
|
||||
int cpu = this_cpu();
|
||||
|
||||
tcb = current_task(cpu);
|
||||
x86_64_savestate(tcb->xcp.regs);
|
||||
nxsched_smp_call_handler(irq, c, arg);
|
||||
|
||||
/* Check for false alarms.  Such false alarms could occur as a consequence
* of some deadlock breaking logic that might have already serviced the SG2
* interrupt by calling up_cpu_paused.
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
tcb = current_task(cpu);
|
||||
x86_64_restorestate(tcb->xcp.regs);
|
||||
|
||||
return OK;
|
||||
}
|
||||
@ -349,123 +161,3 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
{
|
||||
up_trigger_irq(SMP_IPI_IRQ, cpuset);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
cpu_set_t cpuset;
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prevent this function from returning until
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Execute Pause IRQ to CPU(cpu) */
|
||||
|
||||
CPU_ZERO(&cpuset);
|
||||
CPU_SET(cpu, &cpuset);
|
||||
|
||||
up_trigger_irq(SMP_IPI_IRQ, cpuset);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called.  g_cpu_paused will be unlocked in any case.
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause() in order to resume
* operation of the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
sinfo("cpu=%d\n", cpu);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the SGI2
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -41,152 +41,10 @@
|
||||
* Private Data
|
||||
****************************************************************************/
|
||||
|
||||
static spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
|
||||
static spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
|
||||
static spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
bool up_cpu_pausereq(int cpu)
|
||||
{
|
||||
return spin_is_locked(&g_cpu_paused[cpu]);
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_save(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
/* Update scheduler parameters */
|
||||
|
||||
nxsched_suspend_scheduler(tcb);
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we are paused */
|
||||
|
||||
sched_note_cpu_paused(tcb);
|
||||
#endif
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending pause
|
||||
* operation in other contexts where the interrupt cannot be taken in
|
||||
* order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused(int cpu)
|
||||
{
|
||||
/* Release the g_cpu_paused spinlock to synchronize with the requesting CPU */
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_paused_restore(void)
|
||||
{
|
||||
struct tcb_s *tcb = this_task();
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify that we have resumed */
|
||||
|
||||
sched_note_cpu_resumed(tcb);
|
||||
#endif
|
||||
|
||||
/* Reset scheduler parameters */
|
||||
|
||||
nxsched_resume_scheduler(tcb);
|
||||
|
||||
UNUSED(tcb);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: xtensa_pause_handler
|
||||
*
|
||||
@ -210,35 +68,8 @@ int up_cpu_paused_restore(void)
|
||||
|
||||
void xtensa_pause_handler(int irq, void *context, void *arg)
|
||||
{
|
||||
struct tcb_s *tcb;
|
||||
int cpu = this_cpu();
|
||||
|
||||
nxsched_smp_call_handler(irq, context, arg);
|
||||
|
||||
/* Check for false alarms.  Such false alarms could occur as a consequence
* of some deadlock breaking logic that might have already serviced the
* interrupt by calling up_cpu_paused.
*/
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* NOTE: The following enter_critical_section() will call
|
||||
* up_cpu_paused() to process a pause request to break a deadlock
|
||||
* because the caller held a critical section. Once up_cpu_paused()
|
||||
* finished, the caller will proceed and release the g_cpu_irqlock.
|
||||
* Then this CPU will acquire g_cpu_irqlock in the function.
|
||||
*/
|
||||
|
||||
irqstate_t flags = enter_critical_section();
|
||||
|
||||
/* NOTE: the pause request should not exist here */
|
||||
|
||||
DEBUGVERIFY(!up_cpu_pausereq(cpu));
|
||||
|
||||
leave_critical_section(flags);
|
||||
}
|
||||
|
||||
nxsched_process_delivered(cpu);
|
||||
nxsched_process_delivered(this_cpu());
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
@ -294,114 +125,4 @@ void up_send_smp_call(cpu_set_t cpuset)
|
||||
}
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be stopped.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_pause(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the pause event */
|
||||
|
||||
sched_note_cpu_pause(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Take both spinlocks. The g_cpu_wait spinlock will prevent the interrupt
|
||||
* handler from returning until up_cpu_resume() is called; g_cpu_paused
|
||||
* is a handshake that will prevent this function from returning until
|
||||
* the CPU is actually paused.
|
||||
* Note that we might spin before getting g_cpu_wait, this just means that
|
||||
* the other CPU still hasn't finished responding to the previous resume
|
||||
* request.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_lock(&g_cpu_wait[cpu]);
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
up_cpu_pause_async(cpu);
|
||||
|
||||
/* Wait for the other CPU to unlock g_cpu_paused meaning that
|
||||
* it is fully paused and ready for up_cpu_resume();
|
||||
*/
|
||||
|
||||
spin_lock(&g_cpu_paused[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_paused[cpu]);
|
||||
|
||||
/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until up_cpu_resume() is
* called.  g_cpu_paused will be unlocked in any case.
*/
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause() in order to resume
* operation of the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being re-started.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int up_cpu_resume(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION
|
||||
/* Notify of the resume event */
|
||||
|
||||
sched_note_cpu_resume(this_task(), cpu);
|
||||
#endif
|
||||
|
||||
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());
|
||||
|
||||
/* Release the spinlock. Releasing the spinlock will cause the interrupt
|
||||
* handler on 'cpu' to continue and return from interrupt to the newly
|
||||
* established thread.
|
||||
*/
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_wait[cpu]) &&
|
||||
!spin_is_locked(&g_cpu_paused[cpu]));
|
||||
|
||||
spin_unlock(&g_cpu_wait[cpu]);
|
||||
|
||||
/* Ensure the CPU has been resumed to avoid causing a deadlock */
|
||||
|
||||
spin_lock(&g_cpu_resumed[cpu]);
|
||||
|
||||
spin_unlock(&g_cpu_resumed[cpu]);
|
||||
|
||||
return OK;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -2288,34 +2288,6 @@ int up_cpu_idlestack(int cpu, FAR struct tcb_s *tcb, size_t stack_size);
|
||||
int up_cpu_start(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause
|
||||
*
|
||||
* Description:
|
||||
* Save the state of the current task at the head of the
|
||||
* g_assignedtasks[cpu] task list and then pause task execution on the
|
||||
* CPU.
|
||||
*
|
||||
* This function is called by the OS when the logic executing on one CPU
|
||||
* needs to modify the state of the g_assignedtasks[cpu] list for another
|
||||
* CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
* Assumptions:
|
||||
* Called from within a critical section; up_cpu_resume() must be called
|
||||
* later while still within the same critical section.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int up_cpu_pause(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pause_async
|
||||
*
|
||||
@ -2339,126 +2311,6 @@ int up_cpu_pause(int cpu);
|
||||
int up_cpu_pause_async(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_pausereq
|
||||
*
|
||||
* Description:
|
||||
* Return true if a pause request is pending for this CPU.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be queried
|
||||
*
|
||||
* Returned Value:
|
||||
* true = a pause request is pending.
|
||||
* false = no pause request is pending.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
bool up_cpu_pausereq(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_save
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending
|
||||
* pause operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int up_cpu_paused_save(void);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused
|
||||
*
|
||||
* Description:
|
||||
* Handle a pause request from another CPU. Normally, this logic is
|
||||
* executed from interrupt handling logic within the architecture-specific
* implementation.  However, it is sometimes necessary to perform the pending pause
|
||||
* operation in other contexts where the interrupt cannot be taken
|
||||
* in order to avoid deadlocks.
|
||||
*
|
||||
* This function performs the following operations:
|
||||
*
|
||||
* 1. It saves the current task state at the head of the current assigned
|
||||
* task list.
|
||||
* 2. It waits on a spinlock, then
|
||||
* 3. Returns from interrupt, restoring the state of the new task at the
|
||||
* head of the ready to run list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU to be paused
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int up_cpu_paused(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_paused_restore
|
||||
*
|
||||
* Description:
|
||||
* Restore the state of the CPU after it was paused via up_cpu_pause(),
|
||||
* and resume normal tasking.
|
||||
*
|
||||
* Input Parameters:
|
||||
* None
|
||||
*
|
||||
* Returned Value:
|
||||
* On success, OK is returned. Otherwise, a negated errno value indicating
|
||||
* the nature of the failure is returned.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int up_cpu_paused_restore(void);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_cpu_resume
|
||||
*
|
||||
* Description:
|
||||
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
|
||||
* state of the task at the head of the g_assignedtasks[cpu] list, and
|
||||
* resume normal tasking.
|
||||
*
|
||||
* This function is called after up_cpu_pause in order to resume operation
|
||||
* of the CPU after modifying its g_assignedtasks[cpu] list.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of the CPU being resumed.
|
||||
*
|
||||
* Returned Value:
|
||||
* Zero on success; a negated errno value on failure.
|
||||
*
|
||||
* Assumptions:
|
||||
* Called from within a critical section; up_cpu_pause() must have
|
||||
* previously been called within the same critical section.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int up_cpu_resume(int cpu);
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
* Name: up_romgetc
|
||||
*
|
||||
|
@ -81,6 +81,7 @@
|
||||
SP_DMB(); \
|
||||
g_cpu_irqlock = SP_UNLOCKED; \
|
||||
SP_DSB(); \
|
||||
SP_SEV(); \
|
||||
} \
|
||||
while (0)
|
||||
#endif
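The context lines above show the release half of g_cpu_irqlock with its barriers; restated as a stand-alone sketch using the same primitives (SP_DMB/SP_DSB/SP_SEV are the barrier/event macros visible in the hunk, and the function name is made up for the example):

static inline void cpu_irqlock_release(void)
{
  SP_DMB();                    /* Order prior writes before the release */
  g_cpu_irqlock = SP_UNLOCKED; /* Publish the unlocked state */
  SP_DSB();                    /* Make the store visible */
  SP_SEV();                    /* Wake any CPU waiting in WFE */
}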
|
||||
|
@ -75,96 +75,6 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
|
||||
* Private Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: irq_waitlock
|
||||
*
|
||||
* Description:
|
||||
* Spin to get g_cpu_irqlock, handling a known deadlock condition:
|
||||
*
|
||||
* A deadlock may occur if enter_critical_section is called from an
|
||||
* interrupt handler. Suppose:
|
||||
*
|
||||
* - CPUn is in a critical section and has the g_cpu_irqlock spinlock.
|
||||
* - CPUm takes an interrupt and attempts to enter the critical section.
|
||||
* - It spins waiting on g_cpu_irqlock with interrupts disabled.
|
||||
* - CPUn calls up_cpu_pause() to pause operation on CPUm. This will
* issue an inter-CPU interrupt to CPUm.
* - But interrupts are disabled on CPUm so the pause interrupt is never
* handled, causing the deadlock.
|
||||
*
|
||||
* This same deadlock can occur in the normal tasking case:
|
||||
*
|
||||
* - A task on CPUn enters a critical section and has the g_cpu_irqlock
|
||||
* spinlock.
|
||||
* - Another task on CPUm attempts to enter the critical section but has
|
||||
* to wait, spinning to get g_cpu_irqlock with interrupts disabled.
|
||||
* - The task on CPUn causes a new task to become ready-to-run and the
|
||||
* scheduler selects CPUm. CPUm is requested to pause via a pause
|
||||
* interrupt.
|
||||
* - But the task on CPUm is also attempting to enter the critical
|
||||
* section. Since it is spinning with interrupts disabled, CPUm cannot
|
||||
* process the pending pause interrupt, causing the deadlock.
|
||||
*
|
||||
* This function detects this deadlock condition while spinning with
|
||||
* interrupts disabled.
|
||||
*
|
||||
* Input Parameters:
|
||||
* cpu - The index of CPU that is trying to enter the critical section.
|
||||
*
|
||||
* Returned Value:
|
||||
* True: The g_cpu_irqlock spinlock has been taken.
|
||||
* False: The g_cpu_irqlock spinlock has not been taken yet, but there is
|
||||
* a pending pause interrupt request.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static inline_function bool irq_waitlock(int cpu)
|
||||
{
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
|
||||
FAR struct tcb_s *tcb = current_task(cpu);
|
||||
|
||||
/* Notify that we are waiting for a spinlock */
|
||||
|
||||
sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_LOCK);
|
||||
#endif
|
||||
|
||||
/* Duplicate the spin_lock() logic from spinlock.c, but adding the check
|
||||
* for the deadlock condition.
|
||||
*/
|
||||
|
||||
while (!spin_trylock_wo_note(&g_cpu_irqlock))
|
||||
{
|
||||
/* Is a pause request pending? */
|
||||
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
/* Yes.. some other CPU is requesting to pause this CPU!
|
||||
* Abort the wait and return false.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
|
||||
/* Notify that we have aborted the wait for the spinlock */
|
||||
|
||||
sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_ABORT);
|
||||
#endif
|
||||
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/* We have g_cpu_irqlock! */
|
||||
|
||||
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
|
||||
/* Notify that we have the spinlock */
|
||||
|
||||
sched_note_spinlock(tcb, &g_cpu_irqlock, NOTE_SPINLOCK_LOCKED);
|
||||
#endif
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
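The false return from irq_waitlock() is what lets the (old) critical-section logic break the deadlock; condensed from the enter_critical_section() hunks below into a single sketch:

#include <stdbool.h>

static void irqlock_with_deadlock_break(int cpu)
{
  bool paused = false;

  while (!irq_waitlock(cpu))
    {
      /* The lock is withheld because another CPU wants this one paused:
       * service the pause request here, then retry the lock.
       */

      if (!paused)
        {
          up_cpu_paused_save();
          paused = true;
        }

      up_cpu_paused(cpu);
    }

  if (paused)
    {
      up_cpu_paused_restore();
    }
}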
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
@ -198,7 +108,6 @@ irqstate_t enter_critical_section(void)
|
||||
* the local CPU.
|
||||
*/
|
||||
|
||||
try_again:
|
||||
ret = up_irq_save();
|
||||
|
||||
/* If called from an interrupt handler, then just take the spinlock.
|
||||
@ -259,8 +168,6 @@ try_again:
|
||||
|
||||
else
|
||||
{
|
||||
int paused = false;
|
||||
|
||||
/* Make sure that the g_cpu_irqset was not already set
|
||||
* by previous logic on this CPU that was executed by the
|
||||
* interrupt handler. We know that the bit in g_cpu_irqset
|
||||
@ -274,31 +181,7 @@ try_again:
|
||||
* no longer blocked by the critical section).
|
||||
*/
|
||||
|
||||
try_again_in_irq:
|
||||
if (!irq_waitlock(cpu))
|
||||
{
|
||||
/* We are in a deadlock condition due to a pending
|
||||
* pause request interrupt. Break the deadlock by
|
||||
* handling the pause request now.
|
||||
*/
|
||||
|
||||
if (!paused)
|
||||
{
|
||||
up_cpu_paused_save();
|
||||
}
|
||||
|
||||
DEBUGVERIFY(up_cpu_paused(cpu));
|
||||
paused = true;
|
||||
|
||||
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
|
||||
|
||||
/* NOTE: Here, this CPU does not hold g_cpu_irqlock,
|
||||
* so call irq_waitlock(cpu) to acquire g_cpu_irqlock.
|
||||
*/
|
||||
|
||||
goto try_again_in_irq;
|
||||
}
|
||||
|
||||
spin_lock(&g_cpu_irqlock);
|
||||
cpu_irqlock_set(cpu);
|
||||
}
|
||||
|
||||
@ -306,11 +189,6 @@ try_again_in_irq:
|
||||
|
||||
g_cpu_nestcount[cpu] = 1;
|
||||
|
||||
if (paused)
|
||||
{
|
||||
up_cpu_paused_restore();
|
||||
}
|
||||
|
||||
DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
|
||||
(g_cpu_irqset & (1 << cpu)) != 0);
|
||||
}
|
||||
@ -354,18 +232,7 @@ try_again_in_irq:
|
||||
|
||||
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
|
||||
|
||||
if (!irq_waitlock(cpu))
|
||||
{
|
||||
/* We are in a deadlock condition due to a pending pause
|
||||
* request interrupt. Re-enable interrupts on this CPU
|
||||
* and try again. Briefly re-enabling interrupts should
|
||||
* be sufficient to permit processing the pending pause
|
||||
* request.
|
||||
*/
|
||||
|
||||
up_irq_restore(ret);
|
||||
goto try_again;
|
||||
}
|
||||
spin_lock(&g_cpu_irqlock);
|
||||
|
||||
/* Then set the lock count to 1.
|
||||
*
|
||||
|
@ -59,7 +59,6 @@ if(CONFIG_SMP)
|
||||
APPEND
|
||||
SRCS
|
||||
sched_cpuselect.c
|
||||
sched_cpupause.c
|
||||
sched_getcpu.c
|
||||
sched_getaffinity.c
|
||||
sched_setaffinity.c
|
||||
|
@ -37,7 +37,7 @@ CSRCS += sched_reprioritize.c
|
||||
endif
|
||||
|
||||
ifeq ($(CONFIG_SMP),y)
|
||||
CSRCS += sched_cpuselect.c sched_cpupause.c sched_getcpu.c sched_process_delivered.c
|
||||
CSRCS += sched_cpuselect.c sched_getcpu.c sched_process_delivered.c
|
||||
CSRCS += sched_getaffinity.c sched_setaffinity.c
|
||||
endif
|
||||
|
||||
|
@ -401,11 +401,9 @@ static inline_function FAR struct tcb_s *this_task(void)
|
||||
}
|
||||
|
||||
int nxsched_select_cpu(cpu_set_t affinity);
|
||||
int nxsched_pause_cpu(FAR struct tcb_s *tcb);
|
||||
void nxsched_process_delivered(int cpu);
|
||||
#else
|
||||
# define nxsched_select_cpu(a) (0)
|
||||
# define nxsched_pause_cpu(t) (-38) /* -ENOSYS */
|
||||
#endif
|
||||
|
||||
#define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
|
||||
|
@ -1,106 +0,0 @@
|
||||
/****************************************************************************
|
||||
* sched/sched/sched_cpupause.c
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership. The
|
||||
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance with the
|
||||
* License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
* License for the specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Included Files
|
||||
****************************************************************************/
|
||||
|
||||
#include <nuttx/config.h>
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <assert.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <nuttx/arch.h>
|
||||
#include <nuttx/sched_note.h>
|
||||
|
||||
#include "sched/sched.h"
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/****************************************************************************
|
||||
* Public Functions
|
||||
****************************************************************************/
|
||||
|
||||
/****************************************************************************
|
||||
* Name: nxsched_pause_cpu
|
||||
*
|
||||
* Description:
|
||||
* Check if task associated with 'tcb' is running on a different CPU. If
|
||||
* so then pause that CPU and return its CPU index.
|
||||
*
|
||||
* Input Parameters:
|
||||
* tcb - The TCB of the task to be conditionally paused.
|
||||
*
|
||||
* Returned Value:
|
||||
* If a CPU is paused, its non-negative CPU index is returned. This index
* may then be used to resume the CPU. If the task is not running at all
* (or if an error occurs), then a negated errno value is returned. -ESRCH
* is returned in the case where the task is not running on any CPU.
|
||||
*
|
||||
* Assumptions:
|
||||
* This function was called in a critical section. In that case, no tasks
* may be started or may exit until we leave the critical section. This
* critical section should extend until up_cpu_resume() is called in the
* typical case.
|
||||
*
|
||||
****************************************************************************/
|
||||
|
||||
int nxsched_pause_cpu(FAR struct tcb_s *tcb)
|
||||
{
|
||||
int cpu;
|
||||
int ret;
|
||||
|
||||
DEBUGASSERT(tcb != NULL);
|
||||
|
||||
/* If the task is not running at all then our job is easy */
|
||||
|
||||
cpu = tcb->cpu;
|
||||
if (tcb->task_state != TSTATE_TASK_RUNNING)
|
||||
{
|
||||
return -ESRCH;
|
||||
}
|
||||
|
||||
/* Check the CPU that the task is running on */
|
||||
|
||||
DEBUGASSERT(cpu != this_cpu() && (unsigned int)cpu < CONFIG_SMP_NCPUS);
|
||||
if (cpu == this_cpu())
|
||||
{
|
||||
/* We can't pause ourself */
|
||||
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
/* Pause the CPU that the task is running on */
|
||||
|
||||
ret = up_cpu_pause(cpu);
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Return the CPU that the task is running on */
|
||||
|
||||
return cpu;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SMP */
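A typical call site for the helper removed above, per its Assumptions block: everything happens inside one critical section, and the returned CPU index is what gets resumed.  update_running_tcb() is a hypothetical stand-in for the actual edit:

static void update_running_tcb(FAR struct tcb_s *tcb);   /* Hypothetical helper */

static int edit_running_task(FAR struct tcb_s *tcb)
{
  irqstate_t flags = enter_critical_section();
  int cpu = nxsched_pause_cpu(tcb);

  if (cpu >= 0)
    {
      update_running_tcb(tcb);   /* Safe while that CPU is held paused */
      up_cpu_resume(cpu);
    }

  leave_critical_section(flags);
  return cpu < 0 ? cpu : OK;
}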
|
@ -73,13 +73,7 @@ void nxsched_process_delivered(int cpu)
|
||||
|
||||
if ((g_cpu_irqset & (1 << cpu)) == 0)
|
||||
{
|
||||
while (!spin_trylock_wo_note(&g_cpu_irqlock))
|
||||
{
|
||||
if (up_cpu_pausereq(cpu))
|
||||
{
|
||||
up_cpu_paused(cpu);
|
||||
}
|
||||
}
|
||||
spin_lock_wo_note(&g_cpu_irqlock);
|
||||
|
||||
g_cpu_irqset |= (1 << cpu);
|
||||
}
|
||||
|
@ -290,19 +290,9 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
|
||||
|
||||
if (tcb->task_state == TSTATE_TASK_RUNNING)
|
||||
{
|
||||
int me = this_cpu();
|
||||
int cpu = tcb->cpu;
|
||||
if (cpu != me)
|
||||
{
|
||||
up_cpu_pause(tcb->cpu);
|
||||
DEBUGASSERT(tcb->cpu == this_cpu());
|
||||
nxsched_remove_running(tcb);
|
||||
up_cpu_resume(tcb->cpu);
|
||||
}
|
||||
else
|
||||
{
|
||||
nxsched_remove_running(tcb);
|
||||
doswitch = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -125,7 +125,6 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
|
||||
|
||||
call_queue = &g_smp_call_queue[cpu];
|
||||
|
||||
up_cpu_paused_save();
|
||||
sq_for_every_safe(call_queue, curr, next)
|
||||
{
|
||||
FAR struct smp_call_data_s *call_data =
|
||||
@ -158,7 +157,6 @@ int nxsched_smp_call_handler(int irq, FAR void *context,
|
||||
}
|
||||
}
|
||||
|
||||
up_cpu_paused_restore();
|
||||
spin_unlock_irqrestore(&g_smp_call_lock, flags);
|
||||
return OK;
|
||||
}
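The callbacks that nxsched_smp_call_handler() drains are ordinary short functions run in interrupt context on the target CPU; a trivial example of the kind of work that now replaces a synchronous pause (the callback shape, taking a FAR void * argument and returning OK, is assumed here, and the posting side, which queues the smp_call_data_s entry and then raises the IPI with up_send_smp_call(), is not shown in this hunk):

static volatile int g_remote_ticks;   /* Example state owned by the target CPU */

static int bump_remote_tick(FAR void *arg)
{
  /* Runs on the CPU that received the IPI; keep it short and IRQ-safe */

  g_remote_ticks++;
  return OK;
}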