/****************************************************************************
 * arch/arm/src/armv7-a/arm_cpupause.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <stdint.h>
#include <assert.h>

#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/spinlock.h>
#include <nuttx/sched_note.h>

#include "arm_internal.h"
#include "gic.h"
#include "sched/sched.h"

#ifdef CONFIG_SMP

/****************************************************************************
 * Private Data
 ****************************************************************************/

/* These spinlocks are used in the SMP configuration in order to implement
 * up_cpu_pause().  The protocol for CPUn to pause CPUm is as follows:
 *
 * 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
 *    and g_cpu_paused[m].  CPUn then waits spinning on g_cpu_paused[m].
 * 2. When CPUm receives the interrupt, it (1) unlocks g_cpu_paused[m] and
 *    (2) locks g_cpu_wait[m].  The first unblocks CPUn and the second
 *    blocks CPUm in the interrupt handler.
 *
 * When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
 * on CPUm continues.  CPUm must, of course, also then unlock g_cpu_wait[m]
 * so that it will be ready for the next pause operation.
 */

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_resumed[CONFIG_SMP_NCPUS];
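
/* g_cpu_resumed[m] extends this handshake (it is not covered by the
 * comment above, but its use is visible in the code below):  after CPUm
 * releases g_cpu_paused[m], it takes g_cpu_resumed[m] and holds it until
 * it has fully resumed.  up_cpu_resume() locks and immediately unlocks
 * g_cpu_resumed[m], so it cannot return before CPUm is really running
 * again, which avoids a deadlock on a back-to-back pause request.
 */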

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_cpu_pausereq
 *
 * Description:
 *   Return true if a pause request is pending for this CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be queried
 *
 * Returned Value:
 *   true  = a pause request is pending.
 *   false = no pause request is pending.
 *
 ****************************************************************************/

bool up_cpu_pausereq(int cpu)
{
  return spin_islocked(&g_cpu_paused[cpu]);
}
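
/* A sketch of the intended call site (illustrative only; the real
 * deadlock-breaking logic lives in the OS critical-section code):
 *
 *   while (!spin_trylock(&some_lock))      - hypothetical contended lock
 *     {
 *       if (up_cpu_pausereq(this_cpu()))   - a pause request is pending
 *         {
 *           up_cpu_paused(this_cpu());     - service it here, since the
 *         }                                  SGI2 interrupt cannot be
 *     }                                      taken in this context
 */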

/****************************************************************************
 * Name: up_cpu_paused
 *
 * Description:
 *   Handle a pause request from another CPU.  Normally, this logic is
 *   executed from interrupt handling logic within the architecture-
 *   specific implementation.  However, it is sometimes necessary to
 *   perform the pending pause operation in other contexts where the
 *   interrupt cannot be taken in order to avoid deadlocks.
 *
 *   This function performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be paused
 *
 * Returned Value:
 *   On success, OK is returned.  Otherwise, a negated errno value
 *   indicating the nature of the failure is returned.
 *
 ****************************************************************************/

int up_cpu_paused(int cpu)
{
  struct tcb_s *tcb = this_task();

  /* Update scheduler parameters */

  nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we are paused */

  sched_note_cpu_paused(tcb);
#endif

  /* Save the current context at CURRENT_REGS into the TCB at the head
   * of the assigned task list for this CPU.
   */

  arm_savestate(tcb->xcp.regs);

  /* Release the g_cpu_paused spinlock to synchronize with the
   * requesting CPU.
   */

  spin_unlock(&g_cpu_paused[cpu]);

  /* Take g_cpu_resumed to flag that this CPU has not yet fully resumed;
   * up_cpu_resume() spins on it so that it cannot return (and no new pause
   * request can start) before this CPU is running again.
   */

  spin_lock(&g_cpu_resumed[cpu]);

  /* Wait for the spinlock to be released.  The requesting CPU will release
   * the spinlock when the CPU is resumed.
   */

  spin_lock(&g_cpu_wait[cpu]);

  /* This CPU has been resumed.  Restore the exception context of the TCB
   * at the (new) head of the assigned task list.
   */

  tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify that we have resumed */

  sched_note_cpu_resumed(tcb);
#endif

  /* Reset scheduler parameters */

  nxsched_resume_scheduler(tcb);

  /* Then switch contexts.  Any necessary address environment changes
   * will be made when the interrupt returns.
   */

  arm_restorestate(tcb->xcp.regs);
  spin_unlock(&g_cpu_wait[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}
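
/* The full handshake, as implemented above and in up_cpu_pause() and
 * up_cpu_resume() below (time flows downward; CPUn requests the pause,
 * CPUm is the paused CPU):
 *
 *   CPUn: lock g_cpu_wait[m], lock g_cpu_paused[m], raise SGI2
 *   CPUm: unlock g_cpu_paused[m], lock g_cpu_resumed[m]
 *   CPUn: lock/unlock g_cpu_paused[m]   (pause confirmed, returns OK)
 *   CPUm: spins on g_cpu_wait[m]
 *   CPUn: up_cpu_resume(): unlock g_cpu_wait[m], spin on g_cpu_resumed[m]
 *   CPUm: restore context, unlock g_cpu_wait[m], unlock g_cpu_resumed[m]
 */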

/****************************************************************************
 * Name: arm_pause_handler
 *
 * Description:
 *   This is the handler for SGI2.  It performs the following operations:
 *
 *   1. It saves the current task state at the head of the current assigned
 *      task list.
 *   2. It waits on a spinlock, then
 *   3. Returns from interrupt, restoring the state of the new task at the
 *      head of the ready to run list.
 *
 * Input Parameters:
 *   Standard interrupt handling
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int arm_pause_handler(int irq, void *context, void *arg)
{
  int cpu = this_cpu();

  /* Check for false alarms.  Such false alarms could occur as a
   * consequence of some deadlock-breaking logic that might have already
   * serviced the SGI2 interrupt by calling up_cpu_paused().  If the pause
   * event has already been processed, then g_cpu_paused[cpu] will not be
   * locked.
   */

  if (up_cpu_pausereq(cpu))
    {
      /* NOTE: The following enter_critical_section() will call
       * up_cpu_paused() to process the pause request and thereby break a
       * deadlock when the caller holds a critical section.  Once
       * up_cpu_paused() has finished, the caller will proceed and release
       * the g_cpu_irqlock.  Then this CPU will acquire g_cpu_irqlock
       * inside enter_critical_section().
       */

      irqstate_t flags = enter_critical_section();

      /* NOTE: the pause request should not exist here */

      DEBUGVERIFY(!up_cpu_pausereq(cpu));

      leave_critical_section(flags);
    }

  return OK;
}
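
/* Note: arm_pause_handler() is expected to be attached to SGI2 during GIC
 * initialization (presumably via irq_attach(GIC_IRQ_SGI2, ...) in the GIC
 * setup code); this file only raises and services the interrupt.
 */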

/****************************************************************************
 * Name: up_cpu_pause
 *
 * Description:
 *   Save the state of the current task at the head of the
 *   g_assignedtasks[cpu] task list and then pause task execution on the
 *   CPU.
 *
 *   This function is called by the OS when the logic executing on one CPU
 *   needs to modify the state of the g_assignedtasks[cpu] list for another
 *   CPU.
 *
 * Input Parameters:
 *   cpu - The index of the CPU to be stopped
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_pause(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the pause event */

  sched_note_cpu_pause(this_task(), cpu);
#endif

  /* Take both spinlocks.  The g_cpu_wait spinlock will prevent the SGI2
   * handler from returning until up_cpu_resume() is called; g_cpu_paused
   * is a handshake that will prevent this function from returning until
   * the CPU is actually paused.
   * Note that we might spin before getting g_cpu_wait; this just means
   * that the other CPU still hasn't finished responding to the previous
   * resume request.
   */

  DEBUGASSERT(!spin_islocked(&g_cpu_paused[cpu]));

  spin_lock(&g_cpu_wait[cpu]);
  spin_lock(&g_cpu_paused[cpu]);

  /* Execute SGI2 */

  arm_cpu_sgi(GIC_IRQ_SGI2, (1 << cpu));

  /* Wait for the other CPU to unlock g_cpu_paused, meaning that
   * it is fully paused and ready for up_cpu_resume().
   */

  spin_lock(&g_cpu_paused[cpu]);
  spin_unlock(&g_cpu_paused[cpu]);

  /* On successful return, g_cpu_wait will be locked, the other CPU will
   * be spinning on g_cpu_wait and will not continue until up_cpu_resume()
   * is called.  g_cpu_paused will be unlocked in any case.
   */

  return OK;
}
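
/* Usage sketch (illustrative; the real callers are in the scheduler):
 *
 *   up_cpu_pause(cpu);                - stop the other CPU
 *   ...modify g_assignedtasks[cpu]...
 *   up_cpu_resume(cpu);               - let it continue with the new state
 */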

/****************************************************************************
 * Name: up_cpu_resume
 *
 * Description:
 *   Restart the cpu after it was paused via up_cpu_pause(), restoring the
 *   state of the task at the head of the g_assignedtasks[cpu] list, and
 *   resume normal tasking.
 *
 *   This function is called after up_cpu_pause() in order to resume
 *   operation of the CPU after modifying its g_assignedtasks[cpu] list.
 *
 * Input Parameters:
 *   cpu - The index of the CPU being re-started.
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 ****************************************************************************/

int up_cpu_resume(int cpu)
{
  DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* Notify of the resume event */

  sched_note_cpu_resume(this_task(), cpu);
#endif

  /* Release the spinlock.  Releasing the spinlock will cause the SGI2
   * handler on 'cpu' to continue and return from interrupt to the newly
   * established thread.
   */

  DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
              !spin_islocked(&g_cpu_paused[cpu]));

  spin_unlock(&g_cpu_wait[cpu]);

  /* Ensure the CPU has been resumed to avoid causing a deadlock */

  spin_lock(&g_cpu_resumed[cpu]);
  spin_unlock(&g_cpu_resumed[cpu]);

  return OK;
}

#endif /* CONFIG_SMP */