sim: Fix interrupt handling for SMP
Summary
- This commit fixes interrupt handling for SMP
- The following are the changes
- Introduce up_copyfullstate.c
- Add enter_critical_section() to up_exit()
- Add a critical section to up_schedule_sigaction()
- Introduce pseudo timer thread to send periodic events
- UART and interval timer are now handled in the pause handler
- Apply the same SMP related code as other CPU architectures
- However, signal handling and context switching are not changed
- Also enable debug features and some tools in smp/defconfig

Impact
- SMP only

Testing
- Tested with sim:smp on ubuntu18.04 x86_64
- Tested with hello, taskset, smp, ostest

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
parent ad9f88f042
commit f3a81cb1b7
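For orientation only, here is a condensed, standalone sketch (stub functions, not NuttX code) of the dispatch decision the summary describes: the single SIGUSR1 event delivered to a CPU thread is either an inter-CPU pause request or a periodic tick from the new host timer thread, and the pause handler in the diff below decides which path to take.

/* Standalone illustration of the SIGUSR1 dispatch described above.
 * pause_requested stands in for up_cpu_pausereq(); the two handlers stand
 * in for the up_cpu_paused() and sim_timer_handler() paths in the diff.
 */

#include <stdbool.h>
#include <stdio.h>

static bool pause_requested;

static void handle_pause(void)
{
  printf("pause request: save context and spin until resumed\n");
}

static void handle_tick(void)
{
  printf("periodic tick: poll UART and drive the interval timer\n");
}

static void pause_or_tick_handler(void)
{
  if (pause_requested)
    {
      handle_pause();
    }
  else
    {
      handle_tick();
    }
}

int main(void)
{
  pause_or_tick_handler();      /* behaves as a timer tick */
  pause_requested = true;
  pause_or_tick_handler();      /* behaves as a pause request */
  return 0;
}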
@@ -74,6 +74,7 @@ CSRCS += up_createstack.c up_usestack.c up_releasestack.c up_stackframe.c
CSRCS += up_unblocktask.c up_blocktask.c up_releasepending.c
CSRCS += up_reprioritizertr.c up_exit.c up_schedulesigaction.c
CSRCS += up_allocateheap.c up_uart.c
CSRCS += up_copyfullstate.c

VPATH = sim
DEPPATH = $(patsubst %,--dep-path %,$(subst :, ,$(VPATH)))
@@ -113,12 +113,19 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)

nxsched_suspend_scheduler(rtcb);

/* TODO */

if (CURRENT_REGS)
{
ASSERT(false);
}

/* Copy the exception context into the TCB at the (old) head of the
* ready-to-run Task list. if up_setjmp returns a non-zero
* value, then this is really the previously running task restarting!
*/

if (!up_setjmp(rtcb->xcp.regs))
else if (!up_setjmp(rtcb->xcp.regs))
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
arch/sim/src/sim/up_copyfullstate.c (new file, 64 lines)
@@ -0,0 +1,64 @@
/****************************************************************************
* arch/sim/src/sim/sim_copyfullstate.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/

/****************************************************************************
* Included Files
****************************************************************************/

#include <nuttx/config.h>

#include <stdint.h>
#include <arch/irq.h>

#include "up_internal.h"

/****************************************************************************
* Public Functions
****************************************************************************/

/****************************************************************************
* Name: up_copyfullstate
*
* Description:
* Copy the entire register save area
*
****************************************************************************/

#if defined(CONFIG_HOST_X86_64) && !defined(CONFIG_SIM_M32)
void up_copyfullstate(unsigned long *dest, unsigned long *src)
#else
void up_copyfullstate(uint32_t *dest, uint32_t *src)
#endif
{
int i;

/* In the sim model, the state is copied from the stack to the TCB,
* but only a reference is passed to get the state from the TCB. So the
* following check avoids copying the TCB save area onto itself:
*/

if (src != dest)
{
for (i = 0; i < XCPTCONTEXT_REGS; i++)
{
*dest++ = *src++;
}
}
}
@@ -67,6 +67,12 @@ void up_exit(int status)
{
FAR struct tcb_s *tcb;

/* Make sure that we are in a critical section with local interrupts.
* The IRQ state will be restored when the next task is started.
*/

enter_critical_section();

sinfo("TCB=%p exiting\n", this_task());

/* Destroy the task at the head of the ready to run list. */
@@ -111,6 +111,11 @@ void up_irq_restore(uint64_t flags)

void up_irqinitialize(void)
{
#ifdef CONFIG_SMP
/* Register the pause handler */

up_cpu_set_pause_handler(SIGUSR1);
#endif
}

/****************************************************************************
@@ -70,6 +70,7 @@
*
****************************************************************************/

#ifndef CONFIG_SMP
void up_idle(void)
{
#ifdef CONFIG_PM
@@ -124,3 +125,30 @@ void up_idle(void)
up_timer_update();
#endif
}
#endif /* !CONFIG_SMP */

#ifdef CONFIG_SMP
void up_idle(void)
{
host_sleep(100 * 1000);
}
#endif

/****************************************************************************
* Name: sim_timer_handler
****************************************************************************/

#ifdef CONFIG_SMP
void sim_timer_handler(void)
{
/* Handle UART data availability */

up_uartloop();

#ifdef CONFIG_ONESHOT
/* Driver the simulated interval timer */

up_timer_update();
#endif
}
#endif /* CONFIG_SMP */
@@ -128,6 +128,11 @@

#define SIM_HEAP_SIZE (64*1024*1024)

/* Macros to handle saving and restoring interrupt state ********************/

#define up_savestate(regs) up_copyfullstate(regs, (xcpt_reg_t *)CURRENT_REGS)
#define up_restorestate(regs) (CURRENT_REGS = regs)

/* File System Definitions **************************************************/

/* These definitions characterize the compressed filesystem image */
@@ -188,29 +193,18 @@ extern volatile void *g_current_regs[1];

#endif

#ifdef CONFIG_SMP
/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/

extern volatile uint8_t g_cpu_wait[CONFIG_SMP_NCPUS];
extern volatile uint8_t g_cpu_paused[CONFIG_SMP_NCPUS];
#endif

/****************************************************************************
* Public Function Prototypes
****************************************************************************/

/* Context switching */

#if defined(CONFIG_HOST_X86_64) && !defined(CONFIG_SIM_M32)
void up_copyfullstate(unsigned long *dest, unsigned long *src);
#else
void up_copyfullstate(uint32_t *dest, uint32_t *src);
#endif

void *up_doirq(int irq, void *regs);

/* up_setjmp32.S ************************************************************/
@@ -244,6 +238,9 @@ void up_cpu_started(void);
int up_cpu_paused(int cpu);
struct tcb_s *up_this_task(void);
int up_cpu_set_pause_handler(int irq);
void sim_send_ipi(int cpu);
void sim_timer_handler(void);
void sim_sigdeliver(void);
#endif

/* up_oneshot.c *************************************************************/
@@ -66,12 +66,22 @@ bool up_interrupt_context(void)
* Name: up_doirq
****************************************************************************/

void *up_doirq(int irq, void *regs)
void *up_doirq(int irq, void *context)
{
/* Current regs non-zero indicates that we are processing an interrupt;
/* Allocate temporary context on the stack */

xcpt_reg_t tmp[XCPTCONTEXT_REGS];
void *regs = (void *)tmp;

/* CURRENT_REGS non-zero indicates that we are processing an interrupt.
* CURRENT_REGS is also used to manage interrupt level context switches.
*/

#ifdef CONFIG_SMP
if (up_setjmp(regs) == 0)
{
#endif

CURRENT_REGS = regs;

/* Deliver the IRQ */
@@ -79,18 +89,30 @@ void *up_doirq(int irq, void *regs)
irq_dispatch(irq, regs);

/* If a context switch occurred while processing the interrupt then
* CURRENT_REGS may have change value. If we return any value different
* from the input regs, then the lower level will know that a context
* switch occurred during interrupt processing.
* CURRENT_REGS may have change value. If we return any value
* different from the input regs, then the lower level will know that
* context switch occurred during interrupt processing.
*/

regs = (void *)CURRENT_REGS;

/* Restore the previous value of CURRENT_REGS. NULL would indicate that
* we are no longer in an interrupt handler. It will be non-NULL if we
* are returning from a nested interrupt.
/* Restore the previous value of CURRENT_REGS. NULL would indicate
* that we are no longer in an interrupt handler. It will be non-NULL
* if we are returning from a nested interrupt.
*/

CURRENT_REGS = NULL;

#ifdef CONFIG_SMP
/* Handle signal */

sim_sigdeliver();

/* Then switch contexts */

up_longjmp(regs, 1);
}
#endif

return regs;
}
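The reworked up_doirq() above leans on setjmp/longjmp-style context handling, and the comments elsewhere in this diff ("if up_setjmp returns a non-zero value, then this is really the previously running task restarting") hinge on one property of that API. As a host-level analogy only, using standard setjmp()/longjmp() rather than the sim port's up_setjmp()/up_longjmp():

/* Analogy only: setjmp() returns 0 when the context is first saved and a
 * non-zero value when somebody longjmp()s back into it, which is how the
 * scheduler code distinguishes "context just saved" from "suspended
 * context is restarting".
 */

#include <setjmp.h>
#include <stdio.h>

static jmp_buf g_saved_ctx;

static void pretend_context_switch(void)
{
  /* Resume the saved context; setjmp() in main() now returns 1 */

  longjmp(g_saved_ctx, 1);
}

int main(void)
{
  if (setjmp(g_saved_ctx) == 0)
    {
      printf("context saved; simulating an interrupt-level switch\n");
      pretend_context_switch();
    }
  else
    {
      printf("restarted via longjmp, i.e. the suspended context resuming\n");
    }

  return 0;
}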
@@ -82,12 +82,19 @@ void up_release_pending(void)

nxsched_suspend_scheduler(rtcb);

/* TODO */

if (CURRENT_REGS)
{
ASSERT(false);
}

/* Copy the exception context into the TCB of the task that was
* currently active. if up_setjmp returns a non-zero value, then
* this is really the previously running task restarting!
*/

if (!up_setjmp(rtcb->xcp.regs))
else if (!up_setjmp(rtcb->xcp.regs))
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
@@ -136,13 +136,20 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)

nxsched_suspend_scheduler(rtcb);

if (CURRENT_REGS)
{
/* TODO */

ASSERT(false);
}

/* Copy the exception context into the TCB at the (old) head of the
* ready-to-run Task list. if up_setjmp returns a non-zero
* value, then this is really the previously running task
* restarting!
*/

if (!up_setjmp(rtcb->xcp.regs))
else if (!up_setjmp(rtcb->xcp.regs))
{
/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
@@ -86,8 +86,16 @@

void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
irqstate_t flags;

/* We don't have to anything complex for the simulated target */

sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

/* Make sure that interrupts are disabled */

flags = enter_critical_section();

if (tcb == this_task())
{
sigdeliver(tcb);
@@ -96,4 +104,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
tcb->xcp.sigdeliver = sigdeliver;
}

leave_critical_section(flags);
}
@@ -66,22 +66,7 @@ struct sim_cpuinfo_s
static pthread_key_t g_cpu_key;
static pthread_t g_cpu_thread[CONFIG_SMP_NCPUS];

/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/

volatile uint8_t g_cpu_wait[CONFIG_SMP_NCPUS];
volatile uint8_t g_cpu_paused[CONFIG_SMP_NCPUS];
static pthread_t g_timer_thread;

/****************************************************************************
* NuttX domain function prototypes
@@ -93,6 +78,10 @@ void sched_note_cpu_pause(struct tcb_s *tcb, int cpu);
void sched_note_cpu_resume(struct tcb_s *tcb, int cpu);
#endif

void up_irqinitialize(void);

extern uint8_t g_nx_initstate;

/****************************************************************************
* Private Functions
****************************************************************************/
@@ -134,9 +123,9 @@ static void *sim_idle_trampoline(void *arg)
return NULL;
}

/* Make sure the SIGUSR1 is not masked */
/* Initialize IRQ */

up_cpu_set_pause_handler(SIGUSR1);
up_irqinitialize();

/* Let up_cpu_start() continue */

@@ -172,6 +161,30 @@ static void *sim_idle_trampoline(void *arg)
return NULL;
}

/****************************************************************************
* Name: sim_host_timer_handler
****************************************************************************/

static void *sim_host_timer_handler(void *arg)
{
/* Wait until OSINIT_OSREADY(5) */

while (g_nx_initstate < 5)
{
host_sleep(10 * 1000); /* 10ms */
}

/* Send a periodic timer event to CPU0 */

while (1)
{
pthread_kill(g_cpu_thread[0], SIGUSR1);
host_sleep(10 * 1000); /* 10ms */
}

return NULL;
}

/****************************************************************************
* Public Functions
****************************************************************************/
@@ -213,9 +226,12 @@ void sim_cpu0_start(void)
return;
}

/* Register the common signal handler for all threads */
/* NOTE: IRQ initialization will be done in up_irqinitialize */

up_cpu_set_pause_handler(SIGUSR1);
/* Create timer thread to send a periodic timer event */

ret = pthread_create(&g_timer_thread,
NULL, sim_host_timer_handler, NULL);
}

/****************************************************************************
@@ -316,81 +332,10 @@ errout_with_cond:
}

/****************************************************************************
* Name: up_cpu_pause
*
* Description:
* Save the state of the current task at the head of the
* g_assignedtasks[cpu] task list and then pause task execution on the
* CPU.
*
* This function is called by the OS when the logic executing on one CPU
* needs to modify the state of the g_assignedtasks[cpu] list for another
* CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be stopped/
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
* Name: sim_send_ipi(int cpu)
****************************************************************************/

int up_cpu_pause(int cpu)
void sim_send_ipi(int cpu)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the pause event */

sched_note_cpu_pause(up_this_task(), cpu);
#endif

/* Take the spinlock that will prevent the CPU thread from running */

g_cpu_wait[cpu] = 1;
g_cpu_paused[cpu] = 1;

/* Signal the CPU thread */

pthread_kill(g_cpu_thread[cpu], SIGUSR1);

/* Spin, waiting for the thread to pause */

while (g_cpu_paused[cpu] != 0)
{
sched_yield();
}

return 0;
}

/****************************************************************************
* Name: up_cpu_resume
*
* Description:
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
* state of the task at the head of the g_assignedtasks[cpu] list, and
* resume normal tasking.
*
* This function is called after up_cpu_pause in order resume operation of
* the CPU after modifying its g_assignedtasks[cpu] list.
*
* Input Parameters:
* cpu - The index of the CPU being re-started.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/

int up_cpu_resume(int cpu)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the resume event */

sched_note_cpu_resume(up_this_task(), cpu);
#endif

/* Release the spinlock that will alloc the CPU thread to continue */

g_cpu_wait[cpu] = 0;
return 0;
}
@@ -46,6 +46,23 @@
#include "sched/sched.h"
#include "up_internal.h"

/* These spinlocks are used in the SMP configuration in order to implement
* up_cpu_pause(). The protocol for CPUn to pause CPUm is as follows
*
* 1. The up_cpu_pause() implementation on CPUn locks both g_cpu_wait[m]
* and g_cpu_paused[m]. CPUn then waits spinning on g_cpu_paused[m].
* 2. CPUm receives the interrupt it (1) unlocks g_cpu_paused[m] and
* (2) locks g_cpu_wait[m]. The first unblocks CPUn and the second
* blocks CPUm in the interrupt handler.
*
* When CPUm resumes, CPUn unlocks g_cpu_wait[m] and the interrupt handler
* on CPUm continues. CPUm must, of course, also then unlock g_cpu_wait[m]
* so that it will be ready for the next pause operation.
*/

static volatile spinlock_t g_cpu_wait[CONFIG_SMP_NCPUS];
static volatile spinlock_t g_cpu_paused[CONFIG_SMP_NCPUS];

/****************************************************************************
* Private Functions
****************************************************************************/
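The comment block above describes the two-flag pause handshake between CPUn and CPUm. As an illustration only, the same protocol can be modelled with plain pthreads and busy-wait flags (volatile is used purely for the sketch; the real code uses the spinlock API): wait_flag and paused_flag play the roles of g_cpu_wait[m] and g_cpu_paused[m].

/* Illustration of the CPUn/CPUm pause handshake described above, modelled
 * with host threads. Not NuttX code: real implementations use spinlocks
 * or atomics rather than volatile flags.
 */

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static volatile int wait_flag;    /* role of g_cpu_wait[m] */
static volatile int paused_flag;  /* role of g_cpu_paused[m] */

static void *cpu_m(void *arg)     /* the CPU being paused */
{
  while (!paused_flag)            /* wait for the pause "interrupt" */
    {
      sched_yield();
    }

  paused_flag = 0;                /* step 2: unlock g_cpu_paused[m] ... */
  while (wait_flag)               /* ... and block on g_cpu_wait[m] */
    {
      sched_yield();
    }

  printf("CPUm: resumed\n");
  return NULL;
}

int main(void)                    /* plays CPUn */
{
  pthread_t tid;
  pthread_create(&tid, NULL, cpu_m, NULL);

  wait_flag = 1;                  /* step 1: lock both flags */
  paused_flag = 1;
  while (paused_flag)             /* spin until CPUm acknowledges */
    {
      sched_yield();
    }

  printf("CPUn: CPUm is paused\n");
  wait_flag = 0;                  /* up_cpu_resume(): release g_cpu_wait[m] */
  pthread_join(tid, NULL);
  return 0;
}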
@@ -69,7 +86,41 @@

static int sim_cpupause_handler(int irq, FAR void *context, FAR void *arg)
{
return up_cpu_paused(this_cpu());
int cpu = this_cpu();

/* Check for false alarms. Such false could occur as a consequence of
* some deadlock breaking logic that might have already serviced the SG2
* interrupt by calling up_cpu_paused(). If the pause event has already
* been processed then g_cpu_paused[cpu] will not be locked.
*/

if (up_cpu_pausereq(cpu))
{
/* NOTE: The following enter_critical_section() will call
* up_cpu_paused() to process a pause request to break a deadlock
* because the caller held a critical section. Once up_cpu_paused()
* finished, the caller will proceed and release the g_cpu_irqlock.
* Then this CPU will acquire g_cpu_irqlock in the function.
*/

irqstate_t flags = enter_critical_section();

/* NOTE: the pause request should not exist here */

DEBUGVERIFY(!up_cpu_pausereq(cpu));

leave_critical_section(flags);
}
else
{
/* NOTE: sim specific logic
* In the case of no pause request, call sim_timer_handler()
*/

sim_timer_handler();
}

return OK;
}

/****************************************************************************
@@ -125,72 +176,56 @@ bool up_cpu_pausereq(int cpu)

int up_cpu_paused(int cpu)
{
struct tcb_s *rtcb = current_task(cpu);
struct tcb_s *tcb = current_task(cpu);

/* Update scheduler parameters */

nxsched_suspend_scheduler(rtcb);
nxsched_suspend_scheduler(tcb);

#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we are paused */

sched_note_cpu_paused(rtcb);
sched_note_cpu_paused(tcb);
#endif

/* Copy the exception context into the TCB at the (old) head of the
* CPUs assigned task list. if up_setjmp returns a non-zero value, then
* this is really the previously running task restarting!
/* Save the current context at CURRENT_REGS into the TCB at the head
* of the assigned task list for this CPU.
*/

if (up_setjmp(rtcb->xcp.regs) == 0)
{
/* Unlock the g_cpu_paused spinlock to indicate that we are in the
* paused state
*/
up_savestate(tcb->xcp.regs);

/* Wait for the spinlock to be released */

spin_unlock(&g_cpu_paused[cpu]);

/* Spin until we are asked to resume. When we resume, we need to
* inicate that we are not longer paused.
*/

spin_lock(&g_cpu_wait[cpu]);
spin_unlock(&g_cpu_wait[cpu]);

/* While we were paused, logic on a different CPU probably changed
* the task as that head of the assigned task list. So now we need
* restore the exception context of the rtcb at the (new) head
* of the assigned list in order to instantiate the new task.
/* Restore the exception context of the tcb at the (new) head of the
* assigned task list.
*/

rtcb = current_task(cpu);
tcb = current_task(cpu);

/* The way that we handle signals in the simulation is kind of a
* kludge. This would be unsafe in a truly multi-threaded,
* interrupt driven environment.
*/

if (rtcb->xcp.sigdeliver)
{
sinfo("CPU%d: Delivering signals TCB=%p\n", cpu, rtcb);
((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
rtcb->xcp.sigdeliver = NULL;
}

#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify that we have resumed */

sched_note_cpu_resumed(rtcb);
sched_note_cpu_resumed(tcb);
#endif

/* Reset scheduler parameters */

nxsched_resume_scheduler(rtcb);
nxsched_resume_scheduler(tcb);

/* Then switch contexts */
/* Then switch contexts. Any necessary address environment changes
* will be made when the interrupt returns.
*/

up_longjmp(rtcb->xcp.regs, 1);
}
up_restorestate(tcb->xcp.regs);
spin_unlock(&g_cpu_wait[cpu]);

return OK;
}
@@ -249,3 +284,122 @@ int up_cpu_set_pause_handler(int irq)
up_enable_irq(irq);
return irq_attach(irq, sim_cpupause_handler, NULL);
}

/****************************************************************************
* Name: up_cpu_pause
*
* Description:
* Save the state of the current task at the head of the
* g_assignedtasks[cpu] task list and then pause task execution on the
* CPU.
*
* This function is called by the OS when the logic executing on one CPU
* needs to modify the state of the g_assignedtasks[cpu] list for another
* CPU.
*
* Input Parameters:
* cpu - The index of the CPU to be stopped
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/

int up_cpu_pause(int cpu)
{
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the pause event */

sched_note_cpu_pause(this_task(), cpu);
#endif

/* Take the both spinlocks. The g_cpu_wait spinlock will prevent the
* handler from returning until up_cpu_resume() is called; g_cpu_paused
* is a handshake that will prefent this function from returning until
* the CPU is actually paused.
*/

DEBUGASSERT(!spin_islocked(&g_cpu_wait[cpu]) &&
!spin_islocked(&g_cpu_paused[cpu]));

spin_lock(&g_cpu_wait[cpu]);
spin_lock(&g_cpu_paused[cpu]);

/* Generate IRQ for CPU(cpu) */

sim_send_ipi(cpu);

/* Wait for the other CPU to unlock g_cpu_paused meaning that
* it is fully paused and ready for up_cpu_resume();
*/

spin_lock(&g_cpu_paused[cpu]);
spin_unlock(&g_cpu_paused[cpu]);

/* On successful return g_cpu_wait will be locked, the other CPU will be
* spinning on g_cpu_wait and will not continue until g_cpu_resume() is
* called. g_cpu_paused will be unlocked in any case.
*/

return OK;
}

/****************************************************************************
* Name: up_cpu_resume
*
* Description:
* Restart the cpu after it was paused via up_cpu_pause(), restoring the
* state of the task at the head of the g_assignedtasks[cpu] list, and
* resume normal tasking.
*
* This function is called after up_cpu_pause in order resume operation of
* the CPU after modifying its g_assignedtasks[cpu] list.
*
* Input Parameters:
* cpu - The index of the CPU being re-started.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/

int up_cpu_resume(int cpu)
{
DEBUGASSERT(cpu >= 0 && cpu < CONFIG_SMP_NCPUS && cpu != this_cpu());

#ifdef CONFIG_SCHED_INSTRUMENTATION
/* Notify of the resume event */

sched_note_cpu_resume(this_task(), cpu);
#endif

/* Release the spinlock. Releasing the spinlock will cause the SGI2
* handler on 'cpu' to continue and return from interrupt to the newly
* established thread.
*/

DEBUGASSERT(spin_islocked(&g_cpu_wait[cpu]) &&
!spin_islocked(&g_cpu_paused[cpu]));

spin_unlock(&g_cpu_wait[cpu]);
return OK;
}

/****************************************************************************
* Name: sim_sigdeliver
****************************************************************************/

void sim_sigdeliver(void)
{
int cpu = this_cpu();
struct tcb_s *tcb = current_task(cpu);

if (tcb->xcp.sigdeliver)
{
sinfo("Delivering signals TCB=%p\n", tcb);
((sig_deliver_t)tcb->xcp.sigdeliver)(tcb);
tcb->xcp.sigdeliver = NULL;
}
}
@@ -96,12 +96,37 @@ void up_unblock_task(FAR struct tcb_s *tcb)

nxsched_suspend_scheduler(rtcb);

/* Are we in an interrupt handler? */

if (CURRENT_REGS)
{
/* Yes, then we have to do things differently.
* Just copy the CURRENT_REGS into the OLD rtcb.
*/

up_savestate(rtcb->xcp.regs);

/* Restore the exception context of the rtcb at the (new) head
* of the ready-to-run task list.
*/

rtcb = this_task();

/* Update scheduler parameters */

nxsched_resume_scheduler(rtcb);

/* Then switch contexts */

up_restorestate(rtcb->xcp.regs);
}

/* Copy the exception context into the TCB of the task that was
* previously active. if up_setjmp returns a non-zero value, then
* this is really the previously running task restarting!
*/

if (!up_setjmp(rtcb->xcp.regs))
else if (!up_setjmp(rtcb->xcp.regs))
{
/* Restore the exception context of the new task that is ready to
* run (probably tcb). This is the new rtcb at the head of the
@@ -13,6 +13,9 @@ CONFIG_ARCH_CHIP="sim"
CONFIG_ARCH_SIM=y
CONFIG_BOARDCTL_POWEROFF=y
CONFIG_BUILTIN=y
CONFIG_DEBUG_ASSERTIONS=y
CONFIG_DEBUG_ERROR=y
CONFIG_DEBUG_FEATURES=y
CONFIG_DEBUG_SYMBOLS=y
CONFIG_DRIVER_NOTE=y
CONFIG_EXAMPLES_HELLO=y
@@ -23,9 +26,11 @@ CONFIG_NSH_READLINE=y
CONFIG_READLINE_CMD_HISTORY=y
CONFIG_SCHED_HAVE_PARENT=y
CONFIG_SCHED_INSTRUMENTATION=y
CONFIG_SCHED_WAITPID=y
CONFIG_SMP=y
CONFIG_SYSTEM_NSH=y
CONFIG_SYSTEM_SYSTEM=y
CONFIG_SYSTEM_TASKSET=y
CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_OSTEST=y
CONFIG_TESTING_SMP=y
CONFIG_USER_ENTRYPOINT="nsh_main"