From 8220b169f371fc7d82a38be52e5950be06c27e83 Mon Sep 17 00:00:00 2001
From: p-szafonimateusz
Date: Wed, 3 Apr 2024 11:53:46 +0200
Subject: [PATCH] arch/intel64: add SMP support

Modified various intel64 files to support SMP.

Signed-off-by: p-szafonimateusz
---
 arch/x86_64/include/irq.h                     |  25 ++-
 arch/x86_64/src/common/x86_64_exit.c          |  10 ++
 arch/x86_64/src/common/x86_64_switchcontext.c |  13 ++
 arch/x86_64/src/intel64/intel64_handlers.c    |   4 +
 .../src/intel64/intel64_schedulesigaction.c   | 158 +++++++++++++++++-
 arch/x86_64/src/intel64/intel64_sigdeliver.c  |  60 ++++++-
 6 files changed, 250 insertions(+), 20 deletions(-)

diff --git a/arch/x86_64/include/irq.h b/arch/x86_64/include/irq.h
index 8cddf2a3e3..dfbbe15c35 100644
--- a/arch/x86_64/include/irq.h
+++ b/arch/x86_64/include/irq.h
@@ -45,14 +45,6 @@
 # include
 #endif
 
-/****************************************************************************
- * Pre-processor Definitions
- ****************************************************************************/
-
-/****************************************************************************
- * Public Types
- ****************************************************************************/
-
 /****************************************************************************
  * Public Data
  ****************************************************************************/
@@ -103,7 +95,22 @@ extern "C"
  *
  ****************************************************************************/
 
-#define up_cpu_index() (0)
+#ifdef CONFIG_SMP
+static inline_function int up_cpu_index(void)
+{
+  unsigned long cpu;
+
+  asm volatile(
+    "\tmovq %%gs:(%c1), %0\n"
+    : "=r" (cpu)
+    : "i" (offsetof(struct intel64_cpu_s, id))
+    :);
+
+  return cpu;
+}
+#else
+#  define up_cpu_index() (0)
+#endif
 
 /****************************************************************************
  * Inline functions
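As an aside, the %gs-relative load that the new up_cpu_index() performs can
be exercised outside NuttX. The stand-alone sketch below is for x86-64 Linux
only and uses a hypothetical struct percpu_s in place of NuttX's struct
intel64_cpu_s; arch_prctl(ARCH_SET_GS) stands in for the GSBASE setup that
the port performs during CPU bring-up:

#define _GNU_SOURCE
#include <asm/prctl.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical per-CPU block; NuttX uses struct intel64_cpu_s instead */

struct percpu_s
{
  unsigned long id;
};

int main(void)
{
  struct percpu_s cpu = { .id = 3 };
  unsigned long value;

  /* Point GSBASE at our fake per-CPU block (Linux-specific syscall) */

  syscall(SYS_arch_prctl, ARCH_SET_GS, &cpu);

  /* The same addressing pattern as up_cpu_index(): a single gs-relative
   * load of the 'id' member, with the offset folded in at compile time.
   */

  asm volatile("movq %%gs:(%c1), %0"
               : "=r" (value)
               : "i" (offsetof(struct percpu_s, id)));

  printf("cpu id via %%gs = %lu\n", value);
  return 0;
}

Because each CPU carries its own GSBASE, the load is a single instruction
and needs no locking, which is what makes up_cpu_index() safe to call from
any context.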
diff --git a/arch/x86_64/src/common/x86_64_exit.c b/arch/x86_64/src/common/x86_64_exit.c
index 43f0dcc66d..867937ec3c 100644
--- a/arch/x86_64/src/common/x86_64_exit.c
+++ b/arch/x86_64/src/common/x86_64_exit.c
@@ -92,7 +92,17 @@ void up_exit(int status)
   addrenv_switch(tcb);
 #endif
 
+  /* Restore the cpu lock */
+
+  restore_critical_section();
+
   /* Then switch contexts */
 
   x86_64_fullcontextrestore(tcb->xcp.regs);
+
+  /* x86_64_fullcontextrestore() should not return, but could if software
+   * interrupts are disabled.
+   */
+
+  PANIC();
 }
diff --git a/arch/x86_64/src/common/x86_64_switchcontext.c b/arch/x86_64/src/common/x86_64_switchcontext.c
index 3397d2d493..c87a71339c 100644
--- a/arch/x86_64/src/common/x86_64_switchcontext.c
+++ b/arch/x86_64/src/common/x86_64_switchcontext.c
@@ -57,6 +57,8 @@
 
 void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 {
+  int cpu;
+
   /* Update scheduler parameters */
 
   nxsched_suspend_scheduler(rtcb);
@@ -107,6 +109,17 @@ void up_switch_context(struct tcb_s *tcb, struct tcb_s *rtcb)
 
       nxsched_resume_scheduler(tcb);
 
+      /* Restore the cpu lock */
+
+      restore_critical_section();
+
+      /* Record the new "running" task.  g_running_tasks[] is only used by
+       * assertion logic for reporting crashes.
+       */
+
+      cpu = this_cpu();
+      g_running_tasks[cpu] = current_task(cpu);
+
       /* Then switch contexts */
 
       x86_64_fullcontextrestore(tcb->xcp.regs);
diff --git a/arch/x86_64/src/intel64/intel64_handlers.c b/arch/x86_64/src/intel64/intel64_handlers.c
index 8bf5843d6b..9bbf2d2e60 100644
--- a/arch/x86_64/src/intel64/intel64_handlers.c
+++ b/arch/x86_64/src/intel64/intel64_handlers.c
@@ -100,6 +100,10 @@ static uint64_t *common_handler(int irq, uint64_t *regs)
        */
 
       g_running_tasks[this_cpu()] = this_task();
+
+      /* Restore the cpu lock */
+
+      restore_critical_section();
     }
 
   /* If a context switch occurred while processing the interrupt then
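The bookkeeping added above in up_exit(), up_switch_context() and
common_handler() follows one pattern: once the critical section has been
re-established, re-record the task at the head of this CPU's ready-to-run
list in g_running_tasks[]. A minimal stand-alone sketch of just that
bookkeeping; this_cpu() and current_task() are stubbed here, whereas in
NuttX they are real scheduler helpers:

#include <stdio.h>

#define NCPUS 4

struct tcb_s
{
  const char *name;
};

/* Only consulted by assertion/crash-reporting logic in the real OS */

static struct tcb_s *g_running_tasks[NCPUS];

static struct tcb_s g_idle   = { "idle" };
static struct tcb_s g_worker = { "worker" };

/* Stand-ins for the NuttX helpers used in the patch */

static int this_cpu(void)
{
  return 0;
}

static struct tcb_s *current_task(int cpu)
{
  (void)cpu;
  return &g_worker;  /* pretend "worker" now heads the ready-to-run list */
}

int main(void)
{
  int cpu;

  /* Boot-time state: the idle task is what this CPU runs first */

  g_running_tasks[0] = &g_idle;

  /* What the patch adds after a context switch or interrupt return:
   * re-record the task at the head of this CPU's ready-to-run list.
   */

  cpu = this_cpu();
  g_running_tasks[cpu] = current_task(cpu);

  printf("cpu%d now runs %s\n", cpu, g_running_tasks[cpu]->name);
  return 0;
}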
diff --git a/arch/x86_64/src/intel64/intel64_schedulesigaction.c b/arch/x86_64/src/intel64/intel64_schedulesigaction.c
index 28f51334eb..589d955fce 100644
--- a/arch/x86_64/src/intel64/intel64_schedulesigaction.c
+++ b/arch/x86_64/src/intel64/intel64_schedulesigaction.c
@@ -70,6 +70,7 @@
  *
  ****************************************************************************/
 
+#ifndef CONFIG_SMP
 void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
 {
   sinfo("tcb=%p sigdeliver=%p\n", tcb, sigdeliver);
@@ -109,7 +110,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
            * Hmmm... there looks like a latent bug here: The following logic
            * would fail in the strange case where we are in an interrupt
            * handler, the thread is signalling itself, but a context switch
-           * to another task has occurred so that g_current_regs does not
+           * to another task has occurred so that CURRENT_REGS does not
            * refer to the thread of this_task()!
            */
@@ -121,7 +122,7 @@
            */
 
           tcb->xcp.saved_rip = up_current_regs()[REG_RIP];
-          tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
+          tcb->xcp.saved_rsp = up_current_regs()[REG_RSP];
           tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS];
 
           /* Then set up to vector to the trampoline with interrupts
@@ -165,3 +166,156 @@
         }
     }
 }
+#else /* !CONFIG_SMP */
+void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
+{
+  int cpu;
+  int me;
+
+  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);
+
+  /* Refuse to handle nested signal actions */
+
+  if (tcb->xcp.sigdeliver == NULL)
+    {
+      tcb->xcp.sigdeliver = sigdeliver;
+
+      /* First, handle some special cases when the signal is being delivered
+       * to a task that is currently executing on any CPU.
+       */
+
+      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(),
+            up_current_regs());
+
+      if (tcb->task_state == TSTATE_TASK_RUNNING)
+        {
+          me = this_cpu();
+          cpu = tcb->cpu;
+
+          /* CASE 1: We are not in an interrupt handler and a task is
+           * signaling itself for some reason.
+           */
+
+          if (cpu == me && !up_current_regs())
+            {
+              /* In this case just deliver the signal now.
+               * REVISIT: Signal handler will run in a critical section!
+               */
+
+              sigdeliver(tcb);
+              tcb->xcp.sigdeliver = NULL;
+            }
+
+          /* CASE 2: The task that needs to receive the signal is running.
+           * This could happen if the task is running on another CPU OR if
+           * we are in an interrupt handler and the task is running on this
+           * CPU.  In the former case, we will have to PAUSE the other CPU
+           * first.  But in either case, we will have to modify the return
+           * state as well as the state in the TCB.
+           */
+
+          else
+            {
+              /* If we are signaling a task running on another CPU, we
+               * have to PAUSE that CPU first.
+               */
+
+              if (cpu != me)
+                {
+                  /* Pause the CPU */
+
+                  up_cpu_pause(cpu);
+
+                  /* Wait while the pause request is pending */
+
+                  while (up_cpu_pausereq(cpu))
+                    {
+                    }
+
+                  /* Now the TCB on the other CPU can be accessed safely */
+
+                  /* Copy tcb->xcp.regs to tcb->xcp.saved_*.  These will be
+                   * restored by the signal trampoline after the signal has
+                   * been delivered.
+                   */
+
+                  tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP];
+                  tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
+                  tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS];
+
+                  /* Then set up to vector to the trampoline with interrupts
+                   * disabled
+                   */
+
+                  tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver;
+                  tcb->xcp.regs[REG_RFLAGS] = 0;
+                }
+              else
+                {
+                  /* tcb is running on the same CPU */
+
+                  /* Save the return RIP, RSP and RFLAGS.  These will be
+                   * restored by the signal trampoline after the signals
+                   * have been delivered.
+                   */
+
+                  tcb->xcp.saved_rip = up_current_regs()[REG_RIP];
+                  tcb->xcp.saved_rsp = up_current_regs()[REG_RSP];
+                  tcb->xcp.saved_rflags = up_current_regs()[REG_RFLAGS];
+
+                  /* Then set up to vector to the trampoline with interrupts
+                   * disabled
+                   */
+
+                  up_current_regs()[REG_RIP] = (uint64_t)x86_64_sigdeliver;
+                  up_current_regs()[REG_RFLAGS] = 0;
+
+                  /* And make sure that the saved context in the TCB
+                   * is the same as the interrupt return context.
+                   */
+
+                  x86_64_savestate(tcb->xcp.regs);
+                }
+
+              /* NOTE: If the task runs on another CPU (cpu), adjusting
+               * global IRQ controls will be done in the pause handler
+               * on that CPU (cpu) by taking a critical section.
+               * If the task is scheduled on this CPU (me), do nothing
+               * because this CPU already took a critical section.
+               */
+
+              /* RESUME the other CPU if it was PAUSED */
+
+              if (cpu != me)
+                {
+                  up_cpu_resume(cpu);
+                }
+            }
+        }
+
+      /* Otherwise, we are (1) signaling a task that is not running from an
+       * interrupt handler, or (2) we are not in an interrupt handler and
+       * the running task is signaling some other non-running task.
+       */
+
+      else
+        {
+          /* Save the return RIP, RSP and RFLAGS.  These will be restored
+           * by the signal trampoline after the signals have been delivered.
+           */
+
+          tcb->xcp.saved_rip = tcb->xcp.regs[REG_RIP];
+          tcb->xcp.saved_rsp = tcb->xcp.regs[REG_RSP];
+          tcb->xcp.saved_rflags = tcb->xcp.regs[REG_RFLAGS];
+
+          /* Then set up to vector to the trampoline with interrupts
+           * disabled
+           */
+
+          tcb->xcp.regs[REG_RIP] = (uint64_t)x86_64_sigdeliver;
+          tcb->xcp.regs[REG_RFLAGS] = 0;
+        }
    }
+}
+#endif /* CONFIG_SMP */
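All three branches above use the same register-hijack pattern: save the
task's RIP, RSP and RFLAGS, point RIP at x86_64_sigdeliver, and clear RFLAGS
so the trampoline starts with interrupts disabled. A simplified mock of the
pattern follows; the REG_* indices and the trampoline body are hypothetical,
and a real context restore would jump to the new RIP rather than call it:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register indices; the real layout is in the arch headers */

#define REG_RIP    0
#define REG_RSP    1
#define REG_RFLAGS 2
#define NREGS      3

struct xcp_s
{
  uint64_t regs[NREGS];
  uint64_t saved_rip;
  uint64_t saved_rsp;
  uint64_t saved_rflags;
};

/* Stands in for x86_64_sigdeliver(): runs handlers, then restores context */

static void trampoline(void)
{
  printf("trampoline: deliver pending signals here\n");
}

int main(void)
{
  struct xcp_s xcp = { .regs = { 0x401000, 0x7ff000, 0x202 } };

  /* Save the interrupted RIP/RSP/RFLAGS so the trampoline can restore them */

  xcp.saved_rip = xcp.regs[REG_RIP];
  xcp.saved_rsp = xcp.regs[REG_RSP];
  xcp.saved_rflags = xcp.regs[REG_RFLAGS];

  /* Vector to the trampoline with "interrupts disabled" (RFLAGS = 0) */

  xcp.regs[REG_RIP] = (uint64_t)(uintptr_t)trampoline;
  xcp.regs[REG_RFLAGS] = 0;

  /* A real context restore would jump to regs[REG_RIP]; just call it here */

  ((void (*)(void))(uintptr_t)xcp.regs[REG_RIP])();

  printf("would resume at rip=%#llx\n", (unsigned long long)xcp.saved_rip);
  return 0;
}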
diff --git a/arch/x86_64/src/intel64/intel64_sigdeliver.c b/arch/x86_64/src/intel64/intel64_sigdeliver.c
index 0369480a4a..819d689c87 100644
--- a/arch/x86_64/src/intel64/intel64_sigdeliver.c
+++ b/arch/x86_64/src/intel64/intel64_sigdeliver.c
@@ -54,10 +54,24 @@
 void x86_64_sigdeliver(void)
 {
   struct tcb_s *rtcb = this_task();
-  sig_deliver_t sighandler;
   uint64_t regs_area[XCPTCONTEXT_REGS + 2];
   uint64_t *regs;
 
+#ifdef CONFIG_SMP
+  /* In the SMP case, we must terminate the critical section while the
+   * signal handler executes, but we also need to restore the irqcount
+   * when we resume the main thread of the task.
+   */
+
+  int16_t saved_irqcount;
+#endif
+
+  board_autoled_on(LED_SIGNAL);
+
+  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
+        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
+  DEBUGASSERT(rtcb->xcp.sigdeliver != NULL);
+
   /* Align regs to 16 byte boundary for SSE instructions. */
 
   regs = (uint64_t *)(((uint64_t)(regs_area) + 15) & (~(uint64_t)15));
@@ -70,17 +84,24 @@ void x86_64_sigdeliver(void)
 
   x86_64_copystate(regs, rtcb->xcp.regs);
 
-  /* grab on a copy of the signal hander function pointer before any
-   * possibility to be switched out.
+#ifdef CONFIG_SMP
+  /* In the SMP case, up_schedule_sigaction() will have incremented
+   * 'irqcount' in order to force us into a critical section.  Save the
+   * pre-incremented irqcount.
    */
 
-  ASSERT(rtcb->xcp.sigdeliver != NULL);
-  sighandler = rtcb->xcp.sigdeliver;
+  saved_irqcount = rtcb->irqcount;
+  DEBUGASSERT(saved_irqcount >= 0);
 
-  board_autoled_on(LED_SIGNAL);
+  /* Now we need to call leave_critical_section() repeatedly to get the
+   * irqcount to zero, freeing all global spinlocks that enforce the
+   * critical section.
+   */
 
-  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
-        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
+  while (rtcb->irqcount > 0)
+    {
+      leave_critical_section((uint8_t)regs[REG_RFLAGS]);
+    }
+#endif /* CONFIG_SMP */
 
 #ifndef CONFIG_SUPPRESS_INTERRUPTS
   /* Then make sure that interrupts are enabled.  Signal handlers must always
@@ -92,7 +113,7 @@ void x86_64_sigdeliver(void)
 
   /* Deliver the signals */
 
-  sighandler(rtcb);
+  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
 
   /* Output any debug messages BEFORE restoring errno (because they may
    * alter errno), then disable interrupts again and restore the original
    */
 
   sinfo("Resuming\n");
+
+#ifdef CONFIG_SMP
+  enter_critical_section();
+#else
   up_irq_save();
+#endif
 
   /* Modify the saved return state with the actual saved values in the
    * TCB.  This depends on the fact that nested signal handling is
@@ -117,6 +143,22 @@
   regs[REG_RFLAGS] = rtcb->xcp.saved_rflags;
   rtcb->xcp.sigdeliver = NULL; /* Allows next handler to be scheduled */
 
+#ifdef CONFIG_SMP
+  /* Restore the saved 'irqcount' and recover the critical section
+   * spinlocks.
+   *
+   * REVISIT: irqcount should be one from the above call to
+   * enter_critical_section().  Could the saved_irqcount be zero?  That
+   * would be a problem.
+   */
+
+  DEBUGASSERT(rtcb->irqcount == 1);
+  while (rtcb->irqcount < saved_irqcount)
+    {
+      enter_critical_section();
+    }
+#endif
+
   /* Then restore the correct state for this thread of execution. */
 
   board_autoled_off(LED_SIGNAL);
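The irqcount handling in x86_64_sigdeliver() is easiest to see in isolation.
In the simplified mock below the enter/leave helpers only adjust a counter,
whereas the real NuttX functions also take or release the interrupt state
and the global spinlocks; the shape of the drain-to-zero and
restore-to-saved-depth loops is the same:

#include <assert.h>
#include <stdio.h>

struct tcb_s
{
  int irqcount;  /* depth of nested critical sections held by this task */
};

/* Simplified stand-ins: the real functions take/return interrupt state
 * and acquire or release a global spinlock; here only the depth matters.
 */

static struct tcb_s g_rtcb;

static void leave_critical_section(void)
{
  g_rtcb.irqcount--;
}

static void enter_critical_section(void)
{
  g_rtcb.irqcount++;
}

int main(void)
{
  int saved_irqcount;

  g_rtcb.irqcount = 2;          /* depth on entry to the trampoline */
  saved_irqcount = g_rtcb.irqcount;

  /* Drain to zero so the signal handler runs outside the critical section */

  while (g_rtcb.irqcount > 0)
    {
      leave_critical_section();
    }

  printf("handler runs with irqcount=%d\n", g_rtcb.irqcount);

  /* On resume: re-enter once, then recover the saved depth */

  enter_critical_section();
  while (g_rtcb.irqcount < saved_irqcount)
    {
      enter_critical_section();
    }

  assert(g_rtcb.irqcount == saved_irqcount);
  printf("resumed with irqcount=%d\n", g_rtcb.irqcount);
  return 0;
}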