diff --git a/arch/arm/src/armv7-a/arm_schedulesigaction.c b/arch/arm/src/armv7-a/arm_schedulesigaction.c index c448a9a8a4..35b71afb71 100644 --- a/arch/arm/src/armv7-a/arm_schedulesigaction.c +++ b/arch/arm/src/armv7-a/arm_schedulesigaction.c @@ -1,7 +1,7 @@ /**************************************************************************** * arch/arm/src/armv7-a/arm_schedulesigaction.c * - * Copyright (C) 2013, 2015-2016 Gregory Nutt. All rights reserved. + * Copyright (C) 2013, 2015-2017 Gregory Nutt. All rights reserved. * Author: Gregory Nutt * * Redistribution and use in source and binary forms, with or without @@ -51,6 +51,8 @@ #include "up_internal.h" #include "up_arch.h" +#include "irq/irq.h" + #ifndef CONFIG_DISABLE_SIGNALS /**************************************************************************** @@ -90,6 +92,7 @@ * ****************************************************************************/ +#ifndef CONFIG_SMP void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) { irqstate_t flags; @@ -105,7 +108,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) if (!tcb->xcp.sigdeliver) { /* First, handle some special cases when the signal is being delivered - * to the currently executing task. + * to task that is currently executing on this CPU. */ sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS); @@ -153,18 +156,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver; CURRENT_REGS[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT); -#ifdef CONFIG_SMP - /* In an SMP configuration, the interrupt disable logic also - * involves spinlocks that are configured per the TCB irqcount - * field. This is logically equivalent to enter_critical_section(). - * The matching call to leave_critical_section() will be - * performed in up_sigdeliver(). 
- */ - - DEBUGASSERT(tcb->irqcount < INT16_MAX); - tcb->irqcount++; -#endif - /* And make sure that the saved context in the TCB is the same * as the interrupt return context. */ @@ -175,7 +166,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) /* Otherwise, we are (1) signaling a task is not running from an * interrupt handler or (2) we are not in an interrupt handler and the - * running task is signalling some non-running task. + * running task is signalling some other non-running task. */ else @@ -195,23 +186,175 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT); - -#ifdef CONFIG_SMP - /* In an SMP configuration, the interrupt disable logic also - * involves spinlocks that are configured per the TCB irqcount - * field. This is logically equivalent to enter_critical_section(); - * The matching leave_critical_section will be performed in - * The matching call to leave_critical_section() will be performed - * in up_sigdeliver(). - */ - - DEBUGASSERT(tcb->irqcount < INT16_MAX); - tcb->irqcount++; -#endif } } leave_critical_section(flags); } +#endif /* !CONFIG_SMP */ + +#ifdef CONFIG_SMP +void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +{ + irqstate_t flags; + int cpu; + int me; + + sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver); + + /* Make sure that interrupts are disabled */ + + flags = enter_critical_section(); + + /* Refuse to handle nested signal actions */ + + if (!tcb->xcp.sigdeliver) + { + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. 
+ */ + + sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS); + + me = this_cpu(); + cpu = tcb->cpu; + + if (tcb->task_state == TSTATE_TASK_RUNNING) + { + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. + */ + + if (cpu == me && !CURRENT_REGS) + { + /* In this case just deliver the signal now. */ + + sigdeliver(tcb); + } + + /* CASE 2: The task that needs to receive the signal is running. + * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. But in either case, we will have to modify the return + * state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that CURRENT_REGS does not + * refer to the thread of this_task()! + */ + + else + { + /* If we signalling a task running on the other CPU, we have + * to PAUSE the other CPU. + */ + + if (cpu != me) + { + up_cpu_pause(cpu); + } + + /* Save the return lr and cpsr and one scratch register + * These will be restored by the signal trampoline after + * the signals have been delivered. + */ + + tcb->xcp.sigdeliver = sigdeliver; + tcb->xcp.saved_pc = CURRENT_REGS[REG_PC]; + tcb->xcp.saved_cpsr = CURRENT_REGS[REG_CPSR]; + + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock. + */ + + DEBUGASSERT(tcb->irqcount < INT16_MAX); + tcb->irqcount++; + + /* Handle a possible race condition where the TCB was suspended + * just before we paused the other CPU. 
+ */ + + if (tcb->task_state != TSTATE_TASK_RUNNING) + { + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; + tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT); + } + else + { + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver; + CURRENT_REGS[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT); + + /* In an SMP configuration, the interrupt disable logic also + * involves spinlocks that are configured per the TCB irqcount + * field. This is logically equivalent to enter_critical_section(). + * The matching call to leave_critical_section() will be + * performed in up_sigdeliver(). + */ + + spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, + &g_cpu_irqlock); + + /* And make sure that the saved context in the TCB is the same + * as the interrupt return context. + */ + + up_savestate(tcb->xcp.regs); + } + + /* RESUME the other CPU if it was PAUSED */ + + if (cpu != me) + { + up_cpu_resume(cpu); + } + } + } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signalling some other non-running task. + */ + + else + { + /* Save the return lr and cpsr and one scratch register. These + * will be restored by the signal trampoline after the signals + * have been delivered. + */ + + tcb->xcp.sigdeliver = sigdeliver; + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR]; + + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock.
+ */ + + DEBUGASSERT(tcb->irqcount < INT16_MAX); + tcb->irqcount++; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; + tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT); + } + } + + leave_critical_section(flags); +} +#endif /* CONFIG_SMP */ #endif /* !CONFIG_DISABLE_SIGNALS */ diff --git a/arch/arm/src/armv7-m/up_schedulesigaction.c b/arch/arm/src/armv7-m/up_schedulesigaction.c index fb6a4c167c..d4dd36c04a 100644 --- a/arch/arm/src/armv7-m/up_schedulesigaction.c +++ b/arch/arm/src/armv7-m/up_schedulesigaction.c @@ -1,7 +1,7 @@ /**************************************************************************** * arch/arm/src/armv7-m/up_schedulesigaction.c * - * Copyright (C) 2009-2014, 2016 Gregory Nutt. All rights reserved. + * Copyright (C) 2009-2014, 2016-2017 Gregory Nutt. All rights reserved. * Author: Gregory Nutt * * Redistribution and use in source and binary forms, with or without @@ -52,6 +52,8 @@ #include "up_internal.h" #include "up_arch.h" +#include "irq/irq.h" + #ifndef CONFIG_DISABLE_SIGNALS /**************************************************************************** @@ -91,6 +93,7 @@ * ****************************************************************************/ +#ifndef CONFIG_SMP void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) { irqstate_t flags; @@ -165,19 +168,6 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) #ifdef CONFIG_BUILD_PROTECTED CURRENT_REGS[REG_LR] = EXC_RETURN_PRIVTHR; #endif - -#ifdef CONFIG_SMP - /* In an SMP configuration, the interrupt disable logic also - * involves spinlocks that are configured per the TCB irqcount - * field. This is logically equivalent to enter_critical_section(). - * The matching call to leave_critical_section() will be - * performed in up_sigdeliver(). 
- */ - - DEBUGASSERT(tcb->irqcount < INT16_MAX); - tcb->irqcount++; -#endif - /* And make sure that the saved context in the TCB is the same * as the interrupt return context. */ @@ -224,23 +214,215 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) #ifdef CONFIG_BUILD_PROTECTED tcb->xcp.regs[REG_LR] = EXC_RETURN_PRIVTHR; #endif + } + } + + leave_critical_section(flags); +} +#endif /* !CONFIG_SMP */ #ifdef CONFIG_SMP - /* In an SMP configuration, the interrupt disable logic also - * involves spinlocks that are configured per the TCB irqcount - * field. This is logically equivalent to enter_critical_section(); - * The matching leave_critical_section will be performed in - * The matching call to leave_critical_section() will be performed - * in up_sigdeliver(). +void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +{ + irqstate_t flags; + int cpu; + int me; + + sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver); + + /* Make sure that interrupts are disabled */ + + flags = enter_critical_section(); + + /* Refuse to handle nested signal actions */ + + if (!tcb->xcp.sigdeliver) + { + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ + + sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS); + + me = this_cpu(); + cpu = tcb->cpu; + + if (tcb->task_state == TSTATE_TASK_RUNNING) + { + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. + */ + + if (cpu == me && !CURRENT_REGS) + { + /* In this case just deliver the signal now. */ + + sigdeliver(tcb); + } + + /* CASE 2: The task that needs to receive the signal is running. + * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. 
But in either case, we will have to modify the return + * state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that CURRENT_REGS does not + * refer to the thread of this_task()! + */ + + else + { + /* If we signalling a task running on the other CPU, we have + * to PAUSE the other CPU. + */ + + if (cpu != me) + { + up_cpu_pause(cpu); + } + + /* Save the return PC, CPSR and either the BASEPRI or PRIMASK + * registers (and perhaps also the LR). These will be + * restored by the signal trampoline after the signal has been + * delivered. + */ + + tcb->xcp.sigdeliver = (FAR void *)sigdeliver; + tcb->xcp.saved_pc = CURRENT_REGS[REG_PC]; +#ifdef CONFIG_ARMV7M_USEBASEPRI + tcb->xcp.saved_basepri = CURRENT_REGS[REG_BASEPRI]; +#else + tcb->xcp.saved_primask = CURRENT_REGS[REG_PRIMASK]; +#endif + tcb->xcp.saved_xpsr = CURRENT_REGS[REG_XPSR]; +#ifdef CONFIG_BUILD_PROTECTED + tcb->xcp.saved_lr = CURRENT_REGS[REG_LR]; +#endif + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock. + */ + + DEBUGASSERT(tcb->irqcount < INT16_MAX); + tcb->irqcount++; + + /* Handle a possible race condition where the TCB was suspended + * just before we paused the other CPU. + */ + + if (tcb->task_state != TSTATE_TASK_RUNNING) + { + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode + * to be here. 
+ */ + + tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; +#ifdef CONFIG_ARMV7M_USEBASEPRI + tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; +#else + tcb->xcp.regs[REG_PRIMASK] = 1; +#endif + tcb->xcp.regs[REG_XPSR] = ARMV7M_XPSR_T; +#ifdef CONFIG_BUILD_PROTECTED + tcb->xcp.regs[REG_LR] = EXC_RETURN_PRIVTHR; +#endif + } + else + { + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver; +#ifdef CONFIG_ARMV7M_USEBASEPRI + CURRENT_REGS[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; +#else + CURRENT_REGS[REG_PRIMASK] = 1; +#endif + CURRENT_REGS[REG_XPSR] = ARMV7M_XPSR_T; +#ifdef CONFIG_BUILD_PROTECTED + CURRENT_REGS[REG_LR] = EXC_RETURN_PRIVTHR; +#endif + /* In an SMP configuration, the interrupt disable logic also + * involves spinlocks that are configured per the TCB irqcount + * field. This is logically equivalent to enter_critical_section(). + * The matching call to leave_critical_section() will be + * performed in up_sigdeliver(). + */ + + spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, + &g_cpu_irqlock); + + /* And make sure that the saved context in the TCB is the same + * as the interrupt return context. + */ + + up_savestate(tcb->xcp.regs); + } + + /* RESUME the other CPU if it was PAUSED */ + + if (cpu != me) + { + up_cpu_resume(cpu); + } + } + } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signalling some other non-running task. + */ + + else + { + /* Save the return PC, CPSR and either the BASEPRI or PRIMASK + * registers (and perhaps also the LR). These will be restored + * by the signal trampoline after the signal has been delivered.
+ */ + + tcb->xcp.sigdeliver = (FAR void *)sigdeliver; + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; +#ifdef CONFIG_ARMV7M_USEBASEPRI + tcb->xcp.saved_basepri = tcb->xcp.regs[REG_BASEPRI]; +#else + tcb->xcp.saved_primask = tcb->xcp.regs[REG_PRIMASK]; +#endif + tcb->xcp.saved_xpsr = tcb->xcp.regs[REG_XPSR]; +#ifdef CONFIG_BUILD_PROTECTED + tcb->xcp.saved_lr = tcb->xcp.regs[REG_LR]; +#endif + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock. */ DEBUGASSERT(tcb->irqcount < INT16_MAX); tcb->irqcount++; + + /* Then set up to vector to the trampoline with interrupts + * disabled. We must already be in privileged thread mode to be + * here. + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver; +#ifdef CONFIG_ARMV7M_USEBASEPRI + tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY; +#else + tcb->xcp.regs[REG_PRIMASK] = 1; +#endif + tcb->xcp.regs[REG_XPSR] = ARMV7M_XPSR_T; +#ifdef CONFIG_BUILD_PROTECTED + tcb->xcp.regs[REG_LR] = EXC_RETURN_PRIVTHR; #endif } } leave_critical_section(flags); } +#endif /* CONFIG_SMP */ #endif /* !CONFIG_DISABLE_SIGNALS */ diff --git a/arch/arm/src/armv7-m/up_sigdeliver.c b/arch/arm/src/armv7-m/up_sigdeliver.c index 086ed882fc..5aff11499c 100644 --- a/arch/arm/src/armv7-m/up_sigdeliver.c +++ b/arch/arm/src/armv7-m/up_sigdeliver.c @@ -54,18 +54,6 @@ #ifndef CONFIG_DISABLE_SIGNALS -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -/**************************************************************************** - * Private Data - ****************************************************************************/ - -/**************************************************************************** - * Private Functions - ****************************************************************************/ - 
/**************************************************************************** * Public Functions ****************************************************************************/ diff --git a/arch/xtensa/src/common/xtensa_schedsigaction.c b/arch/xtensa/src/common/xtensa_schedsigaction.c index 6154f5126a..f6b3ddb46a 100644 --- a/arch/xtensa/src/common/xtensa_schedsigaction.c +++ b/arch/xtensa/src/common/xtensa_schedsigaction.c @@ -1,7 +1,7 @@ /**************************************************************************** * arch/xtensa/src/common/arm_schedulesigaction.c * - * Copyright (C) 2016 Gregory Nutt. All rights reserved. + * Copyright (C) 2016-2017 Gregory Nutt. All rights reserved. * Author: Gregory Nutt * * Redistribution and use in source and binary forms, with or without @@ -49,6 +49,8 @@ #include "sched/sched.h" #include "xtensa.h" +#include "irq/irq.h" + #ifndef CONFIG_DISABLE_SIGNALS /**************************************************************************** @@ -88,6 +90,7 @@ * ****************************************************************************/ +#ifndef CONFIG_SMP void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) { irqstate_t flags; @@ -135,9 +138,8 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) else { - /* Save the return lr and cpsr and one scratch register - * These will be restored by the signal trampoline after - * the signals have been delivered. + /* Save the return pc and ps. These will be restored by the + * signal trampoline after the signals have been delivered. * * NOTE: that hi-priority interrupts are not disabled. */ @@ -172,29 +174,204 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) else { - /* Save the return lr and cpsr and one scratch register. These - * will be restored by the signal trampoline after the signals - * have been delivered. + /* Save the return pc and ps. 
These will be restored by the + signal trampoline after the signals have been delivered. + * + * NOTE: that hi-priority interrupts are not disabled. */ tcb->xcp.sigdeliver = sigdeliver; - tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; - tcb->xcp.saved_ps = tcb->xcp.regs[REG_PS]; + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_ps = tcb->xcp.regs[REG_PS]; /* Then set up to vector to the trampoline with interrupts * disabled */ - tcb->xcp.regs[REG_PC] = (uint32_t)_xtensa_sig_trampoline; + tcb->xcp.regs[REG_PC] = (uint32_t)_xtensa_sig_trampoline; #ifdef __XTENSA_CALL0_ABI__ - tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); #else - tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE); + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE); #endif } } leave_critical_section(flags); } +#endif /* !CONFIG_SMP */ + +#ifdef CONFIG_SMP +void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver) +{ + irqstate_t flags; + int cpu; + int me; + + sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver); + + /* Make sure that interrupts are disabled */ + + flags = enter_critical_section(); + + /* Refuse to handle nested signal actions */ + + if (!tcb->xcp.sigdeliver) + { + /* First, handle some special cases when the signal is being delivered + * to task that is currently executing on any CPU. + */ + + sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS); + + me = this_cpu(); + cpu = tcb->cpu; + + if (tcb->task_state == TSTATE_TASK_RUNNING) + { + /* CASE 1: We are not in an interrupt handler and a task is + * signalling itself for some reason. + */ + + if (cpu == me && !CURRENT_REGS) + { + /* In this case just deliver the signal now. */ + + sigdeliver(tcb); + } + + /* CASE 2: The task that needs to receive the signal is running.
+ * This could happen if the task is running on another CPU OR if + * we are in an interrupt handler and the task is running on this + * CPU. In the former case, we will have to PAUSE the other CPU + * first. But in either case, we will have to modify the return + * state as well as the state in the TCB. + * + * Hmmm... there looks like a latent bug here: The following logic + * would fail in the strange case where we are in an interrupt + * handler, the thread is signalling itself, but a context switch + * to another task has occurred so that CURRENT_REGS does not + * refer to the thread of this_task()! + */ + + else + { + /* If we signalling a task running on the other CPU, we have + * to PAUSE the other CPU. + */ + + if (cpu != me) + { + up_cpu_pause(cpu); + } + + /* Save the return pc and ps. These will be restored by the + * signal trampoline after the signals have been delivered. + * + * NOTE: that hi-priority interrupts are not disabled. + */ + + tcb->xcp.sigdeliver = sigdeliver; + tcb->xcp.saved_pc = CURRENT_REGS[REG_PC]; + tcb->xcp.saved_ps = CURRENT_REGS[REG_PS]; + + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock. + */ + + DEBUGASSERT(tcb->irqcount < INT16_MAX); + tcb->irqcount++; + + /* Handle a possible race condition where the TCB was suspended + * just before we paused the other CPU. 
+ */ + + if (tcb->task_state != TSTATE_TASK_RUNNING) + { + tcb->xcp.regs[REG_PC] = (uint32_t)_xtensa_sig_trampoline; +#ifdef __XTENSA_CALL0_ABI__ + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); +#else + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE); +#endif + } + else + { + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + CURRENT_REGS[REG_PC] = (uint32_t)_xtensa_sig_trampoline; +#ifdef __XTENSA_CALL0_ABI__ + CURRENT_REGS[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); +#else + CURRENT_REGS[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE); +#endif + /* In an SMP configuration, the interrupt disable logic also + * involves spinlocks that are configured per the TCB irqcount + * field. This is logically equivalent to enter_critical_section(). + * The matching call to leave_critical_section() will be + * performed in up_sigdeliver(). + */ + + spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, + &g_cpu_irqlock); + + /* And make sure that the saved context in the TCB is the same + * as the interrupt return context. + */ + + xtensa_savestate(tcb->xcp.regs); + } + + /* RESUME the other CPU if it was PAUSED */ + + if (cpu != me) + { + up_cpu_resume(cpu); + } + } + } + + /* Otherwise, we are (1) signaling a task is not running from an + * interrupt handler or (2) we are not in an interrupt handler and the + * running task is signalling some other non-running task. + */ + + else + { + /* Save the return pc and ps. These will be restored by the + * signal trampoline after the signals have been delivered. + * + * NOTE: that hi-priority interrupts are not disabled. + */ + + tcb->xcp.sigdeliver = sigdeliver; + tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC]; + tcb->xcp.saved_ps = tcb->xcp.regs[REG_PS]; + + /* Increment the IRQ lock count so that when the task is restarted, + * it will hold the IRQ spinlock.
+ */ + + DEBUGASSERT(tcb->irqcount < INT16_MAX); + tcb->irqcount++; + + /* Then set up to vector to the trampoline with interrupts + * disabled + */ + + tcb->xcp.regs[REG_PC] = (uint32_t)_xtensa_sig_trampoline; +#ifdef __XTENSA_CALL0_ABI__ + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM); +#else + tcb->xcp.regs[REG_PS] = (uint32_t)(PS_INTLEVEL(XCHAL_EXCM_LEVEL) | PS_UM | PS_WOE); +#endif + } + } + + leave_critical_section(flags); +} +#endif /* CONFIG_SMP */ #endif /* !CONFIG_DISABLE_SIGNALS */