/****************************************************************************
 * arch/arm/src/armv7-a/arm_schedulesigaction.c
 *
 *   Copyright (C) 2013, 2015-2019 Gregory Nutt. All rights reserved.
 *   Author: Gregory Nutt <gnutt@nuttx.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name NuttX nor the names of its contributors may be
 *    used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <stdint.h>
#include <sched.h>
#include <debug.h>

#include <nuttx/irq.h>
#include <nuttx/arch.h>

#include "arm.h"
#include "sched/sched.h"
#include "arm_internal.h"
#include "arm_arch.h"

#include "irq/irq.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: up_schedule_sigaction
 *
 * Description:
 *   This function is called by the OS when one or more
 *   signal handling actions have been queued for execution.
 *   The architecture-specific code must configure things so
 *   that the 'sigdeliver' callback is executed on the thread
 *   specified by 'tcb' as soon as possible.
 *
 *   This function may be called from interrupt handling logic.
 *
 *   This operation should not cause the task to be unblocked
 *   nor should it cause any immediate execution of sigdeliver.
 *   Typically, a few cases need to be considered:
 *
 *   (1) This function may be called from an interrupt handler.
 *       During interrupt processing, all xcptcontext structures
 *       should be valid for all tasks.  That structure should
 *       be modified to invoke sigdeliver() either on return
 *       from (this) interrupt or on some subsequent context
 *       switch to the recipient task.
 *   (2) If not in an interrupt handler and the tcb is NOT
 *       the currently executing task, then again just modify
 *       the saved xcptcontext structure for the recipient
 *       task so it will invoke sigdeliver when that task is
 *       later resumed.
 *   (3) If not in an interrupt handler and the tcb IS the
 *       currently executing task -- just call the signal
 *       handler now.
 *
 ****************************************************************************/
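
/* Illustrative summary of the decision tree implemented below (comments
 * only, added as a reading aid; names follow the code that follows):
 *
 *   if (tcb == this_task())     The recipient is the running task:
 *     if (!CURRENT_REGS)          not in an interrupt -> deliver the
 *                                 signal immediately;
 *     else                        in an interrupt -> redirect the
 *                                 interrupt return to up_sigdeliver().
 *   else                        The recipient is not running -> patch its
 *                               saved context so up_sigdeliver() runs when
 *                               the task is next resumed.
 */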

#ifndef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being
       * delivered to a task that is currently executing on this CPU.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);

      if (tcb == this_task())
        {
          /* CASE 1:  We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (!CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT:  Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the interrupted
           * task is the same as the one that must receive the signal.  In
           * this case we will have to modify the return state as well as
           * the state in the TCB.
           *
           * Hmmm... there appears to be a latent bug here:  The following
           * logic would fail in the strange case where we are in an
           * interrupt handler, the thread is signaling itself, but a
           * context switch to another task has occurred so that
           * CURRENT_REGS does not refer to the thread of this_task()!
           */

          else
            {
              /* Save the return PC and CPSR.  These will be restored by
               * the signal trampoline after the signal has been
               * delivered.
               */

              tcb->xcp.sigdeliver = sigdeliver;
              tcb->xcp.saved_pc = CURRENT_REGS[REG_PC];
              tcb->xcp.saved_cpsr = CURRENT_REGS[REG_CPSR];

              /* Then set up to vector to the trampoline with interrupts
               * disabled
               */
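
              /* Explanatory note on the CPSR value used here:
               * up_sigdeliver() will start in SVC mode with both IRQs
               * (PSR_I_BIT) and FIQs (PSR_F_BIT) masked.  If the image is
               * built for Thumb, the T bit must be set as well so that
               * the trampoline executes as Thumb code.
               */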

              CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver;
              CURRENT_REGS[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT |
                                        PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
              CURRENT_REGS[REG_CPSR] |= PSR_T_BIT;
#endif

              /* And make sure that the saved context in the TCB is the
               * same as the interrupt return context.
               */
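
              /* (Added note) up_savestate() copies the interrupt return
               * context in CURRENT_REGS into tcb->xcp.regs, so the task
               * still vectors to the trampoline even if a context switch
               * occurs before this interrupt returns.
               */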

              up_savestate(tcb->xcp.regs);
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running from
       * an interrupt handler or (2) we are not in an interrupt handler
       * and the running task is signaling some other non-running task.
       */

      else
        {
          /* Save the return PC and CPSR.  These will be restored by the
           * signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver = sigdeliver;
          tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR];

          /* Then set up to vector to the trampoline with interrupts
           * disabled
           */

          tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
          tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT;
#endif
        }
    }

  leave_critical_section(flags);
}
#endif /* !CONFIG_SMP */

#ifdef CONFIG_SMP
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;
  int cpu;
  int me;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being
       * delivered to a task that is currently executing on any CPU.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);
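
      /* Illustrative summary of the SMP decision tree below (comments
       * only, added as a reading aid):
       *
       *   tcb is RUNNING on some CPU:
       *     on this CPU and not in an interrupt -> deliver the signal now;
       *     otherwise -> pause the other CPU if necessary, patch the
       *       context (CURRENT_REGS or tcb->xcp.regs) to vector to
       *       up_sigdeliver(), then resume the paused CPU.
       *   tcb is not running:
       *     patch the saved context in the TCB so up_sigdeliver() runs
       *     when the task is next resumed.
       */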

      if (tcb->task_state == TSTATE_TASK_RUNNING)
        {
          me  = this_cpu();
          cpu = tcb->cpu;

          /* CASE 1:  We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (cpu == me && !CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT:  Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2:  The task that needs to receive the signal is
           * running.  This could happen if the task is running on another
           * CPU OR if we are in an interrupt handler and the task is
           * running on this CPU.  In the former case, we will have to
           * PAUSE the other CPU first.  But in either case, we will have
           * to modify the return state as well as the state in the TCB.
           */

          else
            {
              /* If we are signaling a task running on the other CPU, we
               * have to PAUSE the other CPU.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);

                  /* Wait while the pause request is pending */

                  while (up_cpu_pausereq(cpu))
                    {
                    }

                  /* Now tcb on the other CPU can be accessed safely */

                  /* Save the return PC and CPSR from tcb->xcp.regs.
                   * These will be restored by the signal trampoline after
                   * the signal has been delivered.
                   */

                  tcb->xcp.sigdeliver = sigdeliver;
                  tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC];
                  tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR];

                  /* Then set up to vector to the trampoline with
                   * interrupts disabled
                   */

                  tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver;
                  tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT |
                                             PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
                  tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT;
#endif
                }
              else
                {
                  /* tcb is running on the same CPU */

                  /* Save the return PC and CPSR.  These will be restored
                   * by the signal trampoline after the signal has been
                   * delivered.
                   */

                  tcb->xcp.sigdeliver = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc = CURRENT_REGS[REG_PC];
                  tcb->xcp.saved_cpsr = CURRENT_REGS[REG_CPSR];

                  /* Then set up to vector to the trampoline with
                   * interrupts disabled.  The kernel-space trampoline
                   * must run in privileged (SVC) mode.
                   */

                  CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver;
                  CURRENT_REGS[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT |
                                            PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
                  CURRENT_REGS[REG_CPSR] |= PSR_T_BIT;
#endif

                  /* And make sure that the saved context in the TCB is
                   * the same as the interrupt return context.
                   */

                  up_savestate(tcb->xcp.regs);
                }

              /* Increment the IRQ lock count so that when the task is
               * restarted, it will hold the IRQ spinlock.
               */

              DEBUGASSERT(tcb->irqcount < INT16_MAX);
              tcb->irqcount++;

              /* In an SMP configuration, the interrupt disable logic also
               * involves spinlocks that are configured per the TCB
               * irqcount field.  This is logically equivalent to
               * enter_critical_section().  The matching call to
               * leave_critical_section() will be performed in
               * up_sigdeliver().
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);

              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
                {
                  up_cpu_resume(cpu);
                }
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running from
       * an interrupt handler or (2) we are not in an interrupt handler
       * and the running task is signaling some other non-running task.
       */

      else
        {
          /* Save the return PC and CPSR.  These will be restored by the
           * signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver = sigdeliver;
          tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR];

          /* Increment the IRQ lock count so that when the task is
           * restarted, it will hold the IRQ spinlock.
           */

          DEBUGASSERT(tcb->irqcount < INT16_MAX);
          tcb->irqcount++;

          /* Then set up to vector to the trampoline with interrupts
           * disabled
           */

          tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT);
#ifdef CONFIG_ARM_THUMB
          tcb->xcp.regs[REG_CPSR] |= PSR_T_BIT;
#endif
        }
    }

  leave_critical_section(flags);
}
#endif /* CONFIG_SMP */