arch/arm/src/armv7-a: Port the SMP change by Masayuki Ishikawa to the ARMv7-A family.
parent d7fae340c1
commit 3ddea73dc1
@@ -1,7 +1,7 @@
 /****************************************************************************
  * arch/arm/src/armv7-a/arm_schedulesigaction.c
  *
- *   Copyright (C) 2013, 2015-2017 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2013, 2015-2018 Gregory Nutt. All rights reserved.
  *   Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,7 +116,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
   if (tcb == this_task())
     {
       /* CASE 1: We are not in an interrupt handler and a task is
-       * signalling itself for some reason.
+       * signaling itself for some reason.
        */

       if (!CURRENT_REGS)
@@ -133,7 +133,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
        *
        * Hmmm... there looks like a latent bug here: The following logic
        * would fail in the strange case where we are in an interrupt
-       * handler, the thread is signalling itself, but a context switch
+       * handler, the thread is signaling itself, but a context switch
        * to another task has occurred so that CURRENT_REGS does not
        * refer to the thread of this_task()!
        */
@@ -166,7 +166,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)

       /* Otherwise, we are (1) signaling a task is not running from an
        * interrupt handler or (2) we are not in an interrupt handler and the
-       * running task is signalling some other non-running task.
+       * running task is signaling some other non-running task.
        */

       else
@@ -222,7 +222,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
           cpu = tcb->cpu;

           /* CASE 1: We are not in an interrupt handler and a task is
-           * signalling itself for some reason.
+           * signaling itself for some reason.
            */

           if (cpu == me && !CURRENT_REGS)
@@ -238,51 +238,36 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
           * CPU.  In the former case, we will have to PAUSE the other CPU
           * first.  But in either case, we will have to modify the return
           * state as well as the state in the TCB.
-          *
-          * Hmmm... there looks like a latent bug here: The following logic
-          * would fail in the strange case where we are in an interrupt
-          * handler, the thread is signalling itself, but a context switch
-          * to another task has occurred so that CURRENT_REGS does not
-          * refer to the thread of this_task()!
           */

          else
            {
-             /* If we signalling a task running on the other CPU, we have
+             /* If we signaling a task running on the other CPU, we have
               * to PAUSE the other CPU.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);
-               }

-             /* Save the return lr and cpsr and one scratch register
-              * These will be restored by the signal trampoline after
-              * the signals have been delivered.
-              */
+                 /* Wait while the pause request is pending */

-             tcb->xcp.sigdeliver = sigdeliver;
-             tcb->xcp.saved_pc = CURRENT_REGS[REG_PC];
-             tcb->xcp.saved_cpsr = CURRENT_REGS[REG_CPSR];
+                 while (up_cpu_pausereq(cpu))
+                   {
+                   }

-             /* Increment the IRQ lock count so that when the task is restarted,
-              * it will hold the IRQ spinlock.
-              */
+                 /* Now tcb on the other CPU can be accessed safely */

-             DEBUGASSERT(tcb->irqcount < INT16_MAX);
-             tcb->irqcount++;
+                 /* Copy tcb->xcp.regs to tcp.xcp.saved. These will be restored
+                  * by the signal trampoline after the signal has been delivered.
+                  */

-             /* Handle a possible race condition where the TCB was suspended
-              * just before we paused the other CPU.  The critical section
-              * established above will prevent new threads from running on
-              * that CPU, but it will not guarantee that the running thread
-              * did not suspend itself (allowing any threads "assigned" to
-              * the CPU to run).
-              */
+                 tcb->xcp.sigdeliver = sigdeliver;
+                 tcb->xcp.saved_pc = tcb->xcp.regs[REG_PC];
+                 tcb->xcp.saved_cpsr = tcb->xcp.regs[REG_CPSR];

-             if (tcb->task_state != TSTATE_TASK_RUNNING)
-               {
                  /* Then set up to vector to the trampoline with interrupts
                   * disabled
                   */
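The hunk above is the heart of the port: before the register save area of a thread that is live on another CPU may be touched, that CPU is paused, and the sender spins until the pause request is no longer pending. Below is a minimal single-threaded sketch of that handshake, not NuttX code; pause_request() and pause_pending() are invented stand-ins for the real up_cpu_pause() and up_cpu_pausereq() interfaces.

    #include <stdbool.h>
    #include <stdio.h>

    static volatile bool g_paused;     /* set once the target CPU has stopped */

    static void pause_request(void)    /* stand-in for up_cpu_pause(cpu) */
    {
      /* A real port raises an inter-processor interrupt here; this sketch
       * just pretends the target acknowledged immediately.
       */

      g_paused = true;
    }

    static bool pause_pending(void)    /* stand-in for up_cpu_pausereq(cpu) */
    {
      return !g_paused;
    }

    int main(void)
    {
      pause_request();

      /* Wait while the pause request is pending, as the new code does,
       * so the paused thread's register area can be read safely.
       */

      while (pause_pending())
        {
        }

      puts("target paused; its saved registers are now safe to access");
      return 0;
    }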
@@ -292,23 +277,26 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
                }
              else
                {
-                 /* Then set up to vector to the trampoline with interrupts
-                  * disabled
-                  */
+                 /* tcb is running on the same CPU */
+
+                 /* Save the return PC, CPSR and either the BASEPRI or PRIMASK
+                  * registers (and perhaps also the LR).  These will be
+                  * restored by the signal trampoline after the signal has been
+                  * delivered.
+                  */
+
+                 tcb->xcp.sigdeliver = (FAR void *)sigdeliver;
+                 tcb->xcp.saved_pc = CURRENT_REGS[REG_PC];
+                 tcb->xcp.saved_cpsr = CURRENT_REGS[REG_CPSR];
+
+                 /* Then set up vector to the trampoline with interrupts
+                  * disabled.  The kernel-space trampoline must run in
+                  * privileged thread mode.
+                  */

                  CURRENT_REGS[REG_PC] = (uint32_t)up_sigdeliver;
                  CURRENT_REGS[REG_CPSR] = (PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT);

-                 /* In an SMP configuration, the interrupt disable logic also
-                  * involves spinlocks that are configured per the TCB irqcount
-                  * field.  This is logically equivalent to enter_critical_section().
-                  * The matching call to leave_critical_section() will be
-                  * performed in up_sigdeliver().
-                  */
-
-                 spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                             &g_cpu_irqlock);
-
                  /* And make sure that the saved context in the TCB is the same
                   * as the interrupt return context.
                   */
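For the same-CPU case, the hunk above saves the interrupted PC and CPSR and then rewrites the interrupt return context so that the interrupt "returns" into the signal trampoline rather than into the interrupted code. A sketch of that redirection follows; it is illustrative only, and regs, saved_pc, and trampoline() are invented names for what the real code does with CURRENT_REGS, tcb->xcp.saved_pc, and up_sigdeliver.

    #include <stdint.h>
    #include <stdio.h>

    #define REG_PC 0                        /* index of the saved program counter */

    static uintptr_t regs[1];               /* stand-in for CURRENT_REGS */
    static uintptr_t saved_pc;              /* stand-in for tcb->xcp.saved_pc */

    static void trampoline(void)            /* stand-in for up_sigdeliver */
    {
      /* ... the signal handler would run here, then the saved context
       * is restored so the interrupted code continues where it left off.
       */

      regs[REG_PC] = saved_pc;
    }

    int main(void)
    {
      regs[REG_PC] = 0x1000;                /* pretend interrupted address */

      saved_pc     = regs[REG_PC];          /* save the return state */
      regs[REG_PC] = (uintptr_t)trampoline; /* interrupt now "returns" here */

      trampoline();                         /* simulate the interrupt return */
      printf("resumed at %#lx\n", (unsigned long)regs[REG_PC]);
      return 0;
    }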
@@ -316,6 +304,23 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
                  up_savestate(tcb->xcp.regs);
                }

+             /* Increment the IRQ lock count so that when the task is restarted,
+              * it will hold the IRQ spinlock.
+              */
+
+             DEBUGASSERT(tcb->irqcount < INT16_MAX);
+             tcb->irqcount++;
+
+             /* In an SMP configuration, the interrupt disable logic also
+              * involves spinlocks that are configured per the TCB irqcount
+              * field.  This is logically equivalent to enter_critical_section().
+              * The matching call to leave_critical_section() will be
+              * performed in up_sigdeliver().
+              */
+
+             spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                         &g_cpu_irqlock);
+
              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
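The spin_setbit() call added above records in the g_cpu_irqset bitset that this CPU will hold the IRQ lock when the task restarts, and the bitset itself is updated under a spinlock so concurrent CPUs cannot race on it. A rough sketch of that pattern using C11 atomics, under the assumption that a test-and-set flag stands in for the real g_cpu_irqsetlock/g_cpu_irqlock pair:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_flag setlock = ATOMIC_FLAG_INIT; /* guards the bitset */
    static volatile uint8_t irqset;                /* one bit per CPU */

    static void set_cpu_bit(int cpu)
    {
      while (atomic_flag_test_and_set(&setlock))   /* spin until the lock is free */
        {
        }

      irqset |= (uint8_t)(1 << cpu);               /* mark cpu as an IRQ-lock holder */
      atomic_flag_clear(&setlock);
    }

    int main(void)
    {
      set_cpu_bit(1);   /* like spin_setbit(&g_cpu_irqset, cpu, ...) */
      printf("irqset = 0x%02x\n", irqset);
      return 0;
    }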
@@ -327,7 +332,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)

       /* Otherwise, we are (1) signaling a task is not running from an
        * interrupt handler or (2) we are not in an interrupt handler and the
-       * running task is signalling some other non-running task.
+       * running task is signaling some other non-running task.
        */

       else
@@ -413,7 +413,7 @@ int nxsig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
 * Description:
 *   This is the front-end for nxsig_tcbdispatch that should be typically
 *   be used to dispatch a signal.  If HAVE_GROUP_MEMBERS is defined,
- *   then function will follow the group signal delivery algorthrims:
+ *   then function will follow the group signal delivery algorithms:
 *
 *   This front-end does the following things before calling
 *   nxsig_tcbdispatch.