SMP: Back out deferred IRQ locking. This was accidentally merged into master and it looks like it is going to be more work than I thought to get it working again. Changes will go to the irqlock branch.

Gregory Nutt 2016-12-24 19:53:37 -06:00
parent 3af6b2a9a6
commit efb86382c3
16 changed files with 39 additions and 409 deletions
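For context: in the SMP configuration, critical-section management uses a "voting" scheme. The current task on each CPU casts its "vote" through its TCB irqcount; the votes are collected in the bitset g_cpu_irqset, and the global spinlock g_cpu_irqlock stays locked for as long as any bit in that set remains set. The deferred-locking change being backed out postponed the local CPU's vote until the context switch had fully completed (the irq_restore_lock() calls removed below). What follows is a deliberately simplified, hypothetical sketch of how a vote is cast and withdrawn; the real logic lives in sched/irq/irq_csection.c, also handles interrupt context and waiting for other CPUs, and is not part of this diff.

irqstate_t enter_critical_section(void) /* simplified sketch only */
{
  irqstate_t flags = up_irq_save();               /* Mask interrupts locally */
  FAR struct tcb_s *rtcb = current_task(this_cpu());

  if (rtcb->irqcount++ == 0)                      /* Outermost entry? */
    {
      /* Cast this CPU's vote: setting our bit in g_cpu_irqset also
       * asserts the global g_cpu_irqlock.
       */

      spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                  &g_cpu_irqlock);
    }

  return flags;
}

void leave_critical_section(irqstate_t flags) /* simplified sketch only */
{
  FAR struct tcb_s *rtcb = current_task(this_cpu());

  if (--rtcb->irqcount == 0)                      /* Outermost exit? */
    {
      /* Withdraw the vote; g_cpu_irqlock is released when no votes remain */

      spin_clrbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                  &g_cpu_irqlock);
    }

  up_irq_restore(flags);                          /* Unmask interrupts locally */
}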

View File

@ -51,7 +51,6 @@
#include "up_internal.h"
#include "group/group.h"
#include "irq/irq.h"
#include "gic.h"
/****************************************************************************
@ -121,17 +120,6 @@ static inline uint32_t *_arm_doirq(int irq, uint32_t *regs)
(void)group_addrenv(NULL);
#endif
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
}
#endif

View File

@ -77,7 +77,6 @@ up_fullcontextrestore:
*/
#ifdef CONFIG_ARCH_FPU
/* First, restore the floating point registers. Let's do this before we
* restore the ARM registers so that we have plenty of registers to
* work with.
@ -97,11 +96,9 @@ up_fullcontextrestore:
ldr r2, [r1], #4 /* Fetch the floating point control and status register */
vmsr fpscr, r2 /* Restore the FPCSR */
#endif
#ifdef CONFIG_BUILD_KERNEL
/* For the kernel build, we need to be able to transition gracefully
* between kernel- and user-mode tasks. Here we do that with a system
* call; the system call will execute in kernel mode but can return
@ -119,74 +116,10 @@ up_fullcontextrestore:
bx lr /* Unnecessary ... will not return */
#else
/* For a flat build, we can do all of this here... Just think of this as
* a longjmp() on steroids.
*/
#ifdef CONFIG_SMP
/* Recover all registers except for the volatile registers {r0-r3, r12}
* and r14 (lr).
*/
add r1, r0, #(4*REG_R4) /* Offset to REG_R4 storage */
ldmia r1, {r4-r11} /* Recover registers */
/* Recover the stack pointer (r13) */
ldr sp, [r0, #(4*REG_SP)] /* Recover the stack pointer */
/* Create a stack frame to preserve the structure pointer and some
* additional registers. We should have everything preserved either
* in registers or on the stack when irq_restore_lock() is called (I am
* not sure that is necessary, but I have concerns about the save
* structure getting modified in the TCB if the spinlock is released --
* assuming that it is set?).
*/
sub sp, sp, #(4*8) /* Frame for eight registers */
ldr r1, [r0, #(4*REG_R2)] /* Fetch the stored r2 value */
str r1, [sp, #(4*0)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R3)] /* Fetch the stored r3 value */
str r1, [sp, #(4*1)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R12)] /* Fetch the stored r12 value */
str r1, [sp, #(4*2)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R14)] /* Fetch the stored r14 value */
str r1, [sp, #(4*3)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
str r1, [sp, #(4*4)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp, #(4*5)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
str r1, [sp, #(4*6)] /* Save it in the stack */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
str r1, [sp, #(4*7)] /* Save it at the bottom of the frame */
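/* At this point the eight-word frame at the new SP holds, from lowest
 * to highest address: r2, r3, r12, r14, the CPSR, r0, r1, and finally
 * the PC at offset 4*7.
 */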
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
bl irq_restore_lock
/* Recover the structure pointer and most of the volatile registers
* that were saved on the stack.
*/
ldr r2, [sp, #(4*0)] /* Recover R2 */
ldr r3, [sp, #(4*1)] /* Recover R3 */
ldr r12, [sp, #(4*2)] /* Recover R12 */
ldr r14, [sp, #(4*3)] /* Recover R14 */
ldr r1, [sp, #(4*4)] /* Recover the saved CPSR in r1 */
add sp, sp, #(4*5) /* Discard 5 of the allocated 8 storage locations */
#else
/* Recover all registers except for r0, r1, R15, and CPSR */
add r1, r0, #(4*REG_R2) /* Offset to REG_R2 storage */
@ -196,16 +129,11 @@ up_fullcontextrestore:
sub sp, sp, #(3*4) /* Frame for three registers */
ldr r1, [r0, #(4*REG_R0)] /* Fetch the stored r0 value */
str r1, [sp, #(4*0)] /* Save it at the top of the stack */
str r1, [sp] /* Save it at the top of the stack */
ldr r1, [r0, #(4*REG_R1)] /* Fetch the stored r1 value */
str r1, [sp, #(4*1)] /* Save it in the stack */
str r1, [sp, #4] /* Save it in the stack */
ldr r1, [r0, #(4*REG_PC)] /* Fetch the stored pc value */
str r1, [sp, #(4*2)] /* Save it at the bottom of the frame */
/* Recover the saved CPSR value in r1 */
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
#endif
str r1, [sp, #8] /* Save it at the bottom of the frame */
/* Now we can restore the CPSR. We wait until we are completely
* finished with the context save data to do this. Restore the CPSR
@ -214,13 +142,14 @@ up_fullcontextrestore:
* disabled.
*/
ldr r1, [r0, #(4*REG_CPSR)] /* Fetch the stored CPSR value */
msr cpsr, r1 /* Set the CPSR */
/* Now recover r0 and r1 */
ldr r0, [sp]
ldr r1, [sp, #4]
add sp, sp, #(4*2)
add sp, sp, #(2*4)
/* Then return to the address at the top of the stack,
* destroying the stack frame

View File

@ -50,8 +50,6 @@
#include "up_arch.h"
#include "up_internal.h"
#include "irq/irq.h"
/****************************************************************************
* Public Functions
****************************************************************************/
@ -93,35 +91,15 @@ uint32_t *up_doirq(int irq, uint32_t *regs)
* switch occurred during interrupt processing.
*/
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a "voting"
* algorithm with the current task on each CPU casting its "vote" by the
* state of the TCB irqcount flag. That irqcount for the current task
* on this CPU will be different if a context switch occurred.
*/
if (regs != (uint32_t *)CURRENT_REGS)
{
/* A context switch has occurred, time for the current task on this
* CPU to cast its vote.
*/
irq_restore_lock();
}
#endif
/* Return the current state of CURRENT_REGS */
regs = (uint32_t *)CURRENT_REGS;
/* Restore the previous value of CURRENT_REGS. NULL would indicate that
* we are no longer in an interrupt handler. It will be non-NULL if we
* are returning from a nested interrupt (which are NOT fully supported).
* are returning from a nested interrupt.
*/
CURRENT_REGS = savestate;
#endif
board_autoled_off(LED_INIRQ);
return regs;
}

View File

@ -1,7 +1,7 @@
/****************************************************************************
* arch/sim/src/up_blocktask.c
*
* Copyright (C) 2007-2009, 2013, 2015-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007-2009, 2013, 2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -47,7 +47,6 @@
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@ -146,16 +145,6 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state)
sched_resume_scheduler(rtcb);
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(rtcb->xcp.regs, 1);

View File

@ -1,7 +1,7 @@
/****************************************************************************
* arch/sim/src/up_exit.c
*
* Copyright (C) 2007-2009, 2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007-2009 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -46,7 +46,6 @@
#include "task/task.h"
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@ -93,17 +92,6 @@ void _exit(int status)
tcb->xcp.sigdeliver = NULL;
}
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(tcb->xcp.regs, 1);

View File

@ -47,7 +47,6 @@
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@ -129,16 +128,6 @@ void up_release_pending(void)
sched_resume_scheduler(rtcb);
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(rtcb->xcp.regs, 1);

View File

@ -1,7 +1,7 @@
/****************************************************************************
* arch/sim/src/up_reprioritizertr.c
*
* Copyright (C) 2007-2009, 2013, 2015-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2007-2009, 2013, 2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -48,7 +48,6 @@
#include <nuttx/sched.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@ -167,16 +166,6 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
sched_resume_scheduler(rtcb);
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(rtcb->xcp.regs, 1);

View File

@ -43,7 +43,6 @@
#include <nuttx/spinlock.h>
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
#ifdef CONFIG_SMP
@ -151,16 +150,6 @@ int up_cpu_paused(int cpu)
sched_resume_scheduler(rtcb);
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(rtcb->xcp.regs, 1);

View File

@ -46,7 +46,6 @@
#include "clock/clock.h"
#include "sched/sched.h"
#include "irq/irq.h"
#include "up_internal.h"
/****************************************************************************
@ -126,16 +125,6 @@ void up_unblock_task(FAR struct tcb_s *tcb)
sched_resume_scheduler(rtcb);
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
/* Then switch contexts */
up_longjmp(rtcb->xcp.regs, 1);

View File

@ -529,28 +529,6 @@ _xtensa_context_restore:
xtensa_context_restore:
ENTRY(16)
#ifdef CONFIG_SMP
/* Since this function does not return, it is only necessary to preserve
* the processor state save area pointer across the following C call.
*/
s32i a2, sp, LOCAL_OFFSET(1)
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*
* REVISIT: This should be the very last thing that is done before the
* 'rfe'. Ideally, you will like to have all of the registers restored
* (or protected on the stack) when the IRQ lock is unlocked.
*/
call4 irq_restore_lock
l32i a2, sp, LOCAL_OFFSET(1)
#endif
#ifndef __XTENSA_CALL0_ABI__
/* Force a spill of the live registers of the thread that has been
* suspended.

View File

@ -52,7 +52,6 @@
#include "group/group.h"
#include "sched/sched.h"
#include "irq/irq.h"
/****************************************************************************
* Public Functions
@ -130,17 +129,6 @@ uint32_t *xtensa_irq_dispatch(int irq, uint32_t *regs)
(void)group_addrenv(NULL);
#endif
#ifdef CONFIG_SMP
/* In the SMP configuration, critical section management uses a
* "voting" algorithm with current task on each CPU casting its
* "vote" by the state of the TCB irqcount flag. That irqcount
* for the current task on this CPU will be different is a
* context switch occurrred.
*/
irq_restore_lock();
#endif
}
#endif

View File

@ -36,7 +36,7 @@
CSRCS += irq_initialize.c irq_attach.c irq_dispatch.c irq_unexpectedisr.c
ifeq ($(CONFIG_SMP),y)
CSRCS += irq_csection.c irq_restorelock.c
CSRCS += irq_csection.c
else ifeq ($(CONFIG_SCHED_INSTRUMENTATION_CSECTION),y)
CSRCS += irq_csection.c
endif

View File

@ -106,54 +106,6 @@ void weak_function irq_initialize(void);
int irq_unexpected_isr(int irq, FAR void *context);
/****************************************************************************
* Name: irq_restore_cpulock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on another CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states.
*
* Input Parameters:
* cpu - The CPU on which the task was started
* rtcb - The TCB of the task that was started
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb);
#endif
/****************************************************************************
* Name: irq_restore_lock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on the current CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states. However, the actual realization of that change cannot occur
* until all context switching operations have completed. This function
* implements the deferred setting of g_cpu_irqlock.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
* Assumptions:
* g_cpu_irqlock is set upon entry. It may or may not be set upon return.
*
****************************************************************************/
#ifdef CONFIG_SMP
void irq_restore_lock(void);
#endif
#undef EXTERN
#ifdef __cplusplus
}

View File

@ -1,120 +0,0 @@
/****************************************************************************
* sched/irq/irq_restorelock.c
*
* Copyright (C) 2016 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include "sched/sched.h"
#include "irq/irq.h"
#ifdef CONFIG_SMP
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: irq_restore_cpulock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on another CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states.
*
* Input Parameters:
* cpu - The CPU on which the task was started
* rtcb - The TCB of the task that was started
*
* Returned Value:
* None
*
****************************************************************************/
void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb)
{
/* Adjust global IRQ controls. If the irqcount is greater than
* zero, then this task/this CPU holds the IRQ lock.
*/
if (rtcb->irqcount > 0)
{
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
}
else
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
}
}
/****************************************************************************
* Name: irq_restore_lock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on the current CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states. However, the actual realization of that change cannot occur
* until all context switching operations have completed. This function
* implements the deferred setting of g_cpu_irqlock.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
* Assumptions:
* g_cpu_irqlock is set upon entry. It may or may not be set upon return.
*
****************************************************************************/
void irq_restore_lock(void)
{
FAR struct tcb_s *rtcb;
int cpu;
cpu = this_cpu();
rtcb = current_task(cpu);
/* Adjust the global IRQ controls for this CPU. */
irq_restore_cpulock(cpu, rtcb);
}
#endif /* CONFIG_SMP */
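The spin_setbit() and spin_clrbit() helpers used above (and by the scheduler changes that follow) are not part of this diff. A rough sketch of their presumed behavior, under the assumption that the "OR" lock simply mirrors whether the CPU set is non-empty, looks like this:

void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
                 FAR volatile spinlock_t *setlock,
                 FAR volatile spinlock_t *orlock)
{
  spin_lock(setlock);        /* Serialize modification of the CPU set */
  *set    |= (1 << cpu);     /* This CPU casts its vote */
  *orlock  = SP_LOCKED;      /* At least one vote, so the OR lock is held */
  spin_unlock(setlock);
}

void spin_clrbit(FAR volatile cpu_set_t *set, unsigned int cpu,
                 FAR volatile spinlock_t *setlock,
                 FAR volatile spinlock_t *orlock)
{
  spin_lock(setlock);        /* Serialize modification of the CPU set */
  *set    &= ~(1 << cpu);    /* This CPU withdraws its vote */
  *orlock  = (*set != 0) ? SP_LOCKED : SP_UNLOCKED;
  spin_unlock(setlock);
}

Applied to g_cpu_irqset and g_cpu_irqlock, this is what makes each CPU's vote visible to the critical-section logic.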

View File

@ -295,11 +295,6 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
/* Adjust global pre-emption controls. If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler
* lock.
*
* NOTE that the global IRQ controls cannot yet be changed. We
* must maintain the critical section until the full context
* switch is complete. irq_restore_lock() will perform that
* operation.
*/
if (btcb->lockcount > 0)
@ -313,14 +308,19 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
&g_cpu_schedlock);
}
/* If this is not the current CPU, then we should update IRQ locks
* now. Controls for this CPU will be updated when we finish the
* context switch.
/* Adjust global IRQ controls. If irqcount is greater than zero,
* then this task/this CPU holds the IRQ lock
*/
if (cpu != me)
if (btcb->irqcount > 0)
{
irq_restore_cpulock(cpu, btcb);
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
else
{
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
/* If the following task is not locked to this CPU, then it must
@ -379,10 +379,6 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
if (cpu != me)
{
/* In this case we will not want to report a context switch to this
* CPU. Only the other CPU is affected.
*/
DEBUGVERIFY(up_cpu_resume(cpu));
doswitch = false;
}

View File

@ -236,10 +236,6 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
/* Will pre-emption be disabled after the switch? If the lockcount is
* greater than zero, then this task/this CPU holds the scheduler lock.
*
* NOTE that the global IRQ controls cannot yet be changed. We must
* maintain the critical section until the full context switch is
* complete. irq_restore_lock() will perform that operation.
*/
if (nxttcb->lockcount > 0)
@ -257,6 +253,25 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
&g_cpu_schedlock);
}
/* Interrupts may be disabled after the switch. If irqcount is greater
* than zero, then this task/this CPU holds the IRQ lock
*/
if (nxttcb->irqcount > 0)
{
/* Yes... make sure that scheduling logic knows about this */
spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
else
{
/* No.. we may need to release our hold on the irq state. */
spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
&g_cpu_irqlock);
}
nxttcb->task_state = TSTATE_TASK_RUNNING;
/* All done, restart the other CPU (if it was paused). */
@ -264,13 +279,6 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
doswitch = true;
if (cpu != me)
{
/* If this is not the current CPU, then we should update IRQ locks
* now. Controls for this CPU will be updated when we finish the
* context switch.
*/
irq_restore_cpulock(cpu, nxttcb);
/* In this case we will not want to report a context switch to this
* CPU. Only the other CPU is affected.
*/