SMP: Fix an error in critical section logic when performing a context switch from an interrupt handler. The g_cpu_irqset bit was not being set for the CPU, so other CPUs did not know that it held the critical section.
commit 9ce4022096
parent 4ede950039
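The crux of the fix shows up in the irq_csection.c hunks below: when an interrupt handler takes the critical section for the first time on a CPU, the CPU's bit in g_cpu_irqset must be set in addition to starting g_cpu_nestcount. The stand-alone sketch that follows is only an illustration of that bookkeeping, not the NuttX implementation; the function name irq_enter_critical is hypothetical and the spinlock protection and real interrupt context are omitted.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static uint32_t g_cpu_irqset;            /* Bit n set: CPU n holds the critical section */
static uint8_t  g_cpu_nestcount[NCPUS];  /* Per-CPU nesting count for interrupt handlers */

/* Model of entering the critical section from an interrupt handler on 'cpu' */

static void irq_enter_critical(int cpu)
{
  if (g_cpu_nestcount[cpu] == 0)
    {
      /* First entry: in addition to starting the nesting count, publish
       * the fact by setting this CPU's bit.  Omitting this step is the
       * error that the commit corrects.
       */

      g_cpu_irqset |= (1u << cpu);
    }

  g_cpu_nestcount[cpu]++;
}

int main(void)
{
  irq_enter_critical(1);
  printf("g_cpu_irqset = 0x%x, nestcount[1] = %u\n",
         (unsigned)g_cpu_irqset, g_cpu_nestcount[1]);   /* Prints 0x2, 1 */
  return 0;
}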
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/irq/irq.h
  *
- * Copyright (C) 2007, 2008, 2013-2014 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007, 2008, 2013-2014, 2017 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -72,6 +72,10 @@ extern volatile spinlock_t g_cpu_irqlock SP_SECTION;
 
 extern volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
 extern volatile cpu_set_t g_cpu_irqset SP_SECTION;
+
+/* Handles nested calls to enter_critical section from interrupt handlers */
+
+extern volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
 #endif
 
 /****************************************************************************
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/irq/irq_csection.c
  *
- * Copyright (C) 2016 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2016-2017 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -66,16 +66,10 @@ volatile spinlock_t g_cpu_irqlock SP_SECTION = SP_UNLOCKED;
 
 volatile spinlock_t g_cpu_irqsetlock SP_SECTION;
 volatile cpu_set_t g_cpu_irqset SP_SECTION;
-#endif
 
-/****************************************************************************
- * Private Data
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
 /* Handles nested calls to enter_critical section from interrupt handlers */
 
-static uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
+volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
 #endif
 
 /****************************************************************************
@@ -303,6 +297,13 @@ try_again:
               /* In any event, the nesting count is now one */
 
               g_cpu_nestcount[cpu] = 1;
+
+              /* Also set the CPU bit so that other CPUs will be aware that this
+               * CPU holds the critical section.
+               */
+
+              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
             }
         }
       else
@@ -455,18 +456,22 @@ void leave_critical_section(irqstate_t flags)
             }
           else
             {
-              /* No, not nested. Release the spinlock. */
+              /* No, not nested. Restore the g_cpu_irqset for this CPU
+               * and release the spinlock (if necessary).
+               */
 
               DEBUGASSERT(spin_islocked(&g_cpu_irqlock) &&
                           g_cpu_nestcount[cpu] == 1);
 
-              spin_lock(&g_cpu_irqsetlock); /* Protects g_cpu_irqset */
-              if (g_cpu_irqset == 0)
+              FAR struct tcb_s *rtcb = this_task();
+              DEBUGASSERT(rtcb != NULL);
+
+              if (rtcb->irqcount <= 0)
                 {
-                  spin_unlock(&g_cpu_irqlock);
+                  spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                              &g_cpu_irqlock);
                 }
 
-              spin_unlock(&g_cpu_irqsetlock);
               g_cpu_nestcount[cpu] = 0;
             }
         }
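The leave_critical_section() hunk above is the other half of the bookkeeping: on the final nested exit from the handler's critical section, the CPU's bit is cleared only if the task being resumed does not itself hold the critical section (rtcb->irqcount <= 0). The following is a minimal model only, with hypothetical names (leave_critical_model, tcb_model_s) and the spinlock protection omitted.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct tcb_model_s          /* Stand-in for the tcb_s fields used here */
{
  int16_t irqcount;         /* > 0 if this task holds the critical section */
};

static uint32_t g_cpu_irqset;
static uint8_t  g_cpu_nestcount[NCPUS];

/* Model of leaving the interrupt handler's critical section on 'cpu' while
 * 'rtcb' is the task that will resume on that CPU.
 */

static void leave_critical_model(int cpu, struct tcb_model_s *rtcb)
{
  if (g_cpu_nestcount[cpu] > 1)
    {
      g_cpu_nestcount[cpu]--;            /* Still nested inside the handler */
      return;
    }

  if (rtcb->irqcount <= 0)
    {
      /* The resumed task does not hold the critical section, so this CPU
       * no longer holds it either: clear our bit.
       */

      g_cpu_irqset &= ~(1u << cpu);
    }

  g_cpu_nestcount[cpu] = 0;
}

int main(void)
{
  struct tcb_model_s task = { .irqcount = 1 };  /* Resumed task holds the lock */

  g_cpu_nestcount[0] = 1;
  g_cpu_irqset       = 0x1;

  leave_critical_model(0, &task);
  printf("g_cpu_irqset = 0x%x\n", (unsigned)g_cpu_irqset);  /* Stays 0x1 */
  return 0;
}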
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/sched/sched_addreadytorun.c
  *
- * Copyright (C) 2007-2009, 2014, 2016 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007-2009, 2014, 2016-2017 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -316,19 +316,40 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                       &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls. If irqcount is greater than zero,
-       * then this task/this CPU holds the IRQ lock
+      /* Adjust global IRQ controls. This works differently if we are
+       * performing a context switch from an interrupt handler and the
+       * interrupt handler has established a critical section. We can
+       * detect this case when g_cpu_nestcount[me] > 0:
        */
 
-      if (btcb->irqcount > 0)
+      if (g_cpu_nestcount[me] <= 0)
         {
-          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
+          /* If irqcount is greater than zero, then this task/this CPU
+           * holds the IRQ lock
+           */
+
+          if (btcb->irqcount > 0)
+            {
+              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+            }
+          else
+            {
+              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+            }
         }
       else
         {
-          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
+          /* Sanity check. g_cpu_netcount should be greater than zero
+           * only while we are within the critical section and within
+           * an interrupt handler. If we are not in an interrupt handler
+           * then there is a problem; perhaps some logic previously
+           * called enter_critical_section() with no matching call to
+           * leave_critical_section(), leaving the non-zero count.
+           */
+
+          DEBUGASSERT(up_interrupt_context());
        }
 
       /* If the following task is not locked to this CPU, then it must
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/sched_removereadytorun.c
  *
- * Copyright (C) 2007-2009, 2012, 2016 Gregory Nutt. All rights reserved.
+ * Copyright (C) 2007-2009, 2012, 2016-2017 Gregory Nutt. All rights reserved.
  * Author: Gregory Nutt <gnutt@nuttx.org>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -260,23 +260,44 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
                       &g_cpu_schedlock);
         }
 
-      /* Interrupts may be disabled after the switch. If irqcount is greater
-       * than zero, then this task/this CPU holds the IRQ lock
+      /* Adjust global IRQ controls. This works differently if we are
+       * performing a context switch from an interrupt handler and the
+       * interrupt handler has established a critical section. We can
+       * detect this case when g_cpu_nestcount[me] > 0:
        */
 
-      if (nxttcb->irqcount > 0)
+      if (g_cpu_nestcount[me] <= 0)
         {
-          /* Yes... make sure that scheduling logic knows about this */
+          /* If irqcount is greater than zero, then this task/this CPU
+           * holds the IRQ lock
+           */
 
-          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
+          if (nxttcb->irqcount > 0)
+            {
+              /* Yes... make sure that scheduling logic knows about this */
+
+              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+            }
+          else
+            {
+              /* No.. we may need to release our hold on the IRQ state. */
+
+              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
+            }
         }
       else
         {
-          /* No.. we may need to release our hold on the irq state. */
+          /* Sanity check. g_cpu_netcount should be greater than zero
+           * only while we are within the critical section and within
+           * an interrupt handler. If we are not in an interrupt handler
+           * then there is a problem; perhaps some logic previously
+           * called enter_critical_section() with no matching call to
+           * leave_critical_section(), leaving the non-zero count.
+           */
 
-          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                      &g_cpu_irqlock);
+          DEBUGASSERT(up_interrupt_context());
         }
 
       nxttcb->task_state = TSTATE_TASK_RUNNING;
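The two scheduler hunks above add the same guard to sched_addreadytorun() and sched_removereadytorun(): the CPU's g_cpu_irqset bit is derived from the incoming task's irqcount only when the context switch is not happening inside an interrupt handler that already holds the critical section (g_cpu_nestcount > 0); in that case the bit is left alone for leave_critical_section() to fix up. The sketch below is a simplified, self-contained model of that decision; adjust_irqset_for_switch is a hypothetical name and a plain assert stands in for the spinlock-protected DEBUGASSERT.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NCPUS 4

static uint32_t g_cpu_irqset;
static uint8_t  g_cpu_nestcount[NCPUS];

static void adjust_irqset_for_switch(int cpu, int16_t task_irqcount,
                                     bool in_interrupt)
{
  if (g_cpu_nestcount[cpu] == 0)
    {
      /* Normal case: the bit simply tracks whether the incoming task
       * holds the critical section.
       */

      if (task_irqcount > 0)
        {
          g_cpu_irqset |= (1u << cpu);
        }
      else
        {
          g_cpu_irqset &= ~(1u << cpu);
        }
    }
  else
    {
      /* A non-zero nesting count is only legal while running an
       * interrupt handler that entered the critical section; leave
       * the bit untouched and just sanity-check the context.
       */

      assert(in_interrupt);
    }
}

int main(void)
{
  g_cpu_nestcount[2] = 1;                /* Switch happens inside a handler's csection */
  g_cpu_irqset       = (1u << 2);

  adjust_irqset_for_switch(2, 0, true);  /* Bit must be left set */
  assert(g_cpu_irqset == (1u << 2));
  return 0;
}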