SMP: Fix logic error in the last change: deferred restoration of the IRQ lock applies only if the context switch occurred on this CPU.

Gregory Nutt 2016-12-24 18:52:58 -06:00
parent 9e8b1f32d2
commit f55bad863b
4 changed files with 87 additions and 23 deletions
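
In outline, the fix narrows where the deferred update is allowed: a context switch that targets another CPU must have its IRQ-lock bit brought up to date immediately, while only a switch on the current CPU may wait for irq_restore_lock() to run after the switch completes. The following is a minimal, single-threaded C sketch of that rule, not the NuttX code: tcb_model, g_irqset_model, restore_cpulock_model(), and on_task_started() are hypothetical stand-ins for struct tcb_s, g_cpu_irqset, irq_restore_cpulock(), and the scheduler call sites, and all spinlock handling is omitted.

/* Minimal single-threaded model of the rule fixed by this commit.  This
 * is not the NuttX implementation: the real code updates g_cpu_irqset
 * under g_cpu_irqsetlock and may acquire or release g_cpu_irqlock as a
 * side effect of spin_setbit()/spin_clrbit().
 */

#include <stdint.h>

struct tcb_model                   /* Stand-in for struct tcb_s */
{
  int irqcount;                    /* > 0: task holds the IRQ lock */
};

static uint32_t g_irqset_model;    /* Stand-in for g_cpu_irqset */

/* Models irq_restore_cpulock(cpu, rtcb): reflect the incoming task's
 * IRQ-lock state in the per-CPU bit set.
 */

static void restore_cpulock_model(int cpu, const struct tcb_model *rtcb)
{
  if (rtcb->irqcount > 0)
    {
      g_irqset_model |= (uint32_t)1 << cpu;
    }
  else
    {
      g_irqset_model &= ~((uint32_t)1 << cpu);
    }
}

/* Models the scheduler's decision after it makes 'tcb' the task that
 * will run on 'cpu'; 'me' is the CPU executing the scheduler code.
 */

static void on_task_started(int cpu, int me, const struct tcb_model *tcb)
{
  if (cpu != me)
    {
      restore_cpulock_model(cpu, tcb);   /* Other CPU: update now */
    }

  /* cpu == me: deferred; irq_restore_lock() applies the update after
   * the context switch on this CPU has completed.
   */
}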


@@ -106,16 +106,38 @@ void weak_function irq_initialize(void);
int irq_unexpected_isr(int irq, FAR void *context);
/****************************************************************************
* Name: irq_restore_cpulock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on another CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states.
*
* Input Parameters:
* cpu - The CPU on which the task was started
* rtcb - The TCB of the task that was started
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_SMP
void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb);
#endif
/****************************************************************************
* Name: irq_restore_lock
*
* Description:
* Restore the state of g_cpu_irqlock. This function is called after a
* context switch. A consequence of the context switch is that the the
* global g_cpu_irqlock spinlock may need to change states. However, the
* actual realization of that change cannot occur until all context
* switching operations have completed. This function implements the
* deferred setting of g_cpu_irqlock.
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on the current CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states. However, the actual realization of that change cannot occur
* until all context switching operations have completed. This function
* implements the deferred setting of g_cpu_irqlock.
*
* Input Parameters:
* None
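
The phrase "the global spinlocks may need to change states" refers to the way g_cpu_irqlock appears to track g_cpu_irqset: the lock is held as long as any CPU's bit is set, so setting or clearing a single bit can acquire or release it. The sketch below is a single-threaded model of that assumed spin_setbit()/spin_clrbit() behavior; setbit_model(), clrbit_model(), and the flag are illustrative names, not the NuttX primitives, and the per-set lock (g_cpu_irqsetlock) is ignored.

/* Single-threaded model of the assumed spin_setbit()/spin_clrbit()
 * behavior on g_cpu_irqset/g_cpu_irqlock: the "or" lock is held whenever
 * at least one bit is set, so updating one CPU's bit can change the
 * state of the global lock.  Illustration only; the per-set spinlock
 * (g_cpu_irqsetlock) and all atomicity concerns are ignored.
 */

#include <stdbool.h>
#include <stdint.h>

static uint32_t g_set_model;       /* Models g_cpu_irqset */
static bool     g_orlock_model;    /* Models g_cpu_irqlock: true = held */

static void setbit_model(int cpu)
{
  g_set_model |= (uint32_t)1 << cpu;
  g_orlock_model = true;           /* Some CPU now holds the IRQ lock */
}

static void clrbit_model(int cpu)
{
  g_set_model &= ~((uint32_t)1 << cpu);
  if (g_set_model == 0)
    {
      g_orlock_model = false;      /* The last holder released the lock */
    }
}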


@@ -48,16 +48,50 @@
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: irq_restore_cpulock
*
* Description:
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on another CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states.
*
* Input Parameters:
* cpu - The CPU on which the task was started
* rtcb - The TCB of the task that was started
*
* Returned Value:
* None
*
****************************************************************************/
void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb)
{
  /* Adjust global IRQ controls.  If the irqcount is greater than zero,
   * then this task/this CPU holds the IRQ lock.
   */

  if (rtcb->irqcount > 0)
    {
      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }
  else
    {
      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }
}
/****************************************************************************
* Name: irq_restore_lock
*
* Description:
* Restore the state of g_cpu_irqlock. This function is called after a
* context switch. A consequence of the context switch is that the the
* global g_cpu_irqlock spinlock may need to change states. However, the
* actual realization of that change cannot occur until all context
* switching operations have completed. This function implements the
* deferred setting of g_cpu_irqlock.
* Restore the state of g_cpu_schedlock and g_cpu_irqlock. This function
* is called after a context switch on the current CPU. A consequence of
* the context switch is that the global spinlocks may need to change
* states. However, the actual realization of that change cannot occur
* until all context switching operations have completed. This function
* implements the deferred setting of g_cpu_irqlock.
*
* Input Parameters:
* None
@@ -78,18 +112,9 @@ void irq_restore_lock(void)
  cpu = this_cpu();
  rtcb = current_task(cpu);

  /* Adjust global IRQ controls. If the irqcount of the newly running task is
   * greater than zero, then this task/CPU holds the IRQ lock
   */

  /* Adjust global pre-emption and IRQ controls. */

  if (rtcb->irqcount > 0)
    {
      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }
  else
    {
      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
    }

  irq_restore_cpulock(cpu, rtcb);
}
#endif /* CONFIG_SMP */
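
After this refactor, the deferred (current-CPU) path is just a lookup followed by a call to the shared helper. The sketch below models that composition; mock_this_cpu() and mock_current_task() are hypothetical stand-ins for this_cpu() and current_task(), and the ordering comment is the point: the function must run only after the context switch on this CPU has completed, so that the task it looks up is the one that was just switched in.

/* Sketch of the deferred (current-CPU) path after this refactor:
 * irq_restore_lock() only looks up the CPU and its newly running task
 * and reuses the per-CPU restore helper.  mock_this_cpu() and
 * mock_current_task() are hypothetical stand-ins for this_cpu() and
 * current_task(); illustration only.
 */

#include <stdint.h>

struct tcb_model
{
  int irqcount;                        /* > 0: task holds the IRQ lock */
};

static uint32_t g_irqset_model;        /* Stand-in for g_cpu_irqset */
static struct tcb_model g_current_model[2];   /* "Current task" per CPU */

static int mock_this_cpu(void)
{
  return 0;                            /* Fixed CPU index for the model */
}

static struct tcb_model *mock_current_task(int cpu)
{
  return &g_current_model[cpu];
}

/* Same bit update as irq_restore_cpulock() in the earlier sketch */

static void restore_cpulock_model(int cpu, const struct tcb_model *rtcb)
{
  if (rtcb->irqcount > 0)
    {
      g_irqset_model |= (uint32_t)1 << cpu;
    }
  else
    {
      g_irqset_model &= ~((uint32_t)1 << cpu);
    }
}

static void restore_lock_model(void)
{
  /* Runs only after the context switch on this CPU has completed, so
   * the task looked up here is the one that was just switched in and
   * its irqcount is authoritative.
   */

  int cpu = mock_this_cpu();
  struct tcb_model *rtcb = mock_current_task(cpu);

  restore_cpulock_model(cpu, rtcb);    /* Same helper as the remote case */
}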


@@ -313,6 +313,16 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
&g_cpu_schedlock);
}
/* If this is not the current CPU, then we should update the IRQ locks
 * now.  Controls for this CPU will be updated when we finish the
 * context switch.
 */
if (cpu != me)
{
irq_restore_cpulock(cpu, btcb);
}
/* If the following task is not locked to this CPU, then it must
* be moved to the g_readytorun list. Since it cannot be at the
* head of the list, we can do this without invoking any heavy
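
The guard added above is the substance of the fix at this call site: btcb was just made the head of another CPU's assigned-task list, so no context switch will complete on the CPU running this code and there is no later point at which the deferred restore would happen. The sketch below shows the assumed shape of the call site; it presumes (from the g_cpu_schedlock context line above) that the scheduler-lock bit for the target CPU is adjusted unconditionally just before this guard, and every *_model name is a stand-in rather than the NuttX symbol.

/* Sketch of the call-site pattern in sched_addreadytorun(): the
 * pre-emption (scheduler-lock) bit for the target CPU is assumed to be
 * adjusted unconditionally from btcb->lockcount, while the IRQ-lock bit
 * is adjusted here only when the target CPU is not the one running this
 * code; otherwise the update is deferred to irq_restore_lock().
 * Model names only; not the NuttX implementation.
 */

#include <stdint.h>

struct tcb_model
{
  int lockcount;                   /* > 0: task holds the scheduler lock */
  int irqcount;                    /* > 0: task holds the IRQ lock */
};

static uint32_t g_lockset_model;   /* Stand-in for the scheduler-lock CPU set */
static uint32_t g_irqset_model;    /* Stand-in for g_cpu_irqset */

static void adjust_bit(uint32_t *set, int cpu, int held)
{
  if (held)
    {
      *set |= (uint32_t)1 << cpu;
    }
  else
    {
      *set &= ~((uint32_t)1 << cpu);
    }
}

static void add_readytorun_model(int cpu, int me, const struct tcb_model *btcb)
{
  /* Pre-existing behavior (assumed): pre-emption controls for the target CPU */

  adjust_bit(&g_lockset_model, cpu, btcb->lockcount > 0);

  /* New in this commit: IRQ controls, but only for a remote CPU */

  if (cpu != me)
    {
      adjust_bit(&g_irqset_model, cpu, btcb->irqcount > 0);
    }

  /* cpu == me: deferred until irq_restore_lock() runs after the switch */
}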


@@ -264,6 +264,13 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
doswitch = true;
if (cpu != me)
{
/* If this is not the current CPU, then we should update the IRQ locks
 * now.  Controls for this CPU will be updated when we finish the
 * context switch.
 */
irq_restore_cpulock(cpu, nxttcb);
/* In this case, we will not want to report a context switch to this
 * CPU.  Only the other CPU is affected.
 */
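
A small driver for the simplified model used in the sketches above, exercising the two cases this commit distinguishes: a task holding the IRQ lock started on a remote CPU (its bit is set immediately, as in the new sched_addreadytorun()/sched_removereadytorun() calls) and a switch on the local CPU (nothing changes until the deferred irq_restore_lock() stand-in runs). This is illustration only, not NuttX code.

/* Driver for the simplified model used in the sketches above: run a
 * remote-CPU switch (bit updated immediately) and then the deferred
 * local restore, printing the modelled g_cpu_irqset after each step.
 * Illustration only; not NuttX code.
 */

#include <stdint.h>
#include <stdio.h>

struct tcb_model
{
  int irqcount;                    /* > 0: task holds the IRQ lock */
};

static uint32_t g_irqset_model;    /* Stand-in for g_cpu_irqset */

/* Models irq_restore_cpulock(cpu, rtcb) */

static void restore_cpulock_model(int cpu, const struct tcb_model *rtcb)
{
  if (rtcb->irqcount > 0)
    {
      g_irqset_model |= (uint32_t)1 << cpu;
    }
  else
    {
      g_irqset_model &= ~((uint32_t)1 << cpu);
    }
}

int main(void)
{
  struct tcb_model locked   = { .irqcount = 1 };   /* Holds the IRQ lock */
  struct tcb_model unlocked = { .irqcount = 0 };   /* Does not hold it */
  int me = 0;

  /* Case 1: the scheduler running on CPU 'me' starts 'locked' on CPU 1.
   * The remote CPU's bit must be set immediately.
   */

  restore_cpulock_model(1, &locked);
  printf("after remote switch: irqset=0x%08x\n", (unsigned int)g_irqset_model);

  /* Case 2: a switch on CPU 'me' itself is deferred; only after it
   * completes does the irq_restore_lock() stand-in apply the new state.
   */

  restore_cpulock_model(me, &unlocked);
  printf("after local restore: irqset=0x%08x\n", (unsigned int)g_irqset_model);

  return 0;
}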