From f55bad863b8dc6baa8a353ac4d3c89b1fe799fe9 Mon Sep 17 00:00:00 2001
From: Gregory Nutt
Date: Sat, 24 Dec 2016 18:52:58 -0600
Subject: [PATCH] SMP: Fix logic error in last change: Deferred restoration of
 IRQ lock only applies if the context switch was on this CPU.

---
 sched/irq/irq.h                      | 34 +++++++++++++---
 sched/irq/irq_restorelock.c          | 59 ++++++++++++++++++++--------
 sched/sched/sched_addreadytorun.c    | 10 +++++
 sched/sched/sched_removereadytorun.c |  7 ++++
 4 files changed, 87 insertions(+), 23 deletions(-)

diff --git a/sched/irq/irq.h b/sched/irq/irq.h
index 77b8740984..8e5a718c6b 100644
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -106,16 +106,38 @@ void weak_function irq_initialize(void);
 
 int irq_unexpected_isr(int irq, FAR void *context);
 
+/****************************************************************************
+ * Name: irq_restore_cpulock
+ *
+ * Description:
+ *   Restore the state of g_cpu_schedlock and g_cpu_irqlock.  This function
+ *   is called after a context switch on another CPU.  A consequence of
+ *   the context switch is that the global spinlocks may need to change
+ *   states.
+ *
+ * Input Parameters:
+ *   cpu  - The CPU on which the task was started
+ *   rtcb - The TCB of the task that was started
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb);
+#endif
+
 /****************************************************************************
  * Name: irq_restore_lock
  *
  * Description:
- *   Restore the state of g_cpu_irqlock.  This function is called after a
- *   context switch.  A consequence of the context switch is that the the
- *   global g_cpu_irqlock spinlock may need to change states.  However, the
- *   actual realization of that change cannot occur until all context
- *   switching operations have completed.  This function implements the
- *   deferred setting of g_cpu_irqlock.
+ *   Restore the state of g_cpu_schedlock and g_cpu_irqlock.  This function
+ *   is called after a context switch on the current CPU.  A consequence of
+ *   the context switch is that the global spinlocks may need to change
+ *   states.  However, the actual realization of that change cannot occur
+ *   until all context switching operations have completed.  This function
+ *   implements the deferred setting of g_cpu_irqlock.
  *
  * Input Parameters:
  *   None
diff --git a/sched/irq/irq_restorelock.c b/sched/irq/irq_restorelock.c
index 94befe5389..0899a91458 100644
--- a/sched/irq/irq_restorelock.c
+++ b/sched/irq/irq_restorelock.c
@@ -48,16 +48,50 @@
  * Public Functions
  ****************************************************************************/
 
+/****************************************************************************
+ * Name: irq_restore_cpulock
+ *
+ * Description:
+ *   Restore the state of g_cpu_schedlock and g_cpu_irqlock.  This function
+ *   is called after a context switch on another CPU.  A consequence of
+ *   the context switch is that the global spinlocks may need to change
+ *   states.
+ *
+ * Input Parameters:
+ *   cpu  - The CPU on which the task was started
+ *   rtcb - The TCB of the task that was started
+ *
+ * Returned Value:
+ *   None
+ *
+ ****************************************************************************/
+
+void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb)
+{
+  /* Adjust global IRQ controls.  If the irqcount is greater than zero,
+   * then this task/this CPU holds the IRQ lock.
+   */
+
+  if (rtcb->irqcount > 0)
+    {
+      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
+    }
+  else
+    {
+      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
+    }
+}
+
 /****************************************************************************
  * Name: irq_restore_lock
  *
  * Description:
- *   Restore the state of g_cpu_irqlock.  This function is called after a
- *   context switch.  A consequence of the context switch is that the the
- *   global g_cpu_irqlock spinlock may need to change states.  However, the
- *   actual realization of that change cannot occur until all context
- *   switching operations have completed.  This function implements the
- *   deferred setting of g_cpu_irqlock.
+ *   Restore the state of g_cpu_schedlock and g_cpu_irqlock.  This function
+ *   is called after a context switch on the current CPU.  A consequence of
+ *   the context switch is that the global spinlocks may need to change
+ *   states.  However, the actual realization of that change cannot occur
+ *   until all context switching operations have completed.  This function
+ *   implements the deferred setting of g_cpu_irqlock.
  *
  * Input Parameters:
  *   None
@@ -78,18 +112,9 @@ void irq_restore_lock(void)
   cpu  = this_cpu();
   rtcb = current_task(cpu);
 
-  /* Adjust global IRQ controls.  If the irqcount of the newly running task is
-   * greater than zero, then this task/CPU holds the IRQ lock
-   */
+  /* Adjust global pre-emption and IRQ controls. */
 
-  if (rtcb->irqcount > 0)
-    {
-      spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
-    }
-  else
-    {
-      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, &g_cpu_irqlock);
-    }
+  irq_restore_cpulock(cpu, rtcb);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index b19cd41841..b4c74e0db6 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -313,6 +313,16 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                           &g_cpu_schedlock);
             }
 
+          /* If this is not the current CPU, then we should update IRQ locks
+           * now.  Controls for this CPU will be updated when we finish the
+           * context switch.
+           */
+
+          if (cpu != me)
+            {
+              irq_restore_cpulock(cpu, btcb);
+            }
+
           /* If the following task is not locked to this CPU, then it must
            * be moved to the g_readytorun list.  Since it cannot be at the
            * head of the list, we can do this without invoking any heavy
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index 99a039aded..7f8100a43a 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -264,6 +264,13 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
       doswitch = true;
       if (cpu != me)
         {
+          /* If this is not the current CPU, then we should update IRQ locks
+           * now.  Controls for this CPU will be updated when we finish the
+           * context switch.
+           */
+
+          irq_restore_cpulock(cpu, nxttcb);
+
           /* In this we will not want to report a context switch to this
            * CPU.  Only the other CPU is affected.
            */
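
Reviewer note (not part of the patch): a minimal sketch of the intended call
pattern follows.  Only irq_restore_cpulock() and irq_restore_lock() come from
the changes above; the scheduler-side function sketch_start_task(), its
arguments, and the empty FAR definition are hypothetical stand-ins added
purely for illustration.

/* Sketch: how the two restore paths divide the work after this patch.
 * Hypothetical stand-ins are marked as such; only the two irq_restore_*
 * prototypes correspond to code touched by the patch.
 */

#define FAR                         /* NuttX pointer qualifier; empty here */

struct tcb_s;                       /* Opaque task control block */

int this_cpu(void);                 /* Index of the executing CPU */

void irq_restore_cpulock(int cpu, FAR struct tcb_s *rtcb);  /* Added above */
void irq_restore_lock(void);        /* Now a thin wrapper for the local CPU */

/* Hypothetical scheduler step: 'btcb' has just become the head of CPU
 * 'cpu's assigned task list (compare sched_addreadytorun()).
 */

static void sketch_start_task(int cpu, FAR struct tcb_s *btcb)
{
  int me = this_cpu();

  if (cpu != me)
    {
      /* Remote CPU: no context switch happens here, so the IRQ-lock
       * bookkeeping is updated immediately.
       */

      irq_restore_cpulock(cpu, btcb);
    }

  /* Local CPU: the update is deferred.  Once the context switch has
   * completed, the code that finishes the switch calls irq_restore_lock(),
   * which looks up the now-running task and applies the same adjustment
   * for this CPU.
   */
}

The design point of the fix: the deferred path (irq_restore_lock) is only
meaningful when the context switch happens on the CPU doing the scheduling;
for any other CPU the lock state must be fixed up immediately, which is why
sched_addreadytorun() and sched_removereadytorun() now call
irq_restore_cpulock() directly in the cpu != me case.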