From 96394f339e09a4d4137b1e5a588dee6ab0bf11f3 Mon Sep 17 00:00:00 2001 From: Gregory Nutt Date: Wed, 28 Dec 2016 13:58:24 -0600 Subject: [PATCH] SMP: Fix a gap where we may try to make modifications to the task lists without being in a critical section. That permits concurrent access to the task lists and many subtle problems. This fix just remains in the critical section throughout the operation (and possibly until the task is restored in the event of a context switch). Makes a big difference in stability --- configs/sabre-6quad/smp/defconfig | 2 +- sched/irq/irq_csection.c | 32 +++++++++++++++++++------------ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/configs/sabre-6quad/smp/defconfig b/configs/sabre-6quad/smp/defconfig index 4ca6215581..d470592436 100644 --- a/configs/sabre-6quad/smp/defconfig +++ b/configs/sabre-6quad/smp/defconfig @@ -315,7 +315,7 @@ CONFIG_USER_ENTRYPOINT="nsh_main" CONFIG_RR_INTERVAL=200 # CONFIG_SCHED_SPORADIC is not set CONFIG_TASK_NAME_SIZE=31 -CONFIG_MAX_TASKS=16 +CONFIG_MAX_TASKS=32 # CONFIG_SCHED_HAVE_PARENT is not set CONFIG_SCHED_WAITPID=y diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 16be425cd7..4b5bdac15c 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -504,16 +504,14 @@ void leave_critical_section(irqstate_t flags) DEBUGASSERT(spin_islocked(&g_cpu_irqlock) && (g_cpu_irqset & (1 << cpu)) != 0); - rtcb->irqcount = 0; - spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, - &g_cpu_irqlock); + /* Check if releasing the lock held by this CPU will unlock the + * critical section. + */ - /* Have all CPUs released the lock? */ - - if (!spin_islocked(&g_cpu_irqlock)) + if ((g_cpu_irqset & ~(1 << cpu)) == 0) { - /* Check if there are pending tasks and that pre-emption - * is also enabled. This is necessary becaue we may have + /* Yes.. Check if there are pending tasks and that pre-emption + * is also enabled. 
This is necessary because we may have * deferred the up_release_pending() call in sched_unlock() * because we were within a critical section then. */ @@ -522,15 +520,25 @@ void leave_critical_section(irqstate_t flags) !spin_islocked(&g_cpu_schedlock)) { /* Release any ready-to-run tasks that have collected - * in g_pendingtasks if the scheduler is not locked. - * - * NOTE: This operation has a very high likelihood of - * causing this task to be switched out! + * in g_pendingtasks. NOTE: This operation has a very + * high likelihood of causing this task to be switched + * out! */ up_release_pending(); } } + + /* Now, possibly on return from a context switch, clear our + * count on the lock. If all CPUs have released the lock, + * then unlock the global IRQ spinlock. + */ + + rtcb->irqcount = 0; + spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock, + &g_cpu_irqlock); + + /* Have all CPUs released the lock? */ } } }