diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c
index eb9d551503..c78d7a30fa 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -316,31 +316,36 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                           &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls.  This works differently if we are
-       * performing a context switch from an interrupt handler and the
-       * interrupt handler has established a critical section.  We can
-       * detect this case when g_cpu_nestcount[me] > 0.
-       *
-       * REVISIT: Could this not cause logic to exit the critical
-       * section prematurely in the context switch sequence?
+      /* Adjust global IRQ controls.  If irqcount is greater than zero,
+       * then this task/this CPU holds the IRQ lock
        */
 
-      if (g_cpu_nestcount[me] <= 0)
+      if (btcb->irqcount > 0)
         {
-          /* If irqcount is greater than zero, then this task/this CPU
-           * holds the IRQ lock
+          /* Yes... make sure that scheduling logic on other CPUs knows
+           * that we hold the IRQ lock.
            */
 
-          if (btcb->irqcount > 0)
-            {
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-          else
-            {
-              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
+
+      /* This CPU will be relinquishing the lock.  But this works
+       * differently if we are performing a context switch from an
+       * interrupt handler and the interrupt handler has established
+       * a critical section.  We can detect this case when
+       * g_cpu_nestcount[me] > 0.
+       *
+       * REVISIT: Could this not cause logic to exit the critical section
+       * prematurely in the context switch sequence?
+       */
+
+      else if (g_cpu_nestcount[me] <= 0)
+        {
+          /* No.. we may need to release our hold on the IRQ state. */
+
+          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
         }
       else
         {
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index b056eefb57..12a13e651d 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -260,35 +260,36 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
                           &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls.  This works differently if we are
-       * performing a context switch from an interrupt handler and the
-       * interrupt handler has established a critical section.  We can
-       * detect this case when g_cpu_nestcount[me] > 0.
+      /* Adjust global IRQ controls.  If irqcount is greater than zero,
+       * then this task/this CPU holds the IRQ lock
+       */
+
+      if (nxttcb->irqcount > 0)
+        {
+          /* Yes... make sure that scheduling logic on other CPUs knows
+           * that we hold the IRQ lock.
+           */
+
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
+
+      /* This CPU will be relinquishing the lock.  But this works
+       * differently if we are performing a context switch from an
+       * interrupt handler and the interrupt handler has established
+       * a critical section.  We can detect this case when
+       * g_cpu_nestcount[me] > 0.
        *
        * REVISIT: Could this not cause logic to exit the critical section
        * prematurely in the context switch sequence?
        */
 
-      if (g_cpu_nestcount[me] <= 0)
+      else if (g_cpu_nestcount[me] <= 0)
         {
-          /* If irqcount is greater than zero, then this task/this CPU
-           * holds the IRQ lock
-           */
+          /* No.. we may need to release our hold on the IRQ state. */
 
-          if (nxttcb->irqcount > 0)
-            {
-              /* Yes... make sure that scheduling logic knows about this */
-
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-          else
-            {
-              /* No.. we may need to release our hold on the IRQ state. */
-
-              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
+          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
         }
       else
         {