i.MX6: Corrects behavior of last SMP patch with i.MX6
commit 99bb2dda5d
parent 3ed091376c
@@ -316,31 +316,36 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
                       &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls. This works differently if we are
-       * performing a context switch from an interrupt handler and the
-       * interrupt handler has established a critical section. We can
-       * detect this case when g_cpu_nestcount[me] > 0.
-       *
-       * REVISIT: Could this not cause logic to exit the critical
-       * section prematurely in the context switch sequence?
-       */
-
-      if (g_cpu_nestcount[me] <= 0)
-        {
-          /* If irqcount is greater than zero, then this task/this CPU
-           * holds the IRQ lock
-           */
-
-          if (btcb->irqcount > 0)
-            {
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-          else
-            {
-              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-        }
+      /* Adjust global IRQ controls. If irqcount is greater than zero,
+       * then this task/this CPU holds the IRQ lock
+       */
+
+      if (btcb->irqcount > 0)
+        {
+          /* Yes... make sure that scheduling logic on other CPUs knows
+           * that we hold the IRQ lock.
+           */
+
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
+
+      /* This CPU will be relinquishing the lock. But this works
+       * differently if we are performing a context switch from an
+       * interrupt handler and the interrupt handler has established
+       * a critical section. We can detect this case when
+       * g_cpu_nestcount[me] > 0.
+       *
+       * REVISIT: Could this not cause logic to exit the critical section
+       * prematurely in the context switch sequence?
+       */
+
+      else if (g_cpu_nestcount[me] <= 0)
+        {
+          /* No.. we may need to release our hold on the IRQ state. */
+
+          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
       else
         {
@@ -260,35 +260,36 @@ bool sched_removereadytorun(FAR struct tcb_s *rtcb)
                       &g_cpu_schedlock);
         }
 
-      /* Adjust global IRQ controls. This works differently if we are
-       * performing a context switch from an interrupt handler and the
-       * interrupt handler has established a critical section. We can
-       * detect this case when g_cpu_nestcount[me] > 0.
-       *
-       * REVISIT: Could this not cause logic to exit the critical section
-       * prematurely in the context switch sequence?
-       */
-
-      if (g_cpu_nestcount[me] <= 0)
-        {
-          /* If irqcount is greater than zero, then this task/this CPU
-           * holds the IRQ lock
-           */
-
-          if (nxttcb->irqcount > 0)
-            {
-              /* Yes... make sure that scheduling logic knows about this */
-
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-          else
-            {
-              /* No.. we may need to release our hold on the IRQ state. */
-
-              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-            }
-        }
+      /* Adjust global IRQ controls. If irqcount is greater than zero,
+       * then this task/this CPU holds the IRQ lock
+       */
+
+      if (nxttcb->irqcount > 0)
+        {
+          /* Yes... make sure that scheduling logic on other CPUs knows
+           * that we hold the IRQ lock.
+           */
+
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
+
+      /* This CPU will be relinquishing the lock. But this works
+       * differently if we are performing a context switch from an
+       * interrupt handler and the interrupt handler has established
+       * a critical section. We can detect this case when
+       * g_cpu_nestcount[me] > 0.
+       *
+       * REVISIT: Could this not cause logic to exit the critical section
+       * prematurely in the context switch sequence?
+       */
+
+      else if (g_cpu_nestcount[me] <= 0)
+        {
+          /* No.. we may need to release our hold on the IRQ state. */
+
+          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+        }
       else
         {
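Both hunks make the same change: the incoming task's irqcount is now tested first, and only when that task does not hold the IRQ lock does the g_cpu_nestcount[me] test decide whether this CPU's bit in g_cpu_irqset is cleared. The following is a minimal standalone sketch of that decision order, not the actual NuttX code: NCPUS, setbit(), clrbit(), and adjust_irq_controls() are simplified stand-ins invented here for illustration (the real code uses spin_setbit()/spin_clrbit() together with the g_cpu_irqsetlock and g_cpu_irqlock spinlocks), and the local g_cpu_irqset / g_cpu_nestcount declarations merely mimic the globals referenced in the diff.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static uint32_t g_cpu_irqset;           /* Bit n set: CPU n holds the IRQ lock */
static int      g_cpu_nestcount[NCPUS]; /* Interrupt-level critical-section depth */

static void setbit(uint32_t *set, int cpu)
{
  *set |= (uint32_t)1 << cpu;
}

static void clrbit(uint32_t *set, int cpu)
{
  *set &= ~((uint32_t)1 << cpu);
}

/* Mirror of the if / else-if / else ordering introduced by the patch:
 * check the incoming task's irqcount first, and fall back to the
 * g_cpu_nestcount[me] test only when that task does not hold the lock.
 */

static void adjust_irq_controls(int cpu, int me, int irqcount)
{
  if (irqcount > 0)
    {
      /* The incoming task holds the IRQ lock: advertise that on its CPU */

      setbit(&g_cpu_irqset, cpu);
    }
  else if (g_cpu_nestcount[me] <= 0)
    {
      /* No interrupt-level critical section is active: release our hold */

      clrbit(&g_cpu_irqset, cpu);
    }
  else
    {
      /* An interrupt handler still owns a critical section; the remaining
       * handling lives in the branch that follows in the real code (outside
       * these hunks), so nothing is done here.
       */
    }
}

int main(void)
{
  g_cpu_nestcount[0] = 1;        /* CPU 0 is inside a handler's critical section */

  adjust_irq_controls(0, 0, 1);  /* Incoming task holds the lock: bit 0 is set */
  adjust_irq_controls(1, 1, 0);  /* No lock, no nesting on CPU 1: bit 1 stays clear */

  printf("g_cpu_irqset = 0x%02x\n", (unsigned int)g_cpu_irqset);
  return 0;
}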