SMP: Move sharable function to common file as irq_cpu_locked(). Use irq_cpu_locked() in sched_unlock()
parent 96394f339e
commit e7d2b9f0e8
@@ -44,6 +44,7 @@
 #include <nuttx/compiler.h>

 #include <sys/types.h>
+#include <stdbool.h>

 #include <nuttx/arch.h>
 #include <nuttx/irq.h>
@@ -106,6 +107,33 @@ void weak_function irq_initialize(void);

 int irq_unexpected_isr(int irq, FAR void *context);

+/****************************************************************************
+ * Name: irq_cpu_locked
+ *
+ * Description:
+ *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
+ *   There is an interaction with pre-emption controls and IRQ locking:
+ *   Even if pre-emption is enabled, tasks will be forced to pend if
+ *   the IRQ lock is also set UNLESS the CPU starting the task is the
+ *   holder of the IRQ lock.
+ *
+ * Inputs:
+ *   cpu - The index of the CPU to be queried
+ *
+ * Return Value:
+ *   true  - IRQs are locked by a different CPU.
+ *   false - IRQs are unlocked OR they are locked BUT this CPU
+ *           is the holder of the lock.
+ *
+ *   Warning: This value is volatile; it is valid only at the instant
+ *   that the CPU set was queried.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+bool irq_cpu_locked(int cpu);
+#endif
+
 #undef EXTERN
 #ifdef __cplusplus
 }
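A caller-side sketch of how this predicate is intended to be used (the helper name can_start_task_here() is hypothetical; this_cpu(), spin_islocked(), and g_cpu_schedlock are the NuttX SMP primitives that appear elsewhere in this diff):

/* Hypothetical illustration only -- not part of this commit.  Assumes
 * the NuttX SMP declarations are in scope.
 */

#ifdef CONFIG_SMP
static bool can_start_task_here(void)
{
  int me = this_cpu();  /* Index of the CPU executing this code */

  /* A task must pend if pre-emption is locked OR if a *different* CPU
   * holds the IRQ lock.  irq_cpu_locked(me) returns false when this
   * CPU is itself the holder, so the lock holder is never blocked.
   */

  return !spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(me);
}
#endif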
@@ -572,4 +572,78 @@ void leave_critical_section(irqstate_t flags)
 }
 #endif

+/****************************************************************************
+ * Name: irq_cpu_locked
+ *
+ * Description:
+ *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
+ *   There is an interaction with pre-emption controls and IRQ locking:
+ *   Even if pre-emption is enabled, tasks will be forced to pend if
+ *   the IRQ lock is also set UNLESS the CPU starting the task is the
+ *   holder of the IRQ lock.
+ *
+ * Inputs:
+ *   cpu - The index of the CPU to be queried
+ *
+ * Return Value:
+ *   true  - IRQs are locked by a different CPU.
+ *   false - IRQs are unlocked OR they are locked BUT this CPU
+ *           is the holder of the lock.
+ *
+ *   Warning: This value is volatile; it is valid only at the instant
+ *   that the CPU set was queried.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+bool irq_cpu_locked(int cpu)
+{
+  cpu_set_t irqset;
+
+  /* g_cpu_irqset is not valid in early phases of initialization */
+
+  if (g_os_initstate < OSINIT_OSREADY)
+    {
+      /* We are still single threaded.  In either state of g_cpu_irqlock,
+       * the correct return value should always be false.
+       */
+
+      return false;
+    }
+
+  /* Test if g_cpu_irqlock is locked.  We don't really need to check
+   * g_cpu_irqlock to do this; we can use g_cpu_irqset.
+   *
+   * Sample g_cpu_irqset once.  That is an atomic operation.  All
+   * subsequent operations will operate on the sampled cpu set.
+   */
+
+  irqset = (cpu_set_t)g_cpu_irqset;
+  if (irqset != 0)
+    {
+      /* Some CPU holds the lock.  So g_cpu_irqlock should be locked.
+       * Return false if the 'cpu' is the holder of the lock; return
+       * true if g_cpu_irqlock is locked, but this CPU is not the
+       * holder of the lock.
+       */
+
+      return ((irqset & (1 << cpu)) == 0);
+    }
+
+  /* No CPU holds the lock */
+
+  else
+    {
+      /* In this case g_cpu_irqlock should be unlocked.  However, if
+       * the lock was established in the interrupt handler AND there are
+       * no bits set in g_cpu_irqset, that probably means only that the
+       * critical section was established from an interrupt handler.
+       * Return false in either case.
+       */
+
+      return false;
+    }
+}
+#endif
+
 #endif /* CONFIG_SMP || CONFIG_SCHED_INSTRUMENTATION_CSECTION */
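The core of the function is a single sampled bit test: some bit set, but not this CPU's bit, means a different CPU holds the lock. A self-contained sketch of that logic outside NuttX (cpuset_t is a stand-in typedef, not the NuttX cpu_set_t):

/* Standalone illustration of the sampled bit test -- not NuttX code.
 * A CPU set is an integer bitmask with bit n set when CPU n holds the
 * IRQ lock.  Compile with: cc -o cpuset cpuset.c
 */

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint8_t cpuset_t;       /* One bit per CPU, up to 8 CPUs */

static bool cpu_locked(cpuset_t irqset, int cpu)
{
  /* Locked by a *different* CPU: some bit is set, but not ours */

  return irqset != 0 && (irqset & (1 << cpu)) == 0;
}

int main(void)
{
  cpuset_t irqset = (1 << 1);   /* CPU 1 holds the IRQ lock */

  printf("CPU 0 sees lock held elsewhere: %d\n", cpu_locked(irqset, 0));
  printf("CPU 1 (the holder) is blocked:  %d\n", cpu_locked(irqset, 1));
  printf("No holder at all:               %d\n", cpu_locked(0, 0));
  return 0;
}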
@@ -43,86 +43,9 @@
 #include <queue.h>
 #include <assert.h>

-#include <nuttx/init.h>
-
 #include "irq/irq.h"
 #include "sched/sched.h"

-/****************************************************************************
- * Private Functions
- ****************************************************************************/
-
-/****************************************************************************
- * Name: sched_cpulocked
- *
- * Description:
- *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
- *   There is an interaction with pre-emption controls and IRQ locking:
- *   Even if pre-emption is enabled, tasks will be forced to pend if
- *   the IRQ lock is also set UNLESS the CPU starting the task is the
- *   holder of the IRQ lock.
- *
- * Inputs:
- *   cpu - The index of the CPU to be queried
- *
- * Return Value:
- *   true  - IRQs are locked by a different CPU.
- *   false - IRQs are unlocked OR they are locked BUT this CPU
- *           is the holder of the lock.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-static bool sched_cpulocked(int cpu)
-{
-  cpu_set_t irqset;
-
-  /* g_cpu_irqset is not valid in early phases of initialization */
-
-  if (g_os_initstate < OSINIT_OSREADY)
-    {
-      /* We are still single threaded.  In either state of g_cpu_irqlock,
-       * the correct return value should always be false.
-       */
-
-      return false;
-    }
-
-  /* Test if g_cpu_irqlock is locked.  We don't really need to check
-   * g_cpu_irqlock to do this; we can use g_cpu_irqset.
-   *
-   * Sample g_cpu_irqset once.  That is an atomic operation.  All
-   * subsequent operations will operate on the sampled cpu set.
-   */
-
-  irqset = (cpu_set_t)g_cpu_irqset;
-  if (irqset != 0)
-    {
-      /* Some CPU holds the lock.  So g_cpu_irqlock should be locked.
-       * Return false if the 'cpu' is the holder of the lock; return
-       * true if g_cpu_irqlock is locked, but this CPU is not the
-       * holder of the lock.
-       */
-
-      return ((irqset & (1 << cpu)) == 0);
-    }
-
-  /* No CPU holds the lock */
-
-  else
-    {
-      /* In this case g_cpu_irqlock should be unlocked.  However, if
-       * the lock was established in the interrupt handler AND there are
-       * no bits set in g_cpu_irqset, that probably means only that the
-       * critical section was established from an interrupt handler.
-       * Return false in either case.
-       */
-
-      return false;
-    }
-}
-#endif
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -308,12 +231,12 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
       * There is an interaction here with IRQ locking.  Even if the pre-
       * emption is enabled, tasks will be forced to pend if the IRQ lock
       * is also set UNLESS the CPU starting the thread is also the holder of
-      * the IRQ lock.  sched_cpulocked() performs an atomic check for that
+      * the IRQ lock.  irq_cpu_locked() performs an atomic check for that
       * situation.
       */

      me = this_cpu();
-     if ((spin_islocked(&g_cpu_schedlock) || sched_cpulocked(me)) &&
+     if ((spin_islocked(&g_cpu_schedlock) || irq_cpu_locked(me)) &&
          task_state != TSTATE_TASK_ASSIGNED)
        {
          /* Add the new ready-to-run task to the g_pendingtasks task list for
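The pend decision above can be read as a single predicate. A hypothetical helper, not in the commit, assuming NuttX's sched definitions are in scope:

/* Illustrative sketch of the pend predicate used by
 * sched_addreadytorun() above.
 */

#ifdef CONFIG_SMP
static bool sched_mustpend(int me, int task_state)
{
  /* The new task must pend when pre-emption is disabled somewhere
   * (g_cpu_schedlock) or another CPU holds the IRQ lock -- unless the
   * task has already been assigned to this CPU.
   */

  return (spin_islocked(&g_cpu_schedlock) || irq_cpu_locked(me)) &&
         task_state != TSTATE_TASK_ASSIGNED;
}
#endif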
@@ -67,6 +67,10 @@
 int sched_unlock(void)
 {
   FAR struct tcb_s *rtcb = this_task();
+  int cpu;
+
+  cpu  = this_cpu();
+  rtcb = current_task(cpu);

   /* Check for some special cases:  (1) rtcb may be NULL only during
    * early boot-up phases, and (2) sched_unlock() should have no
@@ -107,9 +111,9 @@ int sched_unlock(void)
       */

      DEBUGASSERT(g_cpu_schedlock == SP_LOCKED &&
-                 (g_cpu_lockset & (1 << this_cpu())) != 0);
+                 (g_cpu_lockset & (1 << cpu)) != 0);

-     spin_clrbit(&g_cpu_lockset, this_cpu(), &g_cpu_locksetlock,
+     spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                  &g_cpu_schedlock);
 #endif

@@ -122,20 +126,25 @@ int sched_unlock(void)

 #ifdef CONFIG_SMP
      /* In the SMP case, the tasks remain pending if (1) we are
-      * in a critical section, i.e., g_cpu_irqlock is locked , or (2)
-      * other CPUs still have pre-emption disabled, i.e.,
+      * in a critical section, i.e., g_cpu_irqlock is locked by other
+      * CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
       * g_cpu_schedlock is locked.  In those cases, the release of the
       * pending tasks must be deferred until those conditions are met.
       *
       * There are certain conditions that we must avoid by preventing
-      * releasing the pending tasks while withn a critical section.
-      * This logic does that and there is matching logic in
-      * sched_addreadytorun to avoid starting new tasks within the
-      * critical section (unless the CPU is the holder of the lock).
+      * releasing the pending tasks while within the critical section
+      * of other CPUs.  This logic does that and there is matching
+      * logic in sched_addreadytorun to avoid starting new tasks within
+      * the critical section (unless the CPU is the holder of the lock).
+      *
+      * REVISIT: If this CPU is the only one that holds the IRQ lock,
+      * then we should go ahead and release the pending tasks.  See the
+      * logic in leave_critical_section():  It will call
+      * up_release_pending() BEFORE it clears the IRQ lock.
       */

-     if (!spin_islocked(&g_cpu_schedlock) &&
-         !spin_islocked(&g_cpu_irqlock) &&
+     if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(cpu) &&
          g_pendingtasks.head != NULL)
 #else
      /* In the single CPU case, decrementing irqcount to zero is
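The net effect of the last change: previously, a critical section on any CPU (g_cpu_irqlock locked) deferred the release of pending tasks; now only a critical section held by another CPU does. A hedged sketch of the overall shape -- the if-body is cut off in the truncated hunk above, and up_release_pending() is taken from the commit's own REVISIT note, so treat the body as an assumption:

/* Sketch only.  Assumes the NuttX SMP globals and g_pendingtasks are
 * in scope, as in sched_unlock() above.
 */

if (!spin_islocked(&g_cpu_schedlock) && !irq_cpu_locked(cpu) &&
    g_pendingtasks.head != NULL)
  {
    up_release_pending();   /* Release call named in the REVISIT note */
  }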