SMP: There are certain conditions that we must avoid by preventing the release of pending tasks while within a critical section. But this logic was incomplete; there was no logic to prevent other CPUs from adding new, running tasks while one CPU is in a critical section.
This commit corrects that: it adds matching logic in sched_addreadytorun to avoid starting new tasks within the critical section (unless the CPU starting the task is the holder of the IRQ lock). The holder of the IRQ lock must be permitted to do whatever it needs to do.
parent 675d684a41
commit cfb876263a
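The rule that the diff below implements can be sketched in isolation. The following is a standalone model, not the NuttX sources: the bitset g_cpu_irqset mirrors the real global of the same name, cpu_locked_by_other() stands in for the new sched_cpulocked(), and the spinlock that protects the bitset in the real code (g_cpu_irqsetlock) is omitted for brevity.

/* Minimal model: a CPU may start a new task only if no *other* CPU
 * currently holds the IRQ lock.  The holder itself is never blocked.
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t g_cpu_irqset;   /* Bit n set => CPU n holds the IRQ lock */

/* Analogue of sched_cpulocked(): true iff the IRQ lock is held by a
 * different CPU, so tasks started on 'cpu' must go to the pending list.
 */

static bool cpu_locked_by_other(int cpu)
{
  return g_cpu_irqset != 0 && (g_cpu_irqset & (1 << cpu)) == 0;
}

int main(void)
{
  g_cpu_irqset = (1 << 1);     /* CPU 1 enters the critical section */

  printf("CPU 0 must pend new tasks: %s\n",
         cpu_locked_by_other(0) ? "yes" : "no");   /* yes */
  printf("CPU 1 (the holder) may start tasks: %s\n",
         cpu_locked_by_other(1) ? "no" : "yes");   /* yes */
  return 0;
}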
@@ -330,7 +330,7 @@ void spin_unlockr(FAR struct spinlock_s *lock);
  * Input Parameters:
  *   set     - A reference to the bitset to set the CPU bit in
  *   cpu     - The bit number to be set
- *   setlock - A reference to the lock lock protecting the set
+ *   setlock - A reference to the lock protecting the set
  *   orlock  - Will be set to SP_LOCKED while holding setlock
  *
  * Returned Value:
@@ -351,7 +351,7 @@ void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
  * Input Parameters:
  *   set     - A reference to the bitset to set the CPU bit in
  *   cpu     - The bit number to be set
- *   setlock - A reference to the lock lock protecting the set
+ *   setlock - A reference to the lock protecting the set
  *   orlock  - Will be set to SP_UNLOCKED if all bits become cleared in set
  *
  * Returned Value:
@@ -46,6 +46,76 @@
 #include "irq/irq.h"
 #include "sched/sched.h"
 
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: sched_cpulocked
+ *
+ * Description:
+ *   Test if the IRQ lock is set OR if this CPU holds the IRQ lock.
+ *   There is an interaction with pre-emption controls and IRQ locking:
+ *   Even if pre-emption is enabled, tasks will be forced to pend if
+ *   the IRQ lock is also set UNLESS the CPU starting the task is the
+ *   holder of the IRQ lock.
+ *
+ * Inputs:
+ *   cpu - The index of the CPU to be queried
+ *
+ * Return Value:
+ *   true  - IRQs are locked by a different CPU.
+ *   false - IRQs are unlocked OR if they are locked BUT this CPU
+ *           is the holder of the lock.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SMP
+static bool sched_cpulocked(int cpu)
+{
+  bool ret;
+
+  /* First, get the g_cpu_irqsetlock spinlock so that g_cpu_irqset and
+   * g_cpu_irqlock will be stable throughout this function.
+   */
+
+  spin_lock(&g_cpu_irqsetlock);
+
+  /* Test if g_cpu_irqlock is locked.  We don't really need to check
+   * g_cpu_irqlock to do this; we can use g_cpu_irqset.
+   */
+
+  if (g_cpu_irqset != 0)
+    {
+      /* Some CPU holds the lock.  So 'orlock' should be locked */
+
+      DEBUGASSERT(spin_islocked(&g_cpu_irqlock));
+
+      /* Return false if the 'cpu' is the holder of the lock; return
+       * true if g_cpu_irqlock is locked, but this CPU is not the
+       * holder of the lock.
+       */
+
+      ret = ((g_cpu_irqset & (1 << cpu)) == 0);
+    }
+  else
+    {
+      /* No CPU holds the lock.  So 'orlock' should be unlocked */
+
+      DEBUGASSERT(!spin_islocked(&g_cpu_irqlock));
+
+      /* Return false if g_cpu_irqlock is unlocked */
+
+      ret = false;
+    }
+
+  /* Release the g_cpu_irqsetlock */
+
+  spin_unlock(&g_cpu_irqsetlock);
+  return ret;
+}
+#endif
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -167,10 +237,11 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
 {
   FAR struct tcb_s *rtcb;
   FAR dq_queue_t *tasklist;
-  int task_state;
-  int cpu;
   bool switched;
   bool doswitch;
+  int task_state;
+  int cpu;
+  int me;
 
   /* Check if the blocked TCB is locked to this CPU */
 
@@ -226,9 +297,17 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
    * disabled.  If the selected state is TSTATE_TASK_READYTORUN, then it
    * should also go to the pending task list so that it will have a chance
    * to be restarted when the scheduler is unlocked.
+   *
+   * There is an interaction here with IRQ locking.  Even if pre-
+   * emption is enabled, tasks will be forced to pend if the IRQ lock
+   * is also set UNLESS the CPU starting the thread is also the holder of
+   * the IRQ lock.  sched_cpulocked() performs an atomic check for that
+   * situation.
    */
 
-  if (spin_islocked(&g_cpu_schedlock) && task_state != TSTATE_TASK_ASSIGNED)
+  me = this_cpu();
+  if ((spin_islocked(&g_cpu_schedlock) || sched_cpulocked(me)) &&
+      task_state != TSTATE_TASK_ASSIGNED)
     {
       /* Add the new ready-to-run task to the g_pendingtasks task list for
        * now.
@@ -255,10 +334,8 @@ bool sched_addreadytorun(FAR struct tcb_s *btcb)
     }
   else /* (task_state == TSTATE_TASK_ASSIGNED || task_state == TSTATE_TASK_RUNNING) */
     {
-      int me = this_cpu();
-
-      /* If we are modifying some assigned task list other than our own, we will
-       * need to stop that CPU.
+      /* If we are modifying some assigned task list other than our own, we
+       * will need to stop that CPU.
        */
 
       if (cpu != me)
@@ -127,11 +127,11 @@ int sched_unlock(void)
        * g_cpu_schedlock is locked.  In those cases, the release of the
        * pending tasks must be deferred until those conditions are met.
        *
-       * REVISIT:  This seems incomplete.  Apparently there is some
-       * condition that we must prevent releasing the pending tasks
-       * when in a critical section.  This logic does that, but there
-       * no corresponding logic to prohibit a new task from being
-       * started on the g_assignedtasks list.  Something is amiss.
+       * There are certain conditions that we must avoid by preventing
+       * releasing the pending tasks while within a critical section.
+       * This logic does that and there is matching logic in
+       * sched_addreadytorun to avoid starting new tasks within the
+       * critical section (unless the CPU is the holder of the lock).
        */
 
       if (!spin_islocked(&g_cpu_schedlock) &&
@@ -390,7 +390,7 @@ void spin_unlockr(FAR struct spinlock_s *lock)
  * Input Parameters:
  *   set     - A reference to the bitset to set the CPU bit in
  *   cpu     - The bit number to be set
- *   setlock - A reference to the lock lock protecting the set
+ *   setlock - A reference to the lock protecting the set
  *   orlock  - Will be set to SP_LOCKED while holding setlock
  *
  * Returned Value:
@@ -441,7 +441,7 @@ void spin_setbit(FAR volatile cpu_set_t *set, unsigned int cpu,
  * Input Parameters:
  *   set     - A reference to the bitset to set the CPU bit in
  *   cpu     - The bit number to be set
- *   setlock - A reference to the lock lock protecting the set
+ *   setlock - A reference to the lock protecting the set
  *   orlock  - Will be set to SP_UNLOCKED if all bits become cleared in set
  *
  * Returned Value: