sched: sched: Remove sched_tasklistlock.c for SMP
Summary:
- sched_tasklistlock.c was introduced to stabilize NuttX SMP.
- However, the current SMP implementation is now stable thanks to recent fixes.
- So the file can finally be removed.

Impact:
- SMP only

Testing:
- Tested with ostest with the following configurations:
  - spresense:smp
  - spresense:wifi_smp (NCPUS=2 and 4)
  - sabre-6quad:smp (QEMU)
  - esp32-core:smp (QEMU)
  - maix-bit:smp (QEMU)
  - sim:smp
  - lc823450-xgevk:rndis

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
commit 1dad55d4be
parent f0bf2143f6
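
For context: the removed sched_tasklistlock.c implemented a recursive, per-CPU-counted spinlock around the scheduler's task lists. The standalone sketch below models that nesting behavior using C11 atomics in place of NuttX's spinlock/IRQ primitives; every name in it is illustrative, and the real implementation appears verbatim in the deleted file at the end of this diff.

/* Simplified model of the removed tasklist lock (illustrative, not NuttX
 * code).  A per-CPU counter makes the global lock recursive: only the
 * outermost lock/unlock on a given CPU actually touches the spinlock.
 */

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 2

static atomic_flag g_tasklist_lock = ATOMIC_FLAG_INIT;
static unsigned g_lock_count[NCPUS];   /* per-CPU nesting depth */

static void tasklist_lock(int cpu)
{
  /* Take the global lock only on the outermost call from this CPU */

  if (g_lock_count[cpu]++ == 0)
    {
      while (atomic_flag_test_and_set(&g_tasklist_lock))
        {
          /* spin until the lock is released by another CPU */
        }
    }
}

static void tasklist_unlock(int cpu)
{
  assert(g_lock_count[cpu] > 0);

  /* Release the global lock only when the outermost call returns */

  if (--g_lock_count[cpu] == 0)
    {
      atomic_flag_clear(&g_tasklist_lock);
    }
}

int main(void)
{
  tasklist_lock(0);
  tasklist_lock(0);    /* nested call: depth goes to 2, no deadlock */
  tasklist_unlock(0);
  tasklist_unlock(0);  /* outermost unlock releases the spinlock */
  puts("nested lock/unlock ok");
  return 0;
}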
sched/Makefile
@@ -85,7 +85,6 @@ CSRCS += sched_processtimer.c
 endif
 endif
 
 ifeq ($(CONFIG_SMP),y)
-CSRCS += sched_tasklistlock.c
 CSRCS += sched_thistask.c
 endif
 
sched/sched/sched.h
@@ -417,9 +417,6 @@ FAR struct tcb_s *this_task(void);
 int nxsched_select_cpu(cpu_set_t affinity);
 int nxsched_pause_cpu(FAR struct tcb_s *tcb);
 
-irqstate_t nxsched_lock_tasklist(void);
-void nxsched_unlock_tasklist(irqstate_t lock);
-
 #  define nxsched_islocked_global() spin_islocked(&g_cpu_schedlock)
 #  define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
 
sched/sched/sched_addblocked.c
@@ -77,12 +77,6 @@ void nxsched_add_blocked(FAR struct tcb_s *btcb, tstate_t task_state)
   DEBUGASSERT(task_state >= FIRST_BLOCKED_STATE &&
               task_state <= LAST_BLOCKED_STATE);
 
-#ifdef CONFIG_SMP
-  /* Lock the tasklists before accessing */
-
-  irqstate_t lock = nxsched_lock_tasklist();
-#endif
-
   /* Add the TCB to the blocked task list associated with this state. */
 
   tasklist = TLIST_BLOCKED(task_state);
@@ -102,12 +96,6 @@ void nxsched_add_blocked(FAR struct tcb_s *btcb, tstate_t task_state)
       dq_addlast((FAR dq_entry_t *)btcb, tasklist);
     }
 
-#ifdef CONFIG_SMP
-  /* Unlock the tasklists */
-
-  nxsched_unlock_tasklist(lock);
-#endif
-
   /* Make sure the TCB's state corresponds to the list */
 
   btcb->task_state = task_state;
sched/sched/sched_addreadytorun.c
@@ -174,10 +174,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
   int cpu;
   int me;
 
-  /* Lock the tasklists before accessing */
-
-  irqstate_t lock = nxsched_lock_tasklist();
-
   /* Check if the blocked TCB is locked to this CPU */
 
   if ((btcb->flags & TCB_FLAG_CPU_LOCKED) != 0)
@@ -276,9 +272,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
 
   if (cpu != me)
     {
-      nxsched_unlock_tasklist(lock);
       DEBUGVERIFY(up_cpu_pause(cpu));
-      lock = nxsched_lock_tasklist();
     }
 
   /* Add the task to the list corresponding to the selected state
@@ -433,9 +427,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
        }
    }
 
-  /* Unlock the tasklists */
-
-  nxsched_unlock_tasklist(lock);
  return doswitch;
 }
 
sched/sched/sched_mergepending.c
@@ -184,10 +184,6 @@ bool nxsched_merge_pending(void)
   int cpu;
   int me;
 
-  /* Lock the tasklist before accessing */
-
-  irqstate_t lock = nxsched_lock_tasklist();
-
   /* Remove and process every TCB in the g_pendingtasks list.
    *
    * Do nothing if (1) pre-emption is still disabled (by any CPU), or (2) if
@@ -204,7 +200,7 @@ bool nxsched_merge_pending(void)
        {
          /* The pending task list is empty */
 
-          goto errout_with_lock;
+          goto errout;
        }
 
      cpu = nxsched_select_cpu(ALL_CPUS); /* REVISIT: Maybe ptcb->affinity */
@@ -228,9 +224,7 @@ bool nxsched_merge_pending(void)
 
          /* Add the pending task to the correct ready-to-run list. */
 
-          nxsched_unlock_tasklist(lock);
          ret |= nxsched_add_readytorun(tcb);
-          lock = nxsched_lock_tasklist();
 
          /* This operation could cause the scheduler to become locked.
           * Check if that happened.
@@ -251,7 +245,7 @@ bool nxsched_merge_pending(void)
               * pending task list.
               */
 
-              goto errout_with_lock;
+              goto errout;
            }
 
          /* Set up for the next time through the loop */
@@ -262,7 +256,7 @@ bool nxsched_merge_pending(void)
            {
              /* The pending task list is empty */
 
-              goto errout_with_lock;
+              goto errout;
            }
 
          cpu = nxsched_select_cpu(ALL_CPUS); /* REVISIT: Maybe ptcb->affinity */
@@ -278,11 +272,8 @@ bool nxsched_merge_pending(void)
                                    TSTATE_TASK_READYTORUN);
    }
 
-errout_with_lock:
+errout:
 
-  /* Unlock the tasklist */
-
-  nxsched_unlock_tasklist(lock);
  return ret;
 }
 #endif /* CONFIG_SMP */
sched/sched/sched_mergeprioritized.c
@@ -68,12 +68,6 @@ void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
   FAR struct tcb_s *tcb2;
   FAR struct tcb_s *tmp;
 
-#ifdef CONFIG_SMP
-  /* Lock the tasklists before accessing */
-
-  irqstate_t lock = nxsched_lock_tasklist();
-#endif
-
   DEBUGASSERT(list1 != NULL && list2 != NULL);
 
   /* Get a private copy of list1, clearing list1.  We do this early so that
@@ -90,7 +84,7 @@ void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
    {
      /* Special case.. list1 is empty.  There is nothing to be done. */
 
-      goto ret_with_lock;
+      goto out;
    }
 
  /* Now the TCBs are no longer accessible and we can change the state on
@@ -113,7 +107,7 @@ void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
      /* Special case.. list2 is empty.  Move list1 to list2. */
 
      dq_move(&clone, list2);
-      goto ret_with_lock;
+      goto out;
    }
 
  /* Now loop until all entries from list1 have been merged into list2. tcb1
@@ -163,12 +157,7 @@ void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
    }
  while (tcb1 != NULL);
 
-ret_with_lock:
+out:
 
-#ifdef CONFIG_SMP
-  /* Unlock the tasklists */
-
-  nxsched_unlock_tasklist(lock);
-#endif
  return;
 }
sched/sched/sched_removereadytorun.c
@@ -140,10 +140,6 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb)
   bool doswitch = false;
   int cpu;
 
-  /* Lock the tasklists before accessing */
-
-  irqstate_t lock = nxsched_lock_tasklist();
-
   /* Which CPU (if any) is the task running on?  Which task list holds the
    * TCB?
    */
@@ -188,9 +184,7 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb)
      me = this_cpu();
      if (cpu != me)
        {
-          nxsched_unlock_tasklist(lock);
          DEBUGVERIFY(up_cpu_pause(cpu));
-          lock = nxsched_lock_tasklist();
        }
 
      /* The task is running but the CPU that it was running on has been
@@ -336,9 +330,6 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb)
 
   rtcb->task_state = TSTATE_TASK_INVALID;
 
-  /* Unlock the tasklists */
-
-  nxsched_unlock_tasklist(lock);
   return doswitch;
 }
 #endif /* CONFIG_SMP */
sched/sched/sched_tasklistlock.c (deleted)
@@ -1,118 +0,0 @@
-/****************************************************************************
- * sched/sched/sched_tasklistlock.c
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.  The
- * ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
- * License for the specific language governing permissions and limitations
- * under the License.
- *
- ****************************************************************************/
-
-/****************************************************************************
- * Included Files
- ****************************************************************************/
-
-#include <nuttx/config.h>
-#include <nuttx/spinlock.h>
-
-#include <sys/types.h>
-#include <arch/irq.h>
-
-#include "sched/sched.h"
-
-/****************************************************************************
- * Public Data
- ****************************************************************************/
-
-/* Spinlock to protect the tasklists */
-
-static volatile spinlock_t g_tasklist_lock SP_SECTION = SP_UNLOCKED;
-
-/* Handles nested calls */
-
-static volatile uint8_t g_tasklist_lock_count[CONFIG_SMP_NCPUS];
-
-/****************************************************************************
- * Public Functions
- ****************************************************************************/
-
-/****************************************************************************
- * Name: nxsched_lock_tasklist()
- *
- * Description:
- *   Disable local interrupts and take the global spinlock (g_tasklist_lock)
- *   if the call counter (g_tasklist_lock_count[cpu]) equals 0.  Then the
- *   counter on the CPU is incremented to allow nested calls.
- *
- *   NOTE: This API is used to protect tasklists in the scheduler.  So do not
- *   use this API for other purposes.
- *
- * Returned Value:
- *   An opaque, architecture-specific value that represents the state of
- *   the interrupts prior to the call to nxsched_lock_tasklist();
- ****************************************************************************/
-
-irqstate_t nxsched_lock_tasklist(void)
-{
-  int me;
-  irqstate_t ret;
-
-  ret = up_irq_save();
-  me  = this_cpu();
-
-  if (0 == g_tasklist_lock_count[me])
-    {
-      spin_lock(&g_tasklist_lock);
-    }
-
-  g_tasklist_lock_count[me]++;
-  DEBUGASSERT(0 != g_tasklist_lock_count[me]);
-  return ret;
-}
-
-/****************************************************************************
- * Name: nxsched_unlock_tasklist()
- *
- * Description:
- *   Decrement the call counter (g_tasklist_lock_count[cpu]) and if it
- *   decrements to zero then release the spinlock (g_tasklist_lock) and
- *   restore the interrupt state as it was prior to the previous call to
- *   nxsched_lock_tasklist().
- *
- *   NOTE: This API is used to protect tasklists in the scheduler.  So do not
- *   use this API for other purposes.
- *
- * Input Parameters:
- *   lock - The architecture-specific value that represents the state of
- *          the interrupts prior to the call to nxsched_lock_tasklist().
- *
- * Returned Value:
- *   None
- ****************************************************************************/
-
-void nxsched_unlock_tasklist(irqstate_t lock)
-{
-  int me;
-
-  me = this_cpu();
-
-  DEBUGASSERT(0 < g_tasklist_lock_count[me]);
-  g_tasklist_lock_count[me]--;
-
-  if (0 == g_tasklist_lock_count[me])
-    {
-      spin_unlock(&g_tasklist_lock);
-    }
-
-  up_irq_restore(lock);
-}