sched: change nxsched_islocked_global to nxsched_islocked_tcb
reason:
1 To improve efficiency, we mimic Linux's behavior: disabling preemption
  applies only to the current CPU and does not affect the other CPUs.
2 In the future we will implement "spinlock + sched_lock" and use it
  extensively.  In that case, if preemption were still disabled globally,
  it would seriously hurt scheduling efficiency.
3 We have removed g_cpu_lockset and rely on irqcount instead, in order to
  eliminate sched_lock's dependency on critical sections in the future,
  simplify the logic, and further improve the performance of sched_lock.
4 We initialize lockcount to 1 so that scheduling is locked on all CPUs
  during startup, without needing additional functions to disable
  scheduling on the other CPUs.
5 CPUs 1..n must wait until CPU0 has entered its idle loop before enabling
  scheduling; otherwise they would compete with CPU0 for the memory manager
  mutex, which could put the CPU0 idle task into a wait state and trigger
  an assertion.

size nuttx
before:
   text    data     bss     dec     hex filename
 265396   51057   63646  380099   5ccc3 nuttx
after:
   text    data     bss     dec     hex filename
 265184   51057   63642  379883   5cbeb nuttx
size delta: -216

Configuring NuttX and compiling:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make
Running with qemu:
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
  -machine virt,virtualization=on,gic-version=3 \
  -net none -chardev stdio,id=con,mux=on -serial chardev:con \
  -mon chardev=con,mode=readline -kernel ./nuttx

Signed-off-by: hujun5 <hujun5@xiaomi.com>
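Illustrative sketch (not part of the patch) of the per-CPU pre-emption test
described in points 1-3: nxsched_islocked_tcb() only consults the lockcount
of the TCB passed to it, so sched_lock() on one CPU no longer blocks
scheduling on the others.  The macro matches the sched.h hunk below; the
caller shown here is simplified and hypothetical.

  /* Per-TCB test introduced by this patch (see the sched.h hunk below) */

  #define nxsched_islocked_tcb(tcb)  ((tcb)->lockcount > 0)

  /* Hypothetical caller on the scheduling path: only this CPU defers the
   * context switch; the other CPUs keep scheduling normally.
   */

  if (nxsched_islocked_tcb(this_task()))
    {
      /* Pre-emption is locked on this CPU only: park the ready task on
       * g_pendingtasks until sched_unlock() is called here.
       */
    }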
parent 1237e9fcea
commit 508c5889d7
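Points 4 and 5 of the commit message correspond to the idle_task_initialize()
and nx_idle_trampoline() hunks further down; the following is a condensed
sketch of that start-up ordering (function bodies abridged, not a complete
implementation).

  static void idle_task_initialize(void)
  {
    /* ... */
    tcb->task_state = TSTATE_TASK_RUNNING;
    tcb->lockcount  = 1;     /* every CPU starts with scheduling locked */
    /* ... */
  }

  void nx_idle_trampoline(void)
  {
    /* CPUs 1..n spin until CPU0 reaches its idle loop, so they cannot
     * contend with CPU0 for the memory manager mutex during bring-up.
     */

    while (!OSINIT_IDLELOOP());

    sched_unlock();          /* drop the initial lockcount of 1 */

    /* Enter the IDLE loop ... */
  }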
@@ -89,7 +89,7 @@ extern "C"
  * hardware resources may not yet be available to the OS-internal logic.
  */
 
-EXTERN uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+EXTERN volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */
 
 /****************************************************************************
  * Public Function Prototypes
@@ -35,6 +35,7 @@
 #include <nuttx/kmalloc.h>
 #include <nuttx/sched.h>
 #include <nuttx/sched_note.h>
+#include <nuttx/init.h>
 
 #include "group/group.h"
 #include "sched/sched.h"
@@ -74,6 +75,12 @@ void nx_idle_trampoline(void)
 sched_note_start(tcb);
 #endif
 
+/* wait until cpu0 in idle() */
+
+while (!OSINIT_IDLELOOP());
+
+sched_unlock();
+
 /* Enter the IDLE loop */
 
 sinfo("CPU%d: Beginning Idle Loop\n", this_cpu());
@@ -195,7 +195,7 @@ struct tasklist_s g_tasklisttable[NUM_TASK_STATES];
  * hardware resources may not yet be available to the kernel logic.
  */
 
-uint8_t g_nx_initstate; /* See enum nx_initstate_e */
+volatile uint8_t g_nx_initstate; /* See enum nx_initstate_e */
 
 /****************************************************************************
  * Private Data
@@ -361,6 +361,7 @@ static void idle_task_initialize(void)
 
 tcb->pid = i;
 tcb->task_state = TSTATE_TASK_RUNNING;
+tcb->lockcount = 1;
 
 /* Set the entry point. This is only for debug purposes. NOTE: that
  * the start_t entry point is not saved. That is acceptable, however,
@@ -628,13 +629,6 @@ void nx_start(void)
 
 task_initialize();
 
-/* Disables context switching because we need take the memory manager
- * semaphore on this CPU so that it will not be available on the other
- * CPUs until we have finished initialization.
- */
-
-sched_lock();
-
 /* Initialize the instrument function */
 
 instrument_initialize();
@@ -297,9 +297,6 @@ extern volatile clock_t g_cpuload_total;
  */
 
 #ifdef CONFIG_SMP
-/* Used to keep track of which CPU(s) hold the IRQ lock. */
-
-extern volatile cpu_set_t g_cpu_lockset;
 
 /* This is the spinlock that enforces critical sections when interrupts are
  * disabled.
@@ -406,16 +403,13 @@ static inline_function FAR struct tcb_s *this_task(void)
 int nxsched_select_cpu(cpu_set_t affinity);
 int nxsched_pause_cpu(FAR struct tcb_s *tcb);
 void nxsched_process_delivered(int cpu);
-
-# define nxsched_islocked_global() (g_cpu_lockset != 0)
-# define nxsched_islocked_tcb(tcb) nxsched_islocked_global()
-
 #else
 # define nxsched_select_cpu(a) (0)
 # define nxsched_pause_cpu(t) (-38) /* -ENOSYS */
-# define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
 #endif
 
+#define nxsched_islocked_tcb(tcb) ((tcb)->lockcount > 0)
+
 /* CPU load measurement support */
 
 #if defined(CONFIG_SCHED_CPULOAD_SYSCLK) || \
@@ -194,7 +194,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
  * situation.
  */
 
-if (nxsched_islocked_global())
+if (nxsched_islocked_tcb(this_task()))
 {
 /* Add the new ready-to-run task to the g_pendingtasks task list for
  * now.
@@ -275,14 +275,6 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
 btcb->task_state = TSTATE_TASK_RUNNING;
 
 doswitch = true;
-
-/* Resume scheduling lock */
-
-DEBUGASSERT(g_cpu_lockset == 0);
-if (btcb->lockcount > 0)
-{
-g_cpu_lockset |= (1 << cpu);
-}
 }
 
 return doswitch;
@@ -42,30 +42,6 @@
  * Public Data
 ****************************************************************************/
 
-/* Pre-emption is disabled via the interface sched_lock(). sched_lock()
- * works by preventing context switches from the currently executing tasks.
- * This prevents other tasks from running (without disabling interrupts) and
- * gives the currently executing task exclusive access to the (single) CPU
- * resources. Thus, sched_lock() and its companion, sched_unlock(), are
- * used to implement some critical sections.
- *
- * In the single CPU case, pre-emption is disabled using a simple lockcount
- * in the TCB. When the scheduling is locked, the lockcount is incremented;
- * when the scheduler is unlocked, the lockcount is decremented. If the
- * lockcount for the task at the head of the g_readytorun list has a
- * lockcount > 0, then pre-emption is disabled.
- *
- * No special protection is required since only the executing task can
- * modify its lockcount.
- */
-
-#ifdef CONFIG_SMP
-/* Used to keep track of which CPU(s) hold the IRQ lock. */
-
-volatile cpu_set_t g_cpu_lockset;
-
-#endif /* CONFIG_SMP */
-
 /****************************************************************************
  * Public Functions
 ****************************************************************************/
@@ -93,7 +69,6 @@ volatile cpu_set_t g_cpu_lockset
 int sched_lock(void)
 {
 FAR struct tcb_s *rtcb;
-int cpu;
 
 /* If the CPU supports suppression of interprocessor interrupts, then
  * simple disabling interrupts will provide sufficient protection for
|
|||||||
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
|
DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
|
||||||
|
|
||||||
flags = enter_critical_section();
|
flags = enter_critical_section();
|
||||||
cpu = this_cpu();
|
|
||||||
|
|
||||||
/* We must hold the lock on this CPU before we increment the lockcount
|
|
||||||
* for the first time. Holding the lock is sufficient to lockout
|
|
||||||
* context switching.
|
|
||||||
*/
|
|
||||||
|
|
||||||
if (rtcb->lockcount == 0)
|
|
||||||
{
|
|
||||||
/* We don't have the scheduler locked. But logic running on a
|
|
||||||
* different CPU may have the scheduler locked. It is not
|
|
||||||
* possible for some other task on this CPU to have the scheduler
|
|
||||||
* locked (or we would not be executing!).
|
|
||||||
*/
|
|
||||||
|
|
||||||
DEBUGASSERT((g_cpu_lockset & (1 << cpu)) == 0);
|
|
||||||
g_cpu_lockset |= (1 << cpu);
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
/* If this thread already has the scheduler locked, then
|
|
||||||
* g_cpu_lockset should indicate that the scheduler is locked
|
|
||||||
* and g_cpu_lockset should include the bit setting for this CPU.
|
|
||||||
*/
|
|
||||||
|
|
||||||
DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* A counter is used to support locking. This allows nested lock
|
/* A counter is used to support locking. This allows nested lock
|
||||||
* operations on this thread (on any CPU)
|
* operations on this thread
|
||||||
*/
|
*/
|
||||||
|
|
||||||
rtcb->lockcount++;
|
rtcb->lockcount++;
|
||||||
|
@@ -199,7 +199,7 @@ bool nxsched_merge_pending(void)
  * some CPU other than this one is in a critical section.
  */
 
-if (!nxsched_islocked_global())
+if (!nxsched_islocked_tcb(this_task()))
 {
 /* Find the CPU that is executing the lowest priority task */
 
@@ -237,7 +237,7 @@ bool nxsched_merge_pending(void)
  * Check if that happened.
  */
 
-if (nxsched_islocked_global())
+if (nxsched_islocked_tcb(this_task()))
 {
 /* Yes.. then we may have incorrectly placed some TCBs in the
  * g_readytorun list (unlikely, but possible). We will have to
@@ -84,9 +84,10 @@ void nxsched_process_delivered(int cpu)
 g_cpu_irqset |= (1 << cpu);
 }
 
+tcb = current_task(cpu);
+
 if (g_delivertasks[cpu] == NULL)
 {
-tcb = current_task(cpu);
 if (tcb->irqcount <= 0)
 {
 cpu_irqlock_clear();
@@ -95,13 +96,12 @@ void nxsched_process_delivered(int cpu)
 return;
 }
 
-if (nxsched_islocked_global())
+if (nxsched_islocked_tcb(tcb))
 {
 btcb = g_delivertasks[cpu];
 g_delivertasks[cpu] = NULL;
 nxsched_add_prioritized(btcb, &g_pendingtasks);
 btcb->task_state = TSTATE_TASK_PENDING;
-tcb = current_task(cpu);
 if (tcb->irqcount <= 0)
 {
 cpu_irqlock_clear();
@@ -111,9 +111,8 @@ void nxsched_process_delivered(int cpu)
 }
 
 btcb = g_delivertasks[cpu];
-tasklist = &g_assignedtasks[cpu];
 
-for (next = (FAR struct tcb_s *)tasklist->head;
+for (next = tcb;
 (next && btcb->sched_priority <= next->sched_priority);
 next = next->flink);
 
@@ -122,6 +121,7 @@ void nxsched_process_delivered(int cpu)
 {
 /* Special case: Insert at the head of the list */
 
+tasklist = &g_assignedtasks[cpu];
 dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist);
 btcb->cpu = cpu;
 btcb->task_state = TSTATE_TASK_RUNNING;
@@ -129,11 +129,6 @@ void nxsched_process_delivered(int cpu)
 DEBUGASSERT(btcb->flink != NULL);
 DEBUGASSERT(next == btcb->flink);
 next->task_state = TSTATE_TASK_ASSIGNED;
-
-if (btcb->lockcount > 0)
-{
-g_cpu_lockset |= (1 << cpu);
-}
 }
 else
 {
@@ -262,23 +262,6 @@ void nxsched_remove_running(FAR struct tcb_s *tcb)
 nxttcb = rtrtcb;
 }
 
-/* Will pre-emption be disabled after the switch? If the lockcount is
- * greater than zero, then this task/this CPU holds the scheduler lock.
- */
-
-if (nxttcb->lockcount > 0)
-{
-/* Yes... make sure that scheduling logic knows about this */
-
-g_cpu_lockset |= (1 << cpu);
-}
-else
-{
-/* No.. we may need to perform release our hold on the lock. */
-
-g_cpu_lockset &= ~(1 << cpu);
-}
-
 /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
  * controls will be done in the pause handler on the new CPU(cpu).
  * If the task is scheduled on this CPU(me), do nothing because
@@ -70,7 +70,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
  * then use the 'nxttcb' which will probably be the IDLE thread.
  */
 
-if (!nxsched_islocked_global())
+if (!nxsched_islocked_tcb(this_task()))
 {
 /* Search for the highest priority task that can run on tcb->cpu. */
 
@@ -77,12 +77,11 @@ int sched_unlock(void)
 irqstate_t flags = enter_critical_section();
 int cpu = this_cpu();
 
+DEBUGASSERT(rtcb->lockcount > 0);
+
 /* Decrement the preemption lock counter */
 
-if (rtcb->lockcount > 0)
-{
-rtcb->lockcount--;
-}
+rtcb->lockcount--;
 
 /* Check if the lock counter has decremented to zero. If so,
  * then pre-emption has been re-enabled.
@@ -103,14 +102,6 @@ int sched_unlock(void)
 
 rtcb->lockcount = 0;
 
-/* The lockcount has decremented to zero and we need to perform
- * release our hold on the lock.
- */
-
-DEBUGASSERT((g_cpu_lockset & (1 << cpu)) != 0);
-
-g_cpu_lockset &= ~(1 << cpu);
-
 /* Release any ready-to-run tasks that have collected in
  * g_pendingtasks.
  *
@@ -137,7 +128,7 @@ int sched_unlock(void)
  * BEFORE it clears IRQ lock.
  */
 
-if (!nxsched_islocked_global() &&
+if (!nxsched_islocked_tcb(rtcb) &&
 list_pendingtasks()->head != NULL)
 {
 if (nxsched_merge_pending())
@@ -211,6 +202,7 @@ int sched_unlock(void)
 #endif
 }
 
+UNUSED(cpu);
 leave_critical_section(flags);
 }
 
@@ -234,12 +226,11 @@ int sched_unlock(void)
 
 irqstate_t flags = enter_critical_section();
 
+DEBUGASSERT(rtcb->lockcount > 0);
+
 /* Decrement the preemption lock counter */
 
-if (rtcb->lockcount > 0)
-{
-rtcb->lockcount--;
-}
+rtcb->lockcount--;
 
 /* Check if the lock counter has decremented to zero. If so,
  * then pre-emption has been re-enabled.
@@ -137,12 +137,6 @@ int nxtask_exit(void)
 
 rtcb->lockcount++;
 
-#ifdef CONFIG_SMP
-/* Make sure that the system knows about the locked state */
-
-g_cpu_lockset |= (1 << cpu);
-#endif
-
 rtcb->task_state = TSTATE_TASK_READYTORUN;
 
 /* Move the TCB to the specified blocked task list and delete it. Calling
@@ -177,14 +171,5 @@ int nxtask_exit(void)
 
 rtcb->lockcount--;
 
-#ifdef CONFIG_SMP
-if (rtcb->lockcount == 0)
-{
-/* Make sure that the system knows about the unlocked state */
-
-g_cpu_lockset &= ~(1 << cpu);
-}
-#endif
-
 return ret;
 }