/****************************************************************************
 * sched/init/nx_smpstart.c
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/arch.h>
#include <nuttx/kmalloc.h>
#include <nuttx/sched.h>
#include <nuttx/sched_note.h>
#include <nuttx/init.h>

#include "group/group.h"
#include "sched/sched.h"
#include "init/init.h"

#ifdef CONFIG_SMP

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: nx_idle_trampoline
 *
 * Description:
 *   This is the common start-up logic for the IDLE task for CPUs 1 through
 *   (CONFIG_SMP_NCPUS-1).  Having a start-up function such as this for the
 *   IDLE task is not really an architectural necessity.  It is used only
 *   for symmetry with how other threads are started (see nxtask_start()
 *   and pthread_start()).
 *
 * Input Parameters:
 *   None.
 *
 * Returned Value:
 *   This function does not return.
 *
 ****************************************************************************/

void nx_idle_trampoline(void)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SWITCH
  FAR struct tcb_s *tcb = this_task();

  /* Announce that the IDLE task has started */

  sched_note_start(tcb);
#endif
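
  /* Scheduling is locked on every CPU while the OS is brought up (each
   * IDLE task starts with a lockcount of 1).  CPUs other than CPU0 hold
   * off enabling pre-emption until CPU0 has reached its idle loop so that
   * they do not compete with CPU0 for the memory manager mutex during
   * initialization; otherwise the CPU0 IDLE task could be forced to wait
   * and trigger an assertion.
   */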

  /* Wait until CPU0 has entered its idle loop */

  while (!OSINIT_IDLELOOP());

  sched_unlock();

  /* Enter the IDLE loop */

  sinfo("CPU%d: Beginning Idle Loop\n", this_cpu());

  for (; ; )
    {
      /* Perform any processor-specific idle state operations */

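      /* up_idle() is the architecture hook that runs when there is no
       * other ready-to-run task; typically this is where the CPU may be
       * placed in a low-power wait state until the next event.
       */
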
      up_idle();
    }
}

/****************************************************************************
 * Name: nx_smp_start
 *
 * Description:
 *   In an SMP configuration, only one CPU is initially active (CPU 0).
 *   System initialization occurs on that single thread.  At the completion
 *   of the initialization of the OS, just before beginning normal
 *   multitasking, the additional CPUs would be started by calling this
 *   function.
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   Zero on success; a negated errno value on failure.
 *
 * Assumption:
 *   Runs before the full initialization sequence has completed.  Runs
 *   after all OS facilities are set up, but before multi-tasking has been
 *   started.
 *
 ****************************************************************************/
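
/* NOTE: In the SMP bring-up sequence this function is expected to be
 * called exactly once, from nx_start() on CPU0, after the OS has been
 * initialized but before normal multitasking begins.  Each CPU started
 * here enters the scheduler through nx_idle_trampoline() above.
 */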

int nx_smp_start(void)
{
  int ret;
  int cpu;

  /* Flush the dcache before starting the other CPUs. */
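  /* This pushes data cached by CPU0 (TCBs, IDLE stacks, etc.) out to
   * memory so that it is visible to CPUs that may start with their own
   * caches disabled or not yet coherent.
   */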

  up_flush_dcache_all();

  /* Start all of the other CPUs.  CPU0 is already running. */

  for (cpu = 1; cpu < CONFIG_SMP_NCPUS; cpu++)
    {
      /* Start the CPU */

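      /* up_cpu_start() is the architecture-specific interface that
       * actually wakes the CPU; the started CPU is expected to begin
       * execution in its own IDLE thread and enter the scheduler via
       * nx_idle_trampoline() above.
       */
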
      ret = up_cpu_start(cpu);
      if (ret < 0)
        {
          serr("ERROR: Failed to start CPU%d: %d\n", cpu, ret);
          return ret;
        }
    }

  return OK;
}

#endif /* CONFIG_SMP */