diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h
index b25f64e219..7372db28b0 100644
--- a/include/nuttx/sched.h
+++ b/include/nuttx/sched.h
@@ -137,9 +137,9 @@
 
 #define TCB_FLAG_TTYPE_SHIFT      (0)                      /* Bits 0-1: thread type */
 #define TCB_FLAG_TTYPE_MASK       (3 << TCB_FLAG_TTYPE_SHIFT)
-#  define TCB_FLAG_TTYPE_TASK    (0 << TCB_FLAG_TTYPE_SHIFT) /* Normal user task */
-#  define TCB_FLAG_TTYPE_PTHREAD (1 << TCB_FLAG_TTYPE_SHIFT) /* User pthread */
-#  define TCB_FLAG_TTYPE_KERNEL  (2 << TCB_FLAG_TTYPE_SHIFT) /* Kernel thread */
+#  define TCB_FLAG_TTYPE_TASK     (0 << TCB_FLAG_TTYPE_SHIFT) /* Normal user task */
+#  define TCB_FLAG_TTYPE_PTHREAD  (1 << TCB_FLAG_TTYPE_SHIFT) /* User pthread */
+#  define TCB_FLAG_TTYPE_KERNEL   (2 << TCB_FLAG_TTYPE_SHIFT) /* Kernel thread */
 #define TCB_FLAG_NONCANCELABLE    (1 << 2)                 /* Bit 2: Pthread is non-cancelable */
 #define TCB_FLAG_CANCEL_PENDING   (1 << 3)                 /* Bit 3: Pthread cancel is pending */
 #define TCB_FLAG_POLICY_SHIFT     (4)                      /* Bit 4-5: Scheduling policy */
@@ -148,7 +148,8 @@
 #  define TCB_FLAG_SCHED_RR       (1 << TCB_FLAG_POLICY_SHIFT) /* Round robin scheding policy */
 #  define TCB_FLAG_SCHED_SPORADIC (2 << TCB_FLAG_POLICY_SHIFT) /* Sporadic scheding policy */
 #  define TCB_FLAG_SCHED_OTHER    (3 << TCB_FLAG_POLICY_SHIFT) /* Other scheding policy */
-#define TCB_FLAG_EXIT_PROCESSING  (1 << 6)                 /* Bit 6: Exitting */
+#define TCB_FLAG_EXIT_ASSIGNED    (1 << 6)                 /* Bit 6: Assigned to a CPU */
+#define TCB_FLAG_EXIT_PROCESSING  (1 << 7)                 /* Bit 7: Exitting */
 
 /* Values for struct task_group tg_flags */
 
@@ -547,6 +548,9 @@ struct tcb_s
 #endif
 
   uint8_t  task_state;                   /* Current state of the thread */
 
+#ifdef CONFIG_SMP
+  uint8_t  cpu;                          /* CPU index if running or assigned */
+#endif
   uint16_t flags;                        /* Misc. general status flags */
   int16_t  lockcount;                    /* 0=preemptable (not-locked) */
diff --git a/sched/init/os_start.c b/sched/init/os_start.c
index a9696c8943..9bfcc9c7c2 100644
--- a/sched/init/os_start.c
+++ b/sched/init/os_start.c
@@ -328,6 +328,13 @@
   sq_init(&g_delayed_kufree);
 #endif
 
+#ifdef CONFIG_SMP
+  for (i = 0; i < CONFIG_SMP_NCPUS; i++)
+    {
+      dq_init(&g_assignedtasks[i]);
+    }
+#endif
+
   /* Initialize the logic that determine unique process IDs. */
 
   g_lastpid = 0;
@@ -380,7 +387,15 @@
 
   /* Then add the idle task's TCB to the head of the ready to run list */
 
+#ifdef CONFIG_SMP
+  /* Use the list of ready-to-run tasks assigned to CPU0 */
+
+  dq_addfirst((FAR dq_entry_t *)&g_idletcb, (FAR dq_queue_t *)&g_assignedtasks[0]);
+#else
+  /* Use the common, unassigned ready-to-run list */
+
   dq_addfirst((FAR dq_entry_t *)&g_idletcb, (FAR dq_queue_t *)&g_readytorun);
+#endif
 
   /* Initialize the processor-specific portion of the TCB */
 
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index e97e66f265..793de67a24 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -138,7 +138,7 @@ extern volatile dq_queue_t g_readytorun;
 
 #ifdef CONFIG_SMP
 /* In order to support SMP, the function of the g_readytorun list changes,
- * The g_readytorun is still used but in the SMP cae it will contain only:
+ * The g_readytorun is still used but in the SMP case it will contain only:
  *
  * - Only tasks/threads that are eligible to run, but not currently running,
  *   and
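
A note on the flag layout above: bits 0-1 of the 16-bit flags word in struct tcb_s encode the thread type, bits 4-5 the scheduling policy, and this patch moves exit processing to bit 7 so that bit 6 can report CPU assignment. A minimal sketch of how those bits would be decoded follows; the helper names are illustrative only and are not part of the patch.

#include <nuttx/config.h>
#include <stdbool.h>
#include <nuttx/sched.h>

/* Illustrative helpers only: decode the thread-type field and the new
 * bit 6 "assigned to a CPU" flag from tcb->flags.
 */

static inline bool tcb_is_kernel_thread(FAR struct tcb_s *tcb)
{
  /* Bits 0-1 hold the thread type; compare against the kernel encoding */

  return (tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_KERNEL;
}

static inline bool tcb_is_cpu_assigned(FAR struct tcb_s *tcb)
{
  /* Bit 6 (TCB_FLAG_EXIT_ASSIGNED) is set once the thread has been
   * assigned to a CPU.
   */

  return (tcb->flags & TCB_FLAG_EXIT_ASSIGNED) != 0;
}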
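
The os_start() hunk initializes and uses g_assignedtasks[], but the hunk that declares it is not part of this section. A declaration consistent with that usage would look roughly like the following; the placement and wording are assumptions, mirroring how g_readytorun is declared in sched/sched/sched.h.

/* Assumed declaration in sched/sched/sched.h: one ready-to-run list per
 * CPU, holding tasks that are running on or have been assigned to that CPU.
 */

#ifdef CONFIG_SMP
extern volatile dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
#endif

/* Assumed matching definition alongside the other task lists (not shown in
 * this section).
 */

#ifdef CONFIG_SMP
volatile dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];
#endif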
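
Taken together, the new cpu member, the bit-6 assigned flag, and the per-CPU lists suggest how a ready-to-run TCB gets routed under SMP: a thread already bound to a CPU belongs on that CPU's g_assignedtasks[] list, while unassigned threads stay on the common g_readytorun list. The sketch below only illustrates that routing decision; it is not the scheduler change itself (which is outside this section), and sched_select_readytorun() is a hypothetical name.

#include <nuttx/config.h>
#include <nuttx/sched.h>
#include <queue.h>

#include "sched/sched.h"

/* Hypothetical helper: choose the list that should receive a TCB that has
 * just become ready to run.
 */

static FAR dq_queue_t *sched_select_readytorun(FAR struct tcb_s *tcb)
{
#ifdef CONFIG_SMP
  if ((tcb->flags & TCB_FLAG_EXIT_ASSIGNED) != 0)
    {
      /* The thread is already bound to a CPU; tcb->cpu identifies which
       * per-CPU list it belongs on.
       */

      return (FAR dq_queue_t *)&g_assignedtasks[tcb->cpu];
    }
#endif

  /* Unassigned threads (and the non-SMP case) use the common list */

  return (FAR dq_queue_t *)&g_readytorun;
}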