Correct naming of a TCB flag; update some comments
parent 96223cedf1
commit 292d2fe648
@@ -148,7 +148,7 @@
 #  define TCB_FLAG_SCHED_RR        (1 << TCB_FLAG_POLICY_SHIFT) /* Round robin scheding policy */
 #  define TCB_FLAG_SCHED_SPORADIC  (2 << TCB_FLAG_POLICY_SHIFT) /* Sporadic scheding policy */
 #  define TCB_FLAG_SCHED_OTHER     (3 << TCB_FLAG_POLICY_SHIFT) /* Other scheding policy */
-#define TCB_FLAG_EXIT_ASSIGNED     (1 << 6) /* Bit 6: Assigned to a CPU */
+#define TCB_FLAG_CPU_ASSIGNED      (1 << 6) /* Bit 6: Assigned to a CPU */
 #define TCB_FLAG_EXIT_PROCESSING   (1 << 7) /* Bit 7: Exitting */
 
 /* Values for struct task_group tg_flags */
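The only code change in this hunk is the rename: the bit still marks a TCB that has been assigned to a CPU, it is just no longer mislabeled as an exit-related flag. A minimal sketch of how such a flag is typically tested, assuming the bits live in the flags member of struct tcb_s like the other TCB_FLAG_* values (the helper name is hypothetical):

#include <stdbool.h>
#include <nuttx/sched.h>    /* struct tcb_s and the TCB_FLAG_* definitions */

/* Hypothetical helper: true if this thread has already been assigned to
 * a CPU.  Assumes the flag word is tcb->flags, as for the other TCB
 * flag bits.
 */

static inline bool my_tcb_cpu_assigned(FAR struct tcb_s *tcb)
{
  return (tcb->flags & TCB_FLAG_CPU_ASSIGNED) != 0;
}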
@@ -275,6 +275,8 @@ extern volatile uint32_t g_cpuload_total;
  * Public Function Prototypes
  ****************************************************************************/
 
+/* Task list manipulation functions */
+
 bool sched_addreadytorun(FAR struct tcb_s *rtrtcb);
 bool sched_removereadytorun(FAR struct tcb_s *rtrtcb);
 bool sched_addprioritized(FAR struct tcb_s *newTcb, DSEG dq_queue_t *list);
@@ -283,6 +285,8 @@ void sched_addblocked(FAR struct tcb_s *btcb, tstate_t task_state);
 void sched_removeblocked(FAR struct tcb_s *btcb);
 int sched_setpriority(FAR struct tcb_s *tcb, int sched_priority);
 
+/* Priority inheritance support */
+
 #ifdef CONFIG_PRIORITY_INHERITANCE
 int sched_reprioritize(FAR struct tcb_s *tcb, int sched_priority);
 #else
@@ -290,6 +294,8 @@ int sched_reprioritize(FAR struct tcb_s *tcb, int sched_priority);
     sched_setpriority(tcb,sched_priority)
 #endif
 
+/* Support for tickless operation */
+
 #ifdef CONFIG_SCHED_TICKLESS
 unsigned int sched_timer_cancel(void);
 void sched_timer_resume(void);
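The two hunks above also make the configuration fallback easy to see: when CONFIG_PRIORITY_INHERITANCE is not selected, sched_reprioritize() is just a macro that expands to sched_setpriority(), so callers use the one name in either configuration. A small sketch of that call pattern, with a hypothetical helper and an illustrative priority value:

#include <nuttx/sched.h>
#include "sched/sched.h"    /* prototypes shown in the diff (header path assumed) */

/* Hypothetical helper: raise tcb to priority 200 (the value is
 * illustrative).  Works whether or not priority inheritance is
 * configured, because the disabled case maps sched_reprioritize()
 * onto sched_setpriority().
 */

static void my_boost_thread(FAR struct tcb_s *tcb)
{
  (void)sched_reprioritize(tcb, 200);
}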
@@ -300,6 +306,8 @@ void sched_timer_reassess(void);
 #  define sched_timer_reassess()
 #endif
 
+/* Scheduler policy support */
+
 #if CONFIG_RR_INTERVAL > 0
 uint32_t sched_roundrobin_process(FAR struct tcb_s *tcb, uint32_t ticks,
                                   bool noswitches);
@@ -317,10 +325,14 @@ uint32_t sched_sporadic_process(FAR struct tcb_s *tcb, uint32_t ticks,
 void sched_sporadic_lowpriority(FAR struct tcb_s *tcb);
 #endif
 
+/* CPU load measurement support */
+
 #if defined(CONFIG_SCHED_CPULOAD) && !defined(CONFIG_SCHED_CPULOAD_EXTCLK)
 void weak_function sched_process_cpuload(void);
 #endif
 
+/* TCB operations */
+
 bool sched_verifytcb(FAR struct tcb_s *tcb);
 int sched_releasetcb(FAR struct tcb_s *tcb, uint8_t ttype);
 
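sched_process_cpuload() is only declared when the scheduler itself drives the load measurement (CONFIG_SCHED_CPULOAD set and CONFIG_SCHED_CPULOAD_EXTCLK not set), so any caller has to sit behind the same guard. A minimal sketch of that usage, with a hypothetical my_timer_tick() standing in for the periodic timer path:

#include <nuttx/config.h>
#include "sched/sched.h"    /* sched_process_cpuload() prototype (header path assumed) */

/* Hypothetical periodic-tick handler that samples the CPU load only when
 * the internal-clock load measurement is configured in.
 */

static void my_timer_tick(void)
{
#if defined(CONFIG_SCHED_CPULOAD) && !defined(CONFIG_SCHED_CPULOAD_EXTCLK)
  sched_process_cpuload();
#endif
}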
@@ -62,9 +62,9 @@ pid_t getpid(void)
 
       /* Get the the TCB at the head of the ready-to-run task list. That
        * will be the currently executing task. There is an exception to
-       * this: Verify early in the start-up sequence, the g_readytorun
-       * list may be empty! This case, of course, the start-up/IDLE thread
-       * with pid == 0 must be running.
+       * this: Early in the start-up sequence, the ready-to-run list may be
+       * empty! This case, of course, the start-up/IDLE thread with pid == 0
+       * must be running.
        */
 
       rtcb = this_task();
@@ -78,7 +78,7 @@ pid_t getpid(void)
     }
 
   /* We must have been called earlier in the start up sequence from the
-   * start-up/IDLE thread before the g_readytorun list has been initialized.
+   * start-up/IDLE thread before the ready-to-run list has been initialized.
    */
 
   return 0;
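Taken together, the reworded comments describe the whole flow of getpid(): the TCB at the head of the ready-to-run list, returned by this_task(), is the running task, and if that list has not been initialized yet the caller can only be the start-up/IDLE thread, whose pid is 0. A condensed sketch of that logic, assuming the usual pid member of struct tcb_s; it is not the literal function body from this commit:

#include <sys/types.h>
#include <nuttx/sched.h>
#include "sched/sched.h"    /* this_task() (header path assumed) */

pid_t getpid(void)
{
  FAR struct tcb_s *rtcb;

  /* The TCB at the head of the ready-to-run list is the running task */

  rtcb = this_task();
  if (rtcb != NULL)
    {
      return rtcb->pid;
    }

  /* Early in start-up the ready-to-run list is still empty, so only the
   * start-up/IDLE thread (pid == 0) can be running.
   */

  return 0;
}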