diff --git a/sched/sched/sched_cpuload.c b/sched/sched/sched_cpuload.c
index ad6c999a23..c744164099 100644
--- a/sched/sched/sched_cpuload.c
+++ b/sched/sched/sched_cpuload.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/sched/sched_cpuload.c
  *
- *   Copyright (C) 2014 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2014, 2019 Gregory Nutt. All rights reserved.
  *   Author: Gregory Nutt
  *
  * Redistribution and use in source and binary forms, with or without
@@ -105,7 +105,7 @@ volatile uint32_t g_cpuload_total;
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_cpu_process_cpuload
+ * Name: nxsched_cpu_process_cpuload
  *
  * Description:
  *   Collect data that can be used for CPU load measurements.
@@ -122,7 +122,7 @@ volatile uint32_t g_cpuload_total;
  *
  ****************************************************************************/

-static inline void sched_cpu_process_cpuload(int cpu)
+static inline void nxsched_cpu_process_cpuload(int cpu)
 {
   FAR struct tcb_s *rtcb = current_task(cpu);
   int hash_index;
@@ -151,7 +151,7 @@ static inline void sched_cpu_process_cpuload(int cpu)
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_process_cpuload
+ * Name: nxsched_process_cpuload
  *
  * Description:
  *   Collect data that can be used for CPU load measurements.
@@ -168,7 +168,7 @@ static inline void sched_cpu_process_cpuload(int cpu)
  *
  ****************************************************************************/

-void weak_function sched_process_cpuload(void)
+void weak_function nxsched_process_cpuload(void)
 {
   int i;

@@ -180,13 +180,13 @@ void weak_function sched_process_cpuload(void)
   flags = enter_critical_section();
   for (i = 0; i < CONFIG_SMP_NCPUS; i++)
     {
-      sched_cpu_process_cpuload(i);
+      nxsched_cpu_process_cpuload(i);
     }

 #else

   /* Perform scheduler operations on the single CPU. */

-  sched_cpu_process_cpuload(0);
+  nxsched_cpu_process_cpuload(0);

 #endif
diff --git a/sched/sched/sched_cpuload_oneshot.c b/sched/sched/sched_cpuload_oneshot.c
index 97b7b869dc..6fc6c75072 100644
--- a/sched/sched/sched_cpuload_oneshot.c
+++ b/sched/sched/sched_cpuload_oneshot.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * sched/sched/sched_cpuload_oneshot.c
  *
- *   Copyright (C) 2016-2017 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2016-2017, 2019 Gregory Nutt. All rights reserved.
  *   Author: Gregory Nutt
  *
  * Redistribution and use in source and binary forms, with or without
@@ -119,9 +119,9 @@ struct sched_oneshot_s
  * Private Function Prototypes
  ****************************************************************************/

-static void sched_oneshot_start(void);
-static void sched_oneshot_callback(FAR struct oneshot_lowerhalf_s *lower,
-                                   FAR void *arg);
+static void nxsched_oneshot_start(void);
+static void nxsched_oneshot_callback(FAR struct oneshot_lowerhalf_s *lower,
+                                     FAR void *arg);

 /****************************************************************************
  * Private Data
@@ -134,7 +134,7 @@ static struct sched_oneshot_s g_sched_oneshot;
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_oneshot_start
+ * Name: nxsched_oneshot_start
  *
  * Description:
  *   [Re-]start the oneshot timer, applying entropy as configured
@@ -149,7 +149,7 @@ static struct sched_oneshot_s g_sched_oneshot;
  *
  ****************************************************************************/

-static void sched_oneshot_start(void)
+static void nxsched_oneshot_start(void)
 {
   struct timespec ts;
 #if CONFIG_CPULOAD_ENTROPY > 0
@@ -203,12 +203,12 @@ static void sched_oneshot_start(void)
   ts.tv_sec = secs;
   ts.tv_nsec = 1000 * usecs;

-  DEBUGVERIFY(ONESHOT_START(g_sched_oneshot.oneshot, sched_oneshot_callback,
-                            NULL, &ts));
+  DEBUGVERIFY(ONESHOT_START(g_sched_oneshot.oneshot,
+                            nxsched_oneshot_callback, NULL, &ts));
 }

 /****************************************************************************
- * Name: sched_oneshot_callback
+ * Name: nxsched_oneshot_callback
  *
  * Description:
  *   This is the callback function that will be invoked when the oneshot
@@ -223,8 +223,8 @@ static void sched_oneshot_start(void)
  *
  ****************************************************************************/

-static void sched_oneshot_callback(FAR struct oneshot_lowerhalf_s *lower,
-                                   FAR void *arg)
+static void nxsched_oneshot_callback(FAR struct oneshot_lowerhalf_s *lower,
+                                     FAR void *arg)
 {
   /* Perform CPU load measurements */

@@ -237,7 +237,7 @@ static void sched_oneshot_callback(FAR struct oneshot_lowerhalf_s *lower,

   /* Then restart the oneshot */

-  sched_oneshot_start();
+  nxsched_oneshot_start();
 }

 /****************************************************************************
@@ -299,6 +299,6 @@ void sched_oneshot_extclk(FAR struct oneshot_lowerhalf_s *lower)
   /* Then start the oneshot */

   g_sched_oneshot.oneshot = lower;
-  sched_oneshot_start();
+  nxsched_oneshot_start();
 }
 #endif
diff --git a/sched/sched/sched_cpuload_period.c b/sched/sched/sched_cpuload_period.c
index 932df76dcb..613c9db29c 100644
--- a/sched/sched/sched_cpuload_period.c
+++ b/sched/sched/sched_cpuload_period.c
@@ -115,8 +115,8 @@ struct sched_period_s
  * Private Function Prototypes
  ****************************************************************************/

-static bool sched_period_callback(FAR uint32_t *next_interval_us,
-                                  FAR void *arg);
+static bool nxsched_period_callback(FAR uint32_t *next_interval_us,
+                                    FAR void *arg);

 /****************************************************************************
  * Private Data
  ****************************************************************************/
@@ -131,7 +131,7 @@ static struct sched_period_s g_sched_period;
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_period_callback
+ * Name: nxsched_period_callback
  *
  * Description:
  *   This is the callback function that will be invoked when the period
@@ -146,8 +146,8 @@ static struct sched_period_s g_sched_period;
  *
  ****************************************************************************/

-static bool sched_period_callback(FAR uint32_t *next_interval_us,
-                                  FAR void *arg)
+static bool nxsched_period_callback(FAR uint32_t *next_interval_us,
+                                    FAR void *arg)
 {
   /* Get the next delay */

@@ -243,7 +243,7 @@ void sched_period_extclk(FAR struct timer_lowerhalf_s *lower)

   /* Then start the period timer */

-  lower->ops->setcallback(lower, sched_period_callback, NULL);
+  lower->ops->setcallback(lower, nxsched_period_callback, NULL);
   lower->ops->settimeout(lower, CPULOAD_PERIOD_NOMINAL);
   lower->ops->start(lower);
 }
diff --git a/sched/sched/sched_garbage.c b/sched/sched/sched_garbage.c
index 68248516c1..01d2c3a42d 100644
--- a/sched/sched/sched_garbage.c
+++ b/sched/sched/sched_garbage.c
@@ -48,7 +48,7 @@
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_kucleanup
+ * Name: nxsched_kucleanup
  *
  * Description:
  *   Clean-up deferred de-allocations of user memory
@@ -61,7 +61,7 @@
  *
  ****************************************************************************/

-static inline void sched_kucleanup(void)
+static inline void nxsched_kucleanup(void)
 {
 #ifdef CONFIG_BUILD_KERNEL
   /* REVISIT: It is not safe to defer user allocation in the kernel mode
@@ -105,7 +105,7 @@ static inline void sched_kucleanup(void)
 }

 /****************************************************************************
- * Name: sched_have_kugarbage
+ * Name: nxsched_have_kugarbage
  *
  * Description:
  *   Return TRUE if there is user heap garbage to be collected.
@@ -119,16 +119,16 @@ static inline void sched_kucleanup(void)
  ****************************************************************************/

 #ifndef CONFIG_BUILD_KERNEL
-static inline bool sched_have_kugarbage(void)
+static inline bool nxsched_have_kugarbage(void)
 {
   return (g_delayed_kufree.head != NULL);
 }
 #else
-# define sched_have_kugarbage() false
+# define nxsched_have_kugarbage() false
 #endif

 /****************************************************************************
- * Name: sched_kcleanup
+ * Name: nxsched_kcleanup
  *
  * Description:
  *   Clean-up deferred de-allocations of kernel memory
@@ -143,7 +143,7 @@ static inline bool sched_have_kugarbage(void)

 #if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
-static inline void sched_kcleanup(void)
+static inline void nxsched_kcleanup(void)
 {
   irqstate_t flags;
   FAR void *address;
@@ -175,11 +175,11 @@ static inline void sched_kcleanup(void)
     }
 }
 #else
-# define sched_kcleanup()
+# define nxsched_kcleanup()
 #endif

 /****************************************************************************
- * Name: sched_have_kgarbage
+ * Name: nxsched_have_kgarbage
  *
  * Description:
  *   Return TRUE if there is kernal heap garbage to be collected.
@@ -194,12 +194,12 @@ static inline void sched_kcleanup(void)

 #if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
-static inline bool sched_have_kgarbage(void)
+static inline bool nxsched_have_kgarbage(void)
 {
   return (g_delayed_kfree.head != NULL);
 }
 #else
-# define sched_have_kgarbage() false
+# define nxsched_have_kgarbage() false
 #endif

 /****************************************************************************
@@ -232,11 +232,11 @@ void sched_garbage_collection(void)
 {
   /* Handle deferred deallocations for the kernel heap */

-  sched_kcleanup();
+  nxsched_kcleanup();

   /* Handle deferred deallocations for the user heap */

-  sched_kucleanup();
+  nxsched_kucleanup();

   /* Handle the architecure-specific garbage collection */

@@ -268,6 +268,6 @@ void sched_garbage_collection(void)

 bool sched_have_garbage(void)
 {
-  return (sched_have_kgarbage() || sched_have_kugarbage() ||
+  return (nxsched_have_kgarbage() || nxsched_have_kugarbage() ||
           up_sched_have_garbage());
 }
diff --git a/sched/sched/sched_processtimer.c b/sched/sched/sched_processtimer.c
index b24f2d2808..a7eddd61d3 100644
--- a/sched/sched/sched_processtimer.c
+++ b/sched/sched/sched_processtimer.c
@@ -59,7 +59,7 @@
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_cpu_scheduler
+ * Name: nxsched_cpu_scheduler
  *
  * Description:
  *   Check for operations specific to scheduling policy of the currently
@@ -74,7 +74,7 @@
  ****************************************************************************/

 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static inline void sched_cpu_scheduler(int cpu)
+static inline void nxsched_cpu_scheduler(int cpu)
 {
   FAR struct tcb_s *rtcb = current_task(cpu);

@@ -107,7 +107,7 @@ static inline void sched_cpu_scheduler(int cpu)
 #endif

 /****************************************************************************
- * Name: sched_process_scheduler
+ * Name: nxsched_process_scheduler
  *
  * Description:
  *   Check for operations specific to scheduling policy of the currently
@@ -122,7 +122,7 @@ static inline void sched_cpu_scheduler(int cpu)
  ****************************************************************************/

 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static inline void sched_process_scheduler(void)
+static inline void nxsched_process_scheduler(void)
 {
 #ifdef CONFIG_SMP
   irqstate_t flags;
@@ -142,7 +142,7 @@ static inline void sched_process_scheduler(void)

   for (i = 0; i < CONFIG_SMP_NCPUS; i++)
     {
-      sched_cpu_scheduler(i);
+      nxsched_cpu_scheduler(i);
     }

   leave_critical_section(flags);
@@ -150,11 +150,11 @@ static inline void sched_process_scheduler(void)
 #else
   /* Perform scheduler operations on the single CPUs */

-  sched_cpu_scheduler(0);
+  nxsched_cpu_scheduler(0);
 #endif
 }
 #else
-# define sched_process_scheduler()
+# define nxsched_process_scheduler()
 #endif

 /****************************************************************************
@@ -221,7 +221,7 @@ void sched_process_timer(void)
    * timeslice.
    */

-  sched_process_scheduler();
+  nxsched_process_scheduler();

   /* Process watchdogs */
diff --git a/sched/sched/sched_releasetcb.c b/sched/sched/sched_releasetcb.c
index f55ebb8868..861c2321cb 100644
--- a/sched/sched/sched_releasetcb.c
+++ b/sched/sched/sched_releasetcb.c
@@ -1,7 +1,8 @@
 /****************************************************************************
  * sched/sched/sched_releasetcb.c
  *
- *   Copyright (C) 2007, 2009, 2012-2014 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2007, 2009, 2012-2014, 2019 Gregory Nutt. All rights
+ *   reserved.
  *   Author: Gregory Nutt
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,13 +56,13 @@
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_releasepid
+ * Name: nxsched_releasepid
  *
  * Description: When a task is destroyed, this function must
  *   be called to make its process ID available for re-use.
  ****************************************************************************/

-static void sched_releasepid(pid_t pid)
+static void nxsched_releasepid(pid_t pid)
 {
   int hash_ndx = PIDHASH(pid);

@@ -139,7 +140,7 @@ int sched_releasetcb(FAR struct tcb_s *tcb, uint8_t ttype)

       if (tcb->pid)
         {
-          sched_releasepid(tcb->pid);
+          nxsched_releasepid(tcb->pid);
         }

       /* Delete the thread's stack if one has been allocated */
diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index 61ef8e847e..92d1d1a246 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -1,7 +1,8 @@
 /****************************************************************************
  * sched/sched/sched_setpriority.c
  *
- *   Copyright (C) 2009, 2013, 2016, 2018 Gregory Nutt. All rights reserved.
+ *   Copyright (C) 2009, 2013, 2016, 2018-2019 Gregory Nutt. All rights
+ *   reserved.
  *   Author: Gregory Nutt
  *
  * Redistribution and use in source and binary forms, with or without
@@ -54,7 +55,7 @@
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_nexttcb
+ * Name: nxsched_nexttcb
  *
  * Description:
  *   Get the next highest priority ready-to-run task.
@@ -68,7 +69,7 @@
  ****************************************************************************/

 #ifdef CONFIG_SMP
-static FAR struct tcb_s *sched_nexttcb(FAR struct tcb_s *tcb)
+static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb)
 {
   FAR struct tcb_s *nxttcb = (FAR struct tcb_s *)tcb->flink;
   FAR struct tcb_s *rtrtcb;
@@ -112,7 +113,7 @@ static FAR struct tcb_s *sched_nexttcb(FAR struct tcb_s *tcb)
 #endif

 /****************************************************************************
- * Name: sched_running_setpriority
+ * Name: nxsched_running_setpriority
  *
  * Description:
  *   This function sets the priority of a running task.  This does nothing
@@ -133,15 +134,15 @@ static FAR struct tcb_s *sched_nexttcb(FAR struct tcb_s *tcb)
  *
  ****************************************************************************/

-static inline void sched_running_setpriority(FAR struct tcb_s *tcb,
-                                             int sched_priority)
+static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb,
+                                               int sched_priority)
 {
   FAR struct tcb_s *nxttcb;

   /* Get the TCB of the next highest priority, ready to run task */

 #ifdef CONFIG_SMP
-  nxttcb = sched_nexttcb(tcb);
+  nxttcb = nxsched_nexttcb(tcb);
 #else
   nxttcb = (FAR struct tcb_s *)tcb->flink;
 #endif
@@ -171,7 +172,7 @@ static inline void sched_running_setpriority(FAR struct tcb_s *tcb,
 }

 /****************************************************************************
- * Name: sched_readytorun_setpriority
+ * Name: nxsched_readytorun_setpriority
  *
  * Description:
  *   This function sets the priority of a ready-to-run task.  This may alter
@@ -187,8 +188,8 @@ static inline void sched_running_setpriority(FAR struct tcb_s *tcb,
  *
  ****************************************************************************/

-static void sched_readytorun_setpriority(FAR struct tcb_s *tcb,
-                                         int sched_priority)
+static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb,
+                                           int sched_priority)
 {
   FAR struct tcb_s *rtcb;

@@ -261,7 +262,7 @@ static void sched_readytorun_setpriority(FAR struct tcb_s *tcb,
 }

 /****************************************************************************
- * Name: sched_blocked_setpriority
+ * Name: nxsched_blocked_setpriority
  *
  * Description:
  *   Change the priority of a blocked tasks.  The only issue here is that
@@ -276,8 +277,8 @@ static void sched_readytorun_setpriority(FAR struct tcb_s *tcb,
  *
  ****************************************************************************/

-static inline void sched_blocked_setpriority(FAR struct tcb_s *tcb,
-                                             int sched_priority)
+static inline void nxsched_blocked_setpriority(FAR struct tcb_s *tcb,
+                                               int sched_priority)
 {
   FAR dq_queue_t *tasklist;
   tstate_t task_state = tcb->task_state;
@@ -366,7 +367,7 @@ int nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
        */

       case TSTATE_TASK_RUNNING:
-        sched_running_setpriority(tcb, sched_priority);
+        nxsched_running_setpriority(tcb, sched_priority);
         break;

       /* CASE 2. The task is ready-to-run (but not running) and a context
@@ -377,7 +378,7 @@ int nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
 #ifdef CONFIG_SMP
       case TSTATE_TASK_ASSIGNED:
 #endif
-        sched_readytorun_setpriority(tcb, sched_priority);
+        nxsched_readytorun_setpriority(tcb, sched_priority);
         break;

       /* CASE 3. The task is not in the ready to run list.  Changing its
@@ -385,7 +386,7 @@ int nxsched_setpriority(FAR struct tcb_s *tcb, int sched_priority)
        */

       default:
-        sched_blocked_setpriority(tcb, sched_priority);
+        nxsched_blocked_setpriority(tcb, sched_priority);
         break;
     }
diff --git a/sched/sched/sched_timerexpiration.c b/sched/sched/sched_timerexpiration.c
index 5e854bbf64..f6331aa917 100644
--- a/sched/sched/sched_timerexpiration.c
+++ b/sched/sched/sched_timerexpiration.c
@@ -62,6 +62,7 @@
 /****************************************************************************
  * Pre-processor Definitions
  ****************************************************************************/
+
 /* In the original design, it was planned that sched_timer_reasses() be
  * called whenever there was a change at the head of the ready-to-run
  * list.  That call was intended to establish a new time-slice or to
@@ -117,13 +118,15 @@ uint32_t g_oneshot_maxticks = UINT32_MAX;
  ****************************************************************************/

 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches);
+static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
+                                      bool noswitches);
 #endif
 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches);
+static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches);
 #endif
-static unsigned int sched_timer_process(unsigned int ticks, bool noswitches);
-static void sched_timer_start(unsigned int ticks);
+static unsigned int nxsched_timer_process(unsigned int ticks,
+                                          bool noswitches);
+static void nxsched_timer_start(unsigned int ticks);

 /****************************************************************************
  * Private Data
@@ -156,7 +159,7 @@ static struct timespec g_sched_time;
  ****************************************************************************/

 /****************************************************************************
- * Name: sched_cpu_scheduler
+ * Name: nxsched_cpu_scheduler
  *
  * Description:
  *   Check for operations specific to scheduling policy of the currently
@@ -181,7 +184,8 @@ static struct timespec g_sched_time;
  ****************************************************************************/

 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches)
+static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
+                                      bool noswitches)
 {
   FAR struct tcb_s *rtcb = current_task(cpu);
   FAR struct tcb_s *ntcb = current_task(cpu);
@@ -239,7 +243,7 @@ static uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches)
     {
       /* Recurse just to get the correct return value */

-      return sched_process_scheduler(0, true);
+      return nxsched_process_scheduler(0, true);
     }

   /* Returning zero means that there is no interesting event to be timed */
@@ -258,7 +262,7 @@ static uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches)
 #endif

 /****************************************************************************
- * Name: sched_process_scheduler
+ * Name: nxsched_process_scheduler
  *
  * Description:
  *   Check for operations specific to scheduling policy of the currently
@@ -282,7 +286,7 @@ static uint32_t sched_cpu_scheduler(int cpu, uint32_t ticks, bool noswitches)
  ****************************************************************************/

 #if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
-static uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
+static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches)
 {
 #ifdef CONFIG_SMP
   uint32_t minslice = UINT32_MAX;
@@ -304,7 +308,7 @@ static uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)

   for (i = 0; i < CONFIG_SMP_NCPUS; i++)
     {
-      timeslice = sched_cpu_scheduler(i, ticks, noswitches);
+      timeslice = nxsched_cpu_scheduler(i, ticks, noswitches);
       if (timeslice > 0 && timeslice < minslice)
         {
           minslice = timeslice;
@@ -317,15 +321,15 @@ static uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
 #else
   /* Perform scheduler operations on the single CPUs */

-  return sched_cpu_scheduler(0, ticks, noswitches);
+  return nxsched_cpu_scheduler(0, ticks, noswitches);
 #endif
 }
 #else
-# define sched_process_scheduler(t,n) (0)
+# define nxsched_process_scheduler(t,n) (0)
 #endif

 /****************************************************************************
- * Name: sched_timer_process
+ * Name: nxsched_timer_process
  *
  * Description:
  *   Process events on timer expiration.
@@ -340,7 +344,8 @@ static uint32_t sched_process_scheduler(uint32_t ticks, bool noswitches)
  *
  ****************************************************************************/

-static unsigned int sched_timer_process(unsigned int ticks, bool noswitches)
+static unsigned int nxsched_timer_process(unsigned int ticks,
+                                          bool noswitches)
 {
   unsigned int cmptime = UINT_MAX;
   unsigned int rettime = 0;
@@ -365,7 +370,7 @@ static unsigned int sched_timer_process(unsigned int ticks, bool noswitches)
    * active task.
    */

-  tmp = sched_process_scheduler(ticks, noswitches);
+  tmp = nxsched_process_scheduler(ticks, noswitches);
   if (tmp > 0 && tmp < cmptime)
     {
       rettime = tmp;
@@ -375,7 +380,7 @@ static unsigned int sched_timer_process(unsigned int ticks, bool noswitches)
 }

 /****************************************************************************
- * Name: sched_timer_start
+ * Name: nxsched_timer_start
  *
  * Description:
  *   Start the interval timer.
@@ -388,7 +393,7 @@ static unsigned int sched_timer_process(unsigned int ticks, bool noswitches)
  *
  ****************************************************************************/

-static void sched_timer_start(unsigned int ticks)
+static void nxsched_timer_start(unsigned int ticks)
 {
 #ifdef CONFIG_HAVE_LONG_LONG
   uint64_t usecs;
@@ -522,8 +527,8 @@ void sched_alarm_expiration(FAR const struct timespec *ts)

   /* Process the timer ticks and set up the next interval (or not) */

-  nexttime = sched_timer_process(elapsed, false);
-  sched_timer_start(nexttime);
+  nexttime = nxsched_timer_process(elapsed, false);
+  nxsched_timer_start(nexttime);
 }
 #endif

@@ -562,8 +567,8 @@ void sched_timer_expiration(void)

   /* Process the timer ticks and set up the next interval (or not) */

-  nexttime = sched_timer_process(elapsed, false);
-  sched_timer_start(nexttime);
+  nexttime = nxsched_timer_process(elapsed, false);
+  nxsched_timer_start(nexttime);
 }
 #endif

@@ -638,7 +643,7 @@ unsigned int sched_timer_cancel(void)

   /* Process the timer ticks and return the next interval */

-  return sched_timer_process(elapsed, true);
+  return nxsched_timer_process(elapsed, true);
 }
 #else
 unsigned int sched_timer_cancel(void)
@@ -679,7 +684,7 @@ unsigned int sched_timer_cancel(void)

   /* Process the timer ticks and return the next interval */

-  return sched_timer_process(elapsed, true);
+  return nxsched_timer_process(elapsed, true);
 }
 #endif

@@ -718,8 +723,8 @@ void sched_timer_resume(void)
    * and set up the next interval (or not).
    */

-  nexttime = sched_timer_process(0, true);
-  sched_timer_start(nexttime);
+  nexttime = nxsched_timer_process(0, true);
+  nxsched_timer_start(nexttime);
 }

 /****************************************************************************
@@ -763,7 +768,7 @@ void sched_timer_reassess(void)

   /* Cancel and restart the timer */

   nexttime = sched_timer_cancel();
-  sched_timer_start(nexttime);
+  nxsched_timer_start(nexttime);
 }
 #endif /* CONFIG_SCHED_TICKLESS */
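A note on the mechanism whose functions are renamed above: as the hunks in
sched_cpuload_oneshot.c show, CPU load sampling is driven by a one-shot timer
whose callback first performs the measurement (nxsched_process_cpuload()) and
then re-arms the one-shot ("Then restart the oneshot"), so sampling continues
indefinitely.  The sketch below is illustrative only -- it is not NuttX code
and not part of this patch.  It reproduces the same self-rearming pattern with
portable POSIX timers; sample_cpuload() and SAMPLE_PERIOD_NS are hypothetical
stand-ins for nxsched_process_cpuload() and the configured sampling interval.

/* Illustrative sketch only: NOT NuttX code and NOT part of this patch.
 * It mimics, with standard POSIX timers, the self-rearming pattern used by
 * nxsched_oneshot_callback() above: the expiration handler does its periodic
 * work and then re-arms the one-shot timer.
 */

#define _POSIX_C_SOURCE 200809L

#include <signal.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define SAMPLE_PERIOD_NS 10000000L  /* 10 ms nominal sampling period (assumed) */

static timer_t g_oneshot;

static void rearm_oneshot(void)
{
  /* it_interval stays zero, so the timer fires exactly once per arm */

  struct itimerspec its;

  memset(&its, 0, sizeof(its));
  its.it_value.tv_nsec = SAMPLE_PERIOD_NS;
  timer_settime(g_oneshot, 0, &its, NULL);
}

static void sample_cpuload(void)
{
  /* Stand-in for the real measurement; write() is async-signal-safe */

  (void)write(STDOUT_FILENO, "sample\n", 7);
}

static void expiration_handler(int signo)
{
  (void)signo;
  sample_cpuload();  /* Do the periodic work ... */
  rearm_oneshot();   /* ... then restart the one-shot, as the callback does */
}

int main(void)
{
  struct sigaction sa;
  struct sigevent sev;

  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = expiration_handler;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGRTMIN, &sa, NULL);

  memset(&sev, 0, sizeof(sev));
  sev.sigev_notify = SIGEV_SIGNAL;
  sev.sigev_signo  = SIGRTMIN;
  timer_create(CLOCK_MONOTONIC, &sev, &g_oneshot);

  rearm_oneshot();   /* Arm the first expiration */

  for (int i = 0; i < 5; i++)
    {
      pause();       /* Each pause() returns after one expiration */
    }

  return 0;
}

On Linux this typically builds with "cc sketch.c -lrt".  In NuttX itself the
re-arm goes through the oneshot lower-half driver (ONESHOT_START with the
nxsched_oneshot_callback argument), optionally with entropy added to the
interval, as the diff above shows.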