diff --git a/sched/clock/clock_systime_ticks.c b/sched/clock/clock_systime_ticks.c
index 766f7a14ca..a638719f94 100644
--- a/sched/clock/clock_systime_ticks.c
+++ b/sched/clock/clock_systime_ticks.c
@@ -78,86 +78,51 @@
 clock_t clock_systime_ticks(void)
 {
 #ifdef CONFIG_RTC_HIRES
-  /* Do we have a high-resolution RTC that can provide us with the time? */
-
-  if (g_rtc_enabled)
+  struct timespec ts =
     {
-      struct timespec ts;
+      0
+    };
 
-      /* Get the time from the platform specific hardware */
+  clock_systime_timespec(&ts);
+  return clock_time2ticks(&ts);
+#elif defined(CONFIG_SCHED_TICKLESS_TICK_ARGUMENT)
+  clock_t ticks = 0;
 
-      if (clock_systime_timespec(&ts) == OK)
-        {
-          /* Convert to a 64-bit value in microseconds,
-           * then in clock tick units.
-           */
-
-          return clock_time2ticks(&ts);
-        }
-      else
-        {
-          return 0;
-        }
-    }
-  else
-#endif
-    {
-      /* In tickless mode, all timing is controlled by platform-specific
-       * code. Let the platform timer do the work.
-       */
-
-#if defined(CONFIG_SCHED_TICKLESS_TICK_ARGUMENT)
-      clock_t ticks;
-      if (up_timer_gettick(&ticks) == OK)
-        {
-          return ticks;
-        }
-      else
-        {
-          return 0;
-        }
+  up_timer_gettick(&ticks);
+  return ticks;
 #elif defined(CONFIG_SCHED_TICKLESS)
-      struct timespec ts;
-      if (up_timer_gettime(&ts) == OK)
-        {
-          return clock_time2ticks(&ts);
-        }
-      else
-        {
-          return 0;
-        }
+  struct timespec ts =
+    {
+      0
+    };
+
+  up_timer_gettime(&ts);
+  return clock_time2ticks(&ts);
 #elif defined(CONFIG_SYSTEM_TIME64)
+  clock_t sample;
+  clock_t verify;
 
-      clock_t sample;
-      clock_t verify;
+  /* 64-bit accesses are not atomic on most architectures. The following
+   * loop samples the 64-bit timer twice and loops in the rare event that
+   * there was 32-bit rollover between samples.
+   *
+   * If there is no 32-bit rollover, then:
+   *
+   * - The MS 32-bits of each sample will be the same, and
+   * - The LS 32-bits of the second sample will be greater than or equal
+   *   to the LS 32-bits for the first sample.
+   */
 
-      /* 64-bit accesses are not atomic on most architectures. The following
-       * loop samples the 64-bit timer twice and loops in the rare event that
-       * there was 32-bit rollover between samples.
-       *
-       * If there is no 32-bit rollover, then:
-       *
-       * - The MS 32-bits of each sample will be the same, and
-       * - The LS 32-bits of the second sample will be greater than or equal
-       *   to the LS 32-bits for the first sample.
-       */
-
-      do
-        {
-          verify = g_system_ticks;
-          sample = g_system_ticks;
-        }
-      while ((sample & TIMER_MASK32) < (verify & TIMER_MASK32) ||
-             (sample & ~TIMER_MASK32) != (verify & ~TIMER_MASK32));
-
-      return sample;
-
-#else /* CONFIG_SYSTEM_TIME64 */
-
-      /* Return the current system time */
-
-      return g_system_ticks;
-
-#endif /* CONFIG_SYSTEM_TIME64 */
+  do
+    {
+      verify = g_system_ticks;
+      sample = g_system_ticks;
     }
+  while ((sample & TIMER_MASK32) < (verify & TIMER_MASK32) ||
+         (sample & ~TIMER_MASK32) != (verify & ~TIMER_MASK32));
+
+  return sample;
+#else
+  return g_system_ticks;
+#endif
 }
diff --git a/sched/clock/clock_systime_timespec.c b/sched/clock/clock_systime_timespec.c
index 727b7a8bfe..b27c548631 100644
--- a/sched/clock/clock_systime_timespec.c
+++ b/sched/clock/clock_systime_timespec.c
@@ -33,6 +33,7 @@
 
 #include 
 #include 
+#include 
 
 #include "clock/clock.h"
 
@@ -61,76 +62,31 @@
 int clock_systime_timespec(FAR struct timespec *ts)
 {
 #ifdef CONFIG_RTC_HIRES
-  /* Do we have a high-resolution RTC that can provide us with the time? */
-
   if (g_rtc_enabled)
     {
-      int ret;
+      irqstate_t flags;
 
-      /* Get the hi-resolution time from the RTC. This will return the
-       * current time, not the time since power up.
-       */
+      up_rtc_gettime(ts);
 
-      ret = up_rtc_gettime(ts);
-      if (ret < 0)
-        {
-          return ret;
-        }
-
-      /* Subtract the base time to this in order to convert this to the
-       * time since power up.
-       */
-
-      DEBUGASSERT(ts->tv_sec >= g_basetime.tv_sec);
-      if (ts->tv_sec < g_basetime.tv_sec)
-        {
-          /* Negative times are not supported */
-
-          return -ENOSYS;
-        }
-
-      ts->tv_sec -= g_basetime.tv_sec;
-      if (ts->tv_nsec < g_basetime.tv_nsec)
-        {
-          /* Borrow */
-
-          if (ts->tv_sec < 1)
-            {
-              /* Negative times are not supported */
-
-              return -ENOSYS;
-            }
-
-          ts->tv_sec--;
-          ts->tv_nsec += NSEC_PER_SEC;
-        }
-
-      ts->tv_nsec -= g_basetime.tv_nsec;
-      return OK;
+      flags = spin_lock_irqsave(NULL);
+      clock_timespec_subtract(ts, &g_basetime, ts);
+      spin_unlock_irqrestore(NULL, flags);
     }
   else
-#endif
     {
-      /* In tickless mode, all timing is controlled by platform-specific
-       * code. Let the platform timer do the work.
-       */
-
-#if defined(CONFIG_SCHED_TICKLESS_TICK_ARGUMENT)
-      clock_t ticks;
-      int ret;
-
-      ret = up_timer_gettick(&ticks);
-      clock_ticks2time(ts, ticks);
-      return ret;
-#elif defined(CONFIG_SCHED_TICKLESS)
-      return up_timer_gettime(ts);
-#else
-      /* 64-bit microsecond calculations should improve our accuracy
-       * when the clock period is in units of microseconds.
-       */
-
-      clock_ticks2time(ts, clock_systime_ticks());
-      return OK;
-#endif
+      ts->tv_sec = 0;
+      ts->tv_nsec = 0;
    }
+#elif defined(CONFIG_SCHED_TICKLESS_TICK_ARGUMENT)
+  clock_t ticks = 0;
+
+  up_timer_gettick(&ticks);
+  clock_ticks2time(ts, ticks);
+#elif defined(CONFIG_SCHED_TICKLESS)
+  up_timer_gettime(ts);
+#else
+  clock_ticks2time(ts, g_system_ticks);
+#endif
+  return 0;
 }
+
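
Note (not part of the patch): the CONFIG_SYSTEM_TIME64 branch of clock_systime_ticks() keeps the double-sample loop because a 64-bit load of g_system_ticks is not atomic on 32-bit CPUs. Below is a minimal standalone sketch of the same retry technique; the names g_ticks64, sample_ticks64 and TICK_MASK32 are hypothetical stand-ins for the NuttX internals, not the kernel's own symbols.

#include <stdint.h>

#define TICK_MASK32 UINT64_C(0xffffffff)

/* Hypothetical 64-bit tick counter updated from a timer interrupt.  On a
 * 32-bit CPU the load is performed as two 32-bit accesses, so a concurrent
 * update can produce a torn value.
 */

static volatile uint64_t g_ticks64;

static uint64_t sample_ticks64(void)
{
  uint64_t verify;
  uint64_t sample;

  /* Read twice and retry when the low half went backwards or the high half
   * changed between the reads, which means a 32-bit rollover (or torn read)
   * happened in between.
   */

  do
    {
      verify = g_ticks64;
      sample = g_ticks64;
    }
  while ((sample & TICK_MASK32) < (verify & TICK_MASK32) ||
         (sample & ~TICK_MASK32) != (verify & ~TICK_MASK32));

  return sample;
}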
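
Note (not part of the patch): in clock_systime_timespec() the open-coded borrow arithmetic is replaced by clock_timespec_subtract(ts, &g_basetime, ts) under a spinlock. The sketch below shows that kind of timespec subtraction in isolation, assuming the minuend is not smaller than the subtrahend; the helper name timespec_subtract is hypothetical and is not the NuttX API.

#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* result = end - start, borrowing one second when the nanosecond field
 * would go negative.  result may alias end, mirroring how the patch passes
 * ts as both the input and the output argument.
 */

static void timespec_subtract(const struct timespec *end,
                              const struct timespec *start,
                              struct timespec *result)
{
  time_t sec  = end->tv_sec - start->tv_sec;
  long   nsec = end->tv_nsec - start->tv_nsec;

  if (nsec < 0)
    {
      nsec += NSEC_PER_SEC;
      sec--;
    }

  result->tv_sec  = sec;
  result->tv_nsec = nsec;
}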