sched/wdog: Refactor wdog module

This commit refactors the wdog module to use an absolute time representation internally. The main improvements include:
1. Fixed recursive watchdog handling caused by calling wd_start from within a watchdog timeout callback.
2. Simplified timer processing to improve performance and enhance code readability.
3. Improved timer accuracy.
4. Reduced critical-section and interrupt-disable time, improving real-time performance.

Signed-off-by: ouyangxiangzhen <ouyangxiangzhen@xiaomi.com>
Signed-off-by: ligd <liguiding1@xiaomi.com>
ouyangxiangzhen 2024-06-18 19:57:57 +08:00 committed by Xiang Xiao
parent 7dabc6ff2f
commit 3b111c8b99
10 changed files with 355 additions and 342 deletions

View File

@ -390,6 +390,47 @@ void clock_timespec_subtract(FAR const struct timespec *ts1,
FAR const struct timespec *ts2,
FAR struct timespec *ts3);
/****************************************************************************
* Name: clock_compare
*
* Description:
* This function is used to check whether the expiration time has been
* reached. It takes tick wrap-around into consideration.
*
* Input Parameters:
* tick1 - Expected time in clock ticks
* tick2 - Current time in clock ticks
*
* Returned Value:
* true - The expected time has expired.
* false - Otherwise.
*
* Assumptions:
* The delay value should be of type sclock_t.
*
****************************************************************************/
/* clock_compare considers tick wrap-around, discussed as follows:
 * Assume clock_t is a 64-bit data type.
 *
 * Case 1: If the unsigned difference (tick2 - tick1) is less than 2^63,
 *         tick1 is considered expired: tick2 has reached tick1, either
 *         directly or after tick2 has wrapped around.
 *
 * Case 2: If the unsigned difference (tick2 - tick1) is greater than or
 *         equal to 2^63, tick1 is considered not yet expired.
 *
 * Since bit 63 is the sign bit, this reduces to:
 * (sclock_t)(tick2 - tick1) >= 0.
 *
 * However, this only works correctly under one assumption:
 * the timer delay must not exceed SCLOCK_MAX (2^63 - 1) ticks.
 *
 * The range of the delay data type sclock_t,
 * [-(SCLOCK_MAX + 1), SCLOCK_MAX], ensures this assumption holds.
 */
#define clock_compare(tick1, tick2) ((sclock_t)((tick2) - (tick1)) >= 0)
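To make the wrap-around argument concrete, here is a small standalone sketch that mirrors the macro with fixed 64-bit types. The uint64_t/int64_t stand-ins and the compare() helper are illustrative assumptions only; the real clock_t/sclock_t widths are configuration dependent.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Same expression as clock_compare(tick1, tick2): tick1 is the expected
 * (deadline) tick, tick2 is the current tick.
 */

static bool compare(uint64_t tick1, uint64_t tick2)
{
  return (int64_t)(tick2 - tick1) >= 0;
}

int main(void)
{
  /* Current time has passed the deadline: expired */

  printf("%d\n", compare(100, 200));              /* prints 1 */

  /* Deadline still in the future: not expired */

  printf("%d\n", compare(200, 100));              /* prints 0 */

  /* Deadline set just before the counter wraps, current time has wrapped
   * past zero; the unsigned difference stays below 2^63, so the deadline
   * is still correctly reported as expired.
   */

  printf("%d\n", compare(UINT64_MAX - 5, 10));    /* prints 1 */

  /* Violating the assumption: a deadline more than SCLOCK_MAX ticks in
   * the past is misreported as pending, which is why delays must not
   * exceed SCLOCK_MAX.
   */

  printf("%d\n", compare(0, (uint64_t)1 << 63));  /* prints 0 */
  return 0;
}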
/****************************************************************************
* Name: clock_isleapyear
*

View File

@ -83,7 +83,7 @@ struct wdog_s
#ifdef CONFIG_PIC
FAR void *picbase; /* PIC base address */
#endif
sclock_t lag; /* Timer associated with the delay */
clock_t expired; /* Absolute expiration time of the timer */
};
/****************************************************************************
@ -137,6 +137,45 @@ extern "C"
int wd_start(FAR struct wdog_s *wdog, sclock_t delay,
wdentry_t wdentry, wdparm_t arg);
/****************************************************************************
* Name: wd_start_absolute
*
* Description:
* This function adds a watchdog timer to the active timer queue. The
* specified watchdog function at 'wdentry' will be called from the
* interrupt level when the specified absolute time has been reached.
* Watchdog timers may be started from the interrupt level.
*
* Watchdog timers execute in the address environment that was in effect
* when wd_start_absolute() is called.
*
* Watchdog timers execute only once.
*
* To replace either the timeout delay or the function to be executed,
* call wd_start_absolute() again with the same wdog; only the most recent
* start on a given watchdog ID has any effect.
*
* Input Parameters:
* wdog - Watchdog ID
* ticks - Absolute time in clock ticks
* wdentry - Function to call on timeout
* arg - Parameter to pass to wdentry.
*
* NOTE: The parameter must be of type wdparm_t.
*
* Returned Value:
* Zero (OK) is returned on success; a negated errno value is returned to
* indicate the nature of any failure.
*
* Assumptions:
* The watchdog routine runs in the context of the timer interrupt handler
* and is subject to all ISR restrictions.
*
****************************************************************************/
int wd_start_absolute(FAR struct wdog_s *wdog, clock_t ticks,
wdentry_t wdentry, wdparm_t arg);
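As a usage illustration only (the g_mywdog instance, my_timeout callback, and one-second period below are hypothetical and not part of this change), a kernel-side caller could compute the absolute deadline from the current tick counter, which is also what wd_start() does internally after this refactor:

#include <nuttx/clock.h>
#include <nuttx/wdog.h>

static struct wdog_s g_mywdog;   /* Hypothetical watchdog instance */

/* Called from the timer interrupt context once the deadline is reached */

static void my_timeout(wdparm_t arg)
{
  /* With the absolute representation it is safe to re-arm the watchdog
   * from its own callback: compute the next deadline from "now".
   */

  wd_start_absolute(&g_mywdog, clock_systime_ticks() + SEC2TICK(1),
                    my_timeout, arg);
}

void my_module_start(void)
{
  /* Fire roughly one second from now */

  wd_start_absolute(&g_mywdog, clock_systime_ticks() + SEC2TICK(1),
                    my_timeout, (wdparm_t)0);
}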
/****************************************************************************
* Name: wd_cancel
*

View File

@ -340,7 +340,7 @@ int nxsched_reprioritize(FAR struct tcb_s *tcb, int sched_priority);
/* Support for tickless operation */
#ifdef CONFIG_SCHED_TICKLESS
unsigned int nxsched_cancel_timer(void);
clock_t nxsched_cancel_timer(void);
void nxsched_resume_timer(void);
void nxsched_reassess_timer(void);
#else

View File

@ -157,7 +157,7 @@ static inline void nxsched_process_scheduler(void)
****************************************************************************/
#ifdef CONFIG_SMP
static inline void nxsched_process_wdtimer(void)
static inline void nxsched_process_wdtimer(clock_t ticks)
{
irqstate_t flags;
@ -171,11 +171,11 @@ static inline void nxsched_process_wdtimer(void)
*/
flags = enter_critical_section();
wd_timer();
wd_timer(ticks);
leave_critical_section(flags);
}
#else
# define nxsched_process_wdtimer() wd_timer()
# define nxsched_process_wdtimer(ticks) wd_timer(ticks)
#endif
/****************************************************************************
@ -236,7 +236,7 @@ void nxsched_process_timer(void)
/* Process watchdogs */
nxsched_process_wdtimer();
nxsched_process_wdtimer(clock_systime_ticks());
#ifdef CONFIG_SYSTEMTICK_HOOK
/* Call out to a user-provided function in order to perform board-specific,

View File

@ -71,28 +71,28 @@ uint32_t g_oneshot_maxticks = UINT32_MAX;
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
bool noswitches);
static clock_t nxsched_cpu_scheduler(int cpu, clock_t elapsed,
bool noswitches);
#endif
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches);
static clock_t nxsched_process_scheduler(clock_t elapsed, bool noswitches);
#endif
static unsigned int nxsched_timer_process(unsigned int ticks,
bool noswitches);
static void nxsched_timer_start(unsigned int ticks);
static clock_t nxsched_timer_process(clock_t ticks, clock_t elapsed,
bool noswitches);
static void nxsched_timer_start(clock_t interval);
/****************************************************************************
* Private Data
****************************************************************************/
#ifdef CONFIG_SCHED_TICKLESS_ALARM
/* This is the time that the timer was stopped. All future times are
/* This is the tick at which the timer was stopped. All future times are
* calculated against this time. It must be valid at all times when
* the timer is not running.
*/
static clock_t g_stop_time;
#else
static clock_t g_timer_tick;
#ifndef CONFIG_SCHED_TICKLESS_ALARM
/* This is the duration of the currently active timer or, when
* nxsched_timer_expiration() is called, the duration of interval timer
* that just expired. The value zero means that no timer was active.
@ -167,7 +167,7 @@ int up_timer_tick_cancel(FAR clock_t *ticks)
*
* Input Parameters:
* cpu - The CPU that we are performing the scheduler operations on.
* ticks - The number of ticks that have elapsed on the interval timer.
* elapsed - The number of ticks that have elapsed on the interval timer.
* noswitches - True: Can't do context switches now.
*
* Returned Value:
@ -184,12 +184,12 @@ int up_timer_tick_cancel(FAR clock_t *ticks)
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
bool noswitches)
static clock_t nxsched_cpu_scheduler(int cpu, clock_t elapsed,
bool noswitches)
{
FAR struct tcb_s *rtcb = current_task(cpu);
FAR struct tcb_s *ntcb = current_task(cpu);
uint32_t ret = 0;
clock_t ret = 0;
#if CONFIG_RR_INTERVAL > 0
/* Check if the currently executing task uses round robin scheduling. */
@ -200,7 +200,7 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
* timeslice.
*/
ret = nxsched_process_roundrobin(rtcb, ticks, noswitches);
ret = nxsched_process_roundrobin(rtcb, elapsed, noswitches);
}
#endif
@ -226,7 +226,7 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
* budget.
*/
ret = nxsched_process_sporadic(rtcb, ticks, noswitches);
ret = nxsched_process_sporadic(rtcb, elapsed, noswitches);
}
#endif
@ -259,7 +259,7 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
* active task on a single CPU.
*
* Input Parameters:
* ticks - The number of ticks that have elapsed on the interval timer.
* elapsed - The number of ticks that have elapsed on the interval timer.
* noswitches - True: Can't do context switches now.
*
* Returned Value:
@ -276,11 +276,11 @@ static uint32_t nxsched_cpu_scheduler(int cpu, uint32_t ticks,
****************************************************************************/
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches)
static clock_t nxsched_process_scheduler(clock_t elapsed, bool noswitches)
{
#ifdef CONFIG_SMP
uint32_t minslice = UINT32_MAX;
uint32_t timeslice;
clock_t minslice = CLOCK_MAX;
clock_t timeslice;
irqstate_t flags;
int i;
@ -298,7 +298,7 @@ static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches)
for (i = 0; i < CONFIG_SMP_NCPUS; i++)
{
timeslice = nxsched_cpu_scheduler(i, ticks, noswitches);
timeslice = nxsched_cpu_scheduler(i, elapsed, noswitches);
if (timeslice > 0 && timeslice < minslice)
{
minslice = timeslice;
@ -306,12 +306,12 @@ static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches)
}
leave_critical_section(flags);
return minslice < UINT32_MAX ? minslice : 0;
return minslice < CLOCK_MAX ? minslice : 0;
#else
/* Perform scheduler operations on the single CPUs */
return nxsched_cpu_scheduler(0, ticks, noswitches);
return nxsched_cpu_scheduler(0, elapsed, noswitches);
#endif
}
#else
@ -334,10 +334,10 @@ static uint32_t nxsched_process_scheduler(uint32_t ticks, bool noswitches)
****************************************************************************/
#ifdef CONFIG_SMP
static inline unsigned int nxsched_process_wdtimer(uint32_t ticks,
bool noswitches)
static inline_function clock_t nxsched_process_wdtimer(clock_t ticks,
bool noswitches)
{
unsigned int ret;
clock_t ret;
irqstate_t flags;
/* We are in an interrupt handler and, as a consequence, interrupts are
@ -356,7 +356,7 @@ static inline unsigned int nxsched_process_wdtimer(uint32_t ticks,
return ret;
}
#else
# define nxsched_process_wdtimer(t,n) wd_timer(t,n)
# define nxsched_process_wdtimer(t, n) wd_timer(t, n)
#endif
/****************************************************************************
@ -375,11 +375,11 @@ static inline unsigned int nxsched_process_wdtimer(uint32_t ticks,
*
****************************************************************************/
static unsigned int nxsched_timer_process(unsigned int ticks,
bool noswitches)
static clock_t nxsched_timer_process(clock_t ticks, clock_t elapsed,
bool noswitches)
{
unsigned int rettime = 0;
unsigned int tmp;
clock_t rettime = 0;
clock_t tmp;
#ifdef CONFIG_CLOCK_TIMEKEEPING
/* Process wall time */
@ -392,7 +392,7 @@ static unsigned int nxsched_timer_process(unsigned int ticks,
* switches can occur)
*/
nxsched_process_cpuload_ticks(ticks);
nxsched_process_cpuload_ticks(elapsed);
#endif
/* Process watchdogs */
@ -407,7 +407,7 @@ static unsigned int nxsched_timer_process(unsigned int ticks,
* active task.
*/
tmp = nxsched_process_scheduler(ticks, noswitches);
tmp = nxsched_process_scheduler(elapsed, noswitches);
#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
if (tmp > 0 && tmp < rettime)
@ -433,16 +433,16 @@ static unsigned int nxsched_timer_process(unsigned int ticks,
*
****************************************************************************/
static void nxsched_timer_start(unsigned int ticks)
static void nxsched_timer_start(clock_t interval)
{
int ret;
if (ticks > 0)
if (interval > 0)
{
#ifdef CONFIG_SCHED_TICKLESS_LIMIT_MAX_SLEEP
if (ticks > g_oneshot_maxticks)
if (interval > g_oneshot_maxticks)
{
ticks = g_oneshot_maxticks;
interval = g_oneshot_maxticks;
}
#endif
@ -451,15 +451,15 @@ static void nxsched_timer_start(unsigned int ticks)
* to the time when last stopped the timer).
*/
ret = up_alarm_tick_start(g_stop_time + ticks);
ret = up_alarm_tick_start(g_timer_tick + interval);
#else
/* Save new timer interval */
g_timer_interval = ticks;
g_timer_interval = interval;
/* [Re-]start the interval timer */
ret = up_timer_tick_start(ticks);
ret = up_timer_tick_start(interval);
#endif
if (ret < 0)
@ -497,8 +497,8 @@ static void nxsched_timer_start(unsigned int ticks)
#ifdef CONFIG_SCHED_TICKLESS_ALARM
void nxsched_alarm_tick_expiration(clock_t ticks)
{
unsigned int elapsed;
unsigned int nexttime;
clock_t elapsed;
clock_t nexttime;
#ifdef CONFIG_SMP
irqstate_t flags;
@ -514,11 +514,11 @@ void nxsched_alarm_tick_expiration(clock_t ticks)
/* Calculate elapsed */
elapsed = ticks - g_stop_time;
elapsed = ticks - g_timer_tick;
/* Save the time that the alarm occurred */
g_stop_time = ticks;
g_timer_tick = ticks;
#ifdef CONFIG_SCHED_SPORADIC
/* Save the last time that the scheduler ran */
@ -528,7 +528,8 @@ void nxsched_alarm_tick_expiration(clock_t ticks)
/* Process the timer ticks and set up the next interval (or not) */
nexttime = nxsched_timer_process(elapsed, false);
nexttime = nxsched_timer_process(ticks, elapsed, false);
nxsched_timer_start(nexttime);
#ifdef CONFIG_SMP
leave_critical_section(flags);
@ -565,8 +566,8 @@ void nxsched_alarm_expiration(FAR const struct timespec *ts)
#ifndef CONFIG_SCHED_TICKLESS_ALARM
void nxsched_timer_expiration(void)
{
unsigned int elapsed;
unsigned int nexttime;
clock_t elapsed;
clock_t nexttime;
irqstate_t flags;
/* If we are running on a single CPU architecture, then we know interrupts
@ -578,6 +579,8 @@ void nxsched_timer_expiration(void)
flags = enter_critical_section();
up_timer_gettick(&g_timer_tick);
/* Get the interval associated with last expiration */
elapsed = g_timer_interval;
@ -591,7 +594,7 @@ void nxsched_timer_expiration(void)
/* Process the timer ticks and set up the next interval (or not) */
nexttime = nxsched_timer_process(elapsed, false);
nexttime = nxsched_timer_process(g_timer_tick, elapsed, false);
nxsched_timer_start(nexttime);
leave_critical_section(flags);
}
@ -621,10 +624,10 @@ void nxsched_timer_expiration(void)
****************************************************************************/
#ifdef CONFIG_SCHED_TICKLESS_ALARM
unsigned int nxsched_cancel_timer(void)
clock_t nxsched_cancel_timer(void)
{
clock_t ticks;
unsigned int elapsed;
clock_t elapsed;
/* Cancel the alarm and get the time that the alarm was cancelled.
* If the alarm was not enabled (or, perhaps, just expired since
@ -632,40 +635,34 @@ unsigned int nxsched_cancel_timer(void)
* current time.
*/
ticks = g_stop_time;
ticks = g_timer_tick;
up_alarm_tick_cancel(&g_stop_time);
up_alarm_tick_cancel(&g_timer_tick);
#ifdef CONFIG_SCHED_SPORADIC
/* Save the last time that the scheduler ran */
g_sched_time = g_stop_time;
g_sched_time = g_timer_tick;
#endif
/* Convert this to the elapsed time */
/* Convert this to the elapsed time and update clock tickbase */
elapsed = g_stop_time - ticks;
elapsed = g_timer_tick - ticks;
/* Process the timer ticks and return the next interval */
return nxsched_timer_process(elapsed, true);
return nxsched_timer_process(g_timer_tick, elapsed, true);
}
#else
unsigned int nxsched_cancel_timer(void)
clock_t nxsched_cancel_timer(void)
{
clock_t ticks;
unsigned int elapsed;
clock_t elapsed;
/* Get the time remaining on the interval timer and cancel the timer. */
up_timer_tick_cancel(&ticks);
#ifdef CONFIG_SCHED_SPORADIC
/* Save the last time that the scheduler ran */
g_sched_time = ticks;
#endif
DEBUGASSERT(ticks <= g_timer_interval);
/* Handle the partial timer. This will reassess all timer conditions and
@ -676,10 +673,17 @@ unsigned int nxsched_cancel_timer(void)
elapsed = g_timer_interval - ticks;
g_timer_interval = 0;
g_timer_tick += elapsed;
#ifdef CONFIG_SCHED_SPORADIC
/* Save the last time that the scheduler ran */
g_sched_time = g_timer_tick;
#endif
/* Process the timer ticks and return the next interval */
return nxsched_timer_process(elapsed, true);
return nxsched_timer_process(g_timer_tick, elapsed, true);
}
#endif
@ -699,14 +703,14 @@ unsigned int nxsched_cancel_timer(void)
*
* Assumptions:
* This function is called right after nxsched_cancel_timer(). If
* CONFIG_SCHED_TICKLESS_ALARM=y, then g_stop_time must be the value time
* when the timer was cancelled.
* CONFIG_SCHED_TICKLESS_ALARM=y, then g_timer_tick must be the
* tick value at which the timer was cancelled.
*
****************************************************************************/
void nxsched_resume_timer(void)
{
unsigned int nexttime;
clock_t nexttime;
#ifdef CONFIG_SCHED_SPORADIC
/* Save the last time that the scheduler ran */
@ -718,7 +722,7 @@ void nxsched_resume_timer(void)
* and set up the next interval (or not).
*/
nexttime = nxsched_timer_process(0, true);
nexttime = nxsched_timer_process(g_timer_tick, 0, true);
nxsched_timer_start(nexttime);
}
@ -758,7 +762,11 @@ void nxsched_resume_timer(void)
void nxsched_reassess_timer(void)
{
unsigned int nexttime;
clock_t nexttime;
#ifndef CONFIG_SCHED_TICKLESS_ALARM
up_timer_gettick(&g_timer_tick);
#endif
/* Cancel and restart the timer */

View File

@ -58,7 +58,11 @@
int wd_cancel(FAR struct wdog_s *wdog)
{
irqstate_t flags;
int ret = -EINVAL;
if (wdog == NULL)
{
return -EINVAL;
}
/* Prohibit timer interactions with the timer queue until the
* cancellation is complete
@ -66,23 +70,11 @@ int wd_cancel(FAR struct wdog_s *wdog)
flags = enter_critical_section();
/* Make sure that the watchdog is initialized (non-NULL) and is still
* active.
*/
/* Make sure that the watchdog is still active. */
if (wdog != NULL && WDOG_ISACTIVE(wdog))
if (WDOG_ISACTIVE(wdog))
{
bool head = list_is_head(&g_wdactivelist, &wdog->node);
FAR struct wdog_s *next = list_next_entry(wdog, struct wdog_s, node);
/* If there is a watchdog in the timer queue after the one that
* is being canceled, then it inherits the remaining ticks.
*/
if (next)
{
next->lag += wdog->lag;
}
/* Now, remove the watchdog from the timer queue */
@ -101,12 +93,8 @@ int wd_cancel(FAR struct wdog_s *wdog)
nxsched_reassess_timer();
}
/* Return success */
ret = OK;
}
leave_critical_section(flags);
return ret;
return OK;
}

View File

@ -52,32 +52,14 @@
sclock_t wd_gettime(FAR struct wdog_s *wdog)
{
irqstate_t flags;
sclock_t delay;
/* Verify the wdog */
flags = enter_critical_section();
if (wdog != NULL && WDOG_ISACTIVE(wdog))
if (wdog == NULL || !WDOG_ISACTIVE(wdog))
{
/* Traverse the watchdog list accumulating lag times until we find the
* wdog that we are looking for
*/
FAR struct wdog_s *curr;
sclock_t delay = 0;
list_for_every_entry(&g_wdactivelist, curr, struct wdog_s, node)
{
delay += curr->lag;
if (curr == wdog)
{
delay -= wd_elapse();
leave_critical_section(flags);
return delay < 0 ? 0 : delay;
}
}
return 0;
}
leave_critical_section(flags);
return 0;
delay = wdog->expired - clock_systime_ticks();
return delay < 0 ? 0 : delay;
}

View File

@ -39,14 +39,6 @@
struct list_node g_wdactivelist = LIST_INITIAL_VALUE(g_wdactivelist);
/* This is wdog tickbase, for wd_gettime() may called many times
* between 2 times of wd_timer(), we use it to update wd_gettime().
*/
#ifdef CONFIG_SCHED_TICKLESS
clock_t g_wdtickbase;
#endif
/****************************************************************************
* Public Functions
****************************************************************************/

View File

@ -81,14 +81,14 @@
* run. If so, remove the watchdog from the list and execute it.
*
* Input Parameters:
* None
* ticks - Current time in clock ticks
*
* Returned Value:
* None
*
****************************************************************************/
static inline void wd_expiration(void)
static inline_function void wd_expiration(clock_t ticks)
{
FAR struct wdog_s *wdog;
FAR struct wdog_s *next;
@ -101,7 +101,9 @@ static inline void wd_expiration(void)
list_for_every_entry_safe(&g_wdactivelist, wdog,
next, struct wdog_s, node)
{
if (wdog->lag > 0)
/* Check whether the expected time has expired */
if (!clock_compare(wdog->expired, ticks))
{
break;
}
@ -110,15 +112,6 @@ static inline void wd_expiration(void)
list_delete(&wdog->node);
/* If there is another watchdog behind this one, update its
* its lag (this shouldn't be necessary).
*/
if (!list_is_empty(&g_wdactivelist))
{
next->lag += wdog->lag;
}
/* Indicate that the watchdog is no longer active. */
func = wdog->func;
@ -131,10 +124,160 @@ static inline void wd_expiration(void)
}
}
/****************************************************************************
* Name: wd_insert
*
* Description:
* Insert the timer into the global list to ensure that
* the list is kept sorted in increasing order of absolute expiration time.
*
* Input Parameters:
* wdog - Watchdog ID
* expired - Absolute expiration time in clock ticks
* wdentry - Function to call on timeout
* arg - Parameter to pass to wdentry
*
* Assumptions:
* wdog and wdentry are not NULL.
*
* Returned Value:
* None.
*
****************************************************************************/
static inline_function
void wd_insert(FAR struct wdog_s *wdog, clock_t expired,
wdentry_t wdentry, wdparm_t arg)
{
FAR struct wdog_s *curr;
DEBUGASSERT(wdog && wdentry);
/* Traverse the watchdog list */
list_for_every_entry(&g_wdactivelist, curr, struct wdog_s, node)
{
/* Stop at the first watchdog that expires later than 'expired' */
if (!clock_compare(curr->expired, expired))
{
break;
}
}
/* There are two cases:
 * - We traversed to the end of the list, where curr == &g_wdactivelist.
 * - We found a curr such that curr->expired has not timed out
 *   relative to 'expired', i.e. curr expires later.
 * In either case, we simply insert the new wdog before curr.
 */
list_add_before(&curr->node, &wdog->node);
wdog->func = wdentry;
up_getpicbase(&wdog->picbase);
wdog->arg = arg;
wdog->expired = expired;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: wd_start_absolute
*
* Description:
* This function adds a watchdog timer to the active timer queue. The
* specified watchdog function at 'wdentry' will be called from the
* interrupt level when the specified absolute time has been reached.
* Watchdog timers may be started from the interrupt level.
*
* Watchdog timers execute in the address environment that was in effect
* when wd_start_absolute() is called.
*
* Watchdog timers execute only once.
*
* To replace either the timeout delay or the function to be executed,
* call wd_start_absolute() again with the same wdog; only the most recent
* start on a given watchdog ID has any effect.
*
* Input Parameters:
* wdog - Watchdog ID
* ticks - Absolute time in clock ticks
* wdentry - Function to call on timeout
* arg - Parameter to pass to wdentry.
*
* NOTE: The parameter must be of type wdparm_t.
*
* Returned Value:
* Zero (OK) is returned on success; a negated errno value is returned to
* indicate the nature of any failure.
*
* Assumptions:
* The watchdog routine runs in the context of the timer interrupt handler
* and is subject to all ISR restrictions.
*
****************************************************************************/
int wd_start_absolute(FAR struct wdog_s *wdog, clock_t ticks,
wdentry_t wdentry, wdparm_t arg)
{
irqstate_t flags;
bool reassess = false;
/* Verify the wdog and setup parameters */
if (wdog == NULL || wdentry == NULL)
{
return -EINVAL;
}
/* NOTE: There is a race condition here... the caller may receive
* the watchdog between the time that wd_start_absolute is called and
* the critical section is established.
*/
flags = enter_critical_section();
#ifdef CONFIG_SCHED_TICKLESS
/* We need to reassess the timer if the watchdog list head has changed. */
if (WDOG_ISACTIVE(wdog))
{
reassess |= list_is_head(&g_wdactivelist, &wdog->node);
list_delete(&wdog->node);
wdog->func = NULL;
}
wd_insert(wdog, ticks, wdentry, arg);
reassess |= list_is_head(&g_wdactivelist, &wdog->node);
if (reassess)
{
/* Resume the interval timer that will generate the next
* interval event. If the timer at the head of the list changed,
* then this will pick that new delay.
*/
nxsched_reassess_timer();
}
#else
UNUSED(reassess);
/* Check if the watchdog has been started. If so, delete it. */
if (WDOG_ISACTIVE(wdog))
{
list_delete(&wdog->node);
wdog->func = NULL;
}
wd_insert(wdog, ticks, wdentry, arg);
#endif
leave_critical_section(flags);
return OK;
}
/****************************************************************************
* Name: wd_start
*
@ -174,146 +317,15 @@ static inline void wd_expiration(void)
int wd_start(FAR struct wdog_s *wdog, sclock_t delay,
wdentry_t wdentry, wdparm_t arg)
{
FAR struct wdog_s *curr;
irqstate_t flags;
sclock_t now;
/* Verify the wdog and setup parameters */
if (wdog == NULL || wdentry == NULL || delay < 0)
if (delay < 0)
{
return -EINVAL;
}
/* Check if the watchdog has been started. If so, stop it.
* NOTE: There is a race condition here... the caller may receive
* the watchdog between the time that wd_start is called and
* the critical section is established.
*/
flags = enter_critical_section();
if (WDOG_ISACTIVE(wdog))
{
wd_cancel(wdog);
}
/* Save the data in the watchdog structure */
wdog->func = wdentry; /* Function to execute when delay expires */
up_getpicbase(&wdog->picbase);
wdog->arg = arg;
/* Calculate delay+1, forcing the delay into a range that we can handle.
*
* NOTE that one is added to the delay. This is correct and must not be
* changed: The contract for the use wdog_start is that the wdog will
* delay FOR AT LEAST as long as requested, but may delay longer due to
* variety of factors. The wdog logic has no knowledge of the the phase
* of the system timer when it is started: The next timer interrupt may
* occur immediately or may be delayed for almost a full cycle. In order
* to meet the contract requirement, the requested time is also always
* incremented by one so that the delay is always at least as long as
* requested.
*
* There is extensive documentation about this time issue elsewhere.
*/
if (delay <= 0)
{
delay = 1;
}
else if (++delay <= 0)
{
delay--;
}
#ifdef CONFIG_SCHED_TICKLESS
/* Cancel the interval timer that drives the timing events. This will
* cause wd_timer to be called which update the delay value for the first
* time at the head of the timer list (there is a possibility that it
* could even remove it).
*/
nxsched_cancel_timer();
#endif
/* Do the easy case first -- when the watchdog timer queue is empty. */
if (list_is_empty(&g_wdactivelist))
{
#ifdef CONFIG_SCHED_TICKLESS
/* Update clock tickbase */
g_wdtickbase = clock_systime_ticks();
#endif
/* Add the watchdog to the head == tail of the queue. */
list_add_tail(&g_wdactivelist, &wdog->node);
}
/* There are other active watchdogs in the timer queue */
else
{
now = 0;
/* Advance past shorter delays */
list_for_every_entry(&g_wdactivelist, curr, struct wdog_s, node)
{
now += curr->lag;
if (now > delay)
{
break;
}
}
/* Check if the new wdog must be inserted before the curr. */
if (delay < now)
{
/* The relative delay time is smaller or equal to the current delay
* time, so decrement the current delay time by the new relative
* delay time.
*/
delay -= (now - curr->lag);
curr->lag -= delay;
/* Insert the new watchdog in the list */
list_add_before(&curr->node, &wdog->node);
}
/* The new watchdog delay time is greater than the curr delay time,
* so the new wdog must be inserted after the curr. This only occurs
* if the wdog is to be added to the end of the list.
*/
else
{
delay -= now;
list_add_tail(&g_wdactivelist, &wdog->node);
}
}
/* Put the lag into the watchdog structure and mark it as active. */
wdog->lag = delay;
#ifdef CONFIG_SCHED_TICKLESS
/* Resume the interval timer that will generate the next interval event.
* If the timer at the head of the list changed, then this will pick that
* new delay.
*/
nxsched_resume_timer();
#endif
leave_critical_section(flags);
return OK;
return wd_start_absolute(wdog, clock_systime_ticks() + delay,
wdentry, arg);
}
/****************************************************************************
@ -343,66 +355,42 @@ int wd_start(FAR struct wdog_s *wdog, sclock_t delay,
****************************************************************************/
#ifdef CONFIG_SCHED_TICKLESS
unsigned int wd_timer(int ticks, bool noswitches)
clock_t wd_timer(clock_t ticks, bool noswitches)
{
FAR struct wdog_s *wdog;
unsigned int ret;
int decr;
/* Update clock tickbase */
g_wdtickbase += ticks;
/* Check if there are any active watchdogs to process */
list_for_every_entry(&g_wdactivelist, wdog, struct wdog_s, node)
{
if (ticks <= 0)
{
break;
}
/* Decrement the lag for this watchdog. */
decr = MIN(wdog->lag, ticks);
/* There are. Decrement the lag counter */
wdog->lag -= decr;
ticks -= decr;
}
sclock_t ret;
/* Check if the watchdog at the head of the list is ready to run */
if (!noswitches)
{
wd_expiration();
wd_expiration(ticks);
}
/* Return the delay for the next watchdog to expire */
ret = list_is_empty(&g_wdactivelist) ? 0 :
list_first_entry(&g_wdactivelist, struct wdog_s, node)->lag;
if (list_is_empty(&g_wdactivelist))
{
return CLOCK_MAX;
}
/* Note that if noswitches is true, the head watchdog may have already
 * expired, so (expired - ticks) may be negative.
 */
wdog = list_first_entry(&g_wdactivelist, struct wdog_s, node);
ret = wdog->expired - ticks;
/* Return the delay for the next watchdog to expire */
return ret;
return MAX(ret, 1);
}
#else
void wd_timer(void)
void wd_timer(clock_t ticks)
{
/* Check if there are any active watchdogs to process */
if (!list_is_empty(&g_wdactivelist))
{
/* There are. Decrement the lag counter */
--(list_first_entry(&g_wdactivelist, struct wdog_s, node)->lag);
/* Check if the watchdog at the head of the list is ready to run */
wd_expiration();
}
wd_expiration(ticks);
}
#endif /* CONFIG_SCHED_TICKLESS */

View File

@ -44,23 +44,6 @@
#define list_node wdlist_node
/****************************************************************************
* Name: wd_elapse
*
* Description:
* This function is used to get time-elapse from last time wd_timer() be
* called. In case of CONFIG_SCHED_TICKLESS configured, wd_timer() may
* take lots of ticks, during this time, wd_start()/wd_cancel() may
* called, so we need wd_elapse() to correct the delay/lag.
*
****************************************************************************/
#ifdef CONFIG_SCHED_TICKLESS
# define wd_elapse() (clock_systime_ticks() - g_wdtickbase)
#else
# define wd_elapse() (0)
#endif
/****************************************************************************
* Public Data
****************************************************************************/
@ -80,14 +63,6 @@ extern "C"
extern struct list_node g_wdactivelist;
/* This is wdog tickbase, for wd_gettime() may called many times
* between 2 times of wd_timer(), we use it to update wd_gettime().
*/
#ifdef CONFIG_SCHED_TICKLESS
extern clock_t g_wdtickbase;
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
@ -119,9 +94,9 @@ extern clock_t g_wdtickbase;
****************************************************************************/
#ifdef CONFIG_SCHED_TICKLESS
unsigned int wd_timer(int ticks, bool noswitches);
clock_t wd_timer(clock_t ticks, bool noswitches);
#else
void wd_timer(void);
void wd_timer(clock_t ticks);
#endif
/****************************************************************************