From 16a3e83258f0dfea720b50bbab61a59599258131 Mon Sep 17 00:00:00 2001
From: Gregory Nutt
Date: Fri, 10 Oct 2014 13:21:37 -0600
Subject: [PATCH] Add support for delays of different durations in work queue processing

---
 include/nuttx/wqueue.h        | 49 +++++++++++++++++++++++------------
 libc/Kconfig                  | 25 +++++++++++-------
 libc/wqueue/work_process.c    | 34 +++++++++++++++---------
 libc/wqueue/work_queue.c      |  2 +-
 libc/wqueue/work_signal.c     |  2 +-
 libc/wqueue/work_usrthread.c  | 18 +++++++------
 sched/wqueue/kwork_hpthread.c | 16 +++++++-----
 sched/wqueue/kwork_inherit.c  |  4 +--
 sched/wqueue/kwork_lpthread.c | 16 +++++++-----
 sched/wqueue/kwork_signal.c   |  4 +--
 10 files changed, 103 insertions(+), 67 deletions(-)

diff --git a/include/nuttx/wqueue.h b/include/nuttx/wqueue.h
index e572493fae..918a25afef 100644
--- a/include/nuttx/wqueue.h
+++ b/include/nuttx/wqueue.h
@@ -44,7 +44,7 @@
 
 #include
 #include
-#include
+#include
 #include
 
 /****************************************************************************
@@ -66,11 +66,14 @@
  * build (CONFIG_SCHED_KERNEL=n) but must be defined in kernel mode
  * in order to build the high priority work queue.
  * CONFIG_SCHED_WORKPRIORITY - The execution priority of the worker
- *   thread. Default: 192
+ *   thread. Default: 224
  * CONFIG_SCHED_WORKPERIOD - How often the worker thread checks for
- *   work in units of microseconds. Default: 50*1000 (50 MS).
+ *   work in units of microseconds. If the high priority worker thread is
+ *   performing garbage collection, then the default is 50*1000 (50 MS).
+ *   Otherwise, if the lower priority worker thread is performing garbage
+ *   collection, the default is 100*1000.
  * CONFIG_SCHED_WORKSTACKSIZE - The stack size allocated for the worker
- *   thread. Default: CONFIG_IDLETHREAD_STACKSIZE.
+ *   thread. Default: 2048.
  * CONFIG_SIG_SIGWORK - The signal number that will be used to wake-up
  *   the worker thread. Default: 17
  *
@@ -86,7 +89,20 @@
  * CONFIG_SCHED_LPWORKPERIOD - How often the lower priority worker thread
  *   checks for work in units of microseconds. Default: 50*1000 (50 MS).
  * CONFIG_SCHED_LPWORKSTACKSIZE - The stack size allocated for the lower
- *   priority worker thread. Default: CONFIG_IDLETHREAD_STACKSIZE.
+ *   priority worker thread. Default: 2048.
+ *
+ * The user-mode work queue is only available in the protected or kernel
+ * builds. In those configurations, the user-mode work queue provides the
+ * same (non-standard) facility for use by applications.
+ *
+ * CONFIG_SCHED_USRWORK. If CONFIG_SCHED_USRWORK is also defined then the
+ *   user-mode work queue will be created.
+ * CONFIG_SCHED_USRWORKPRIORITY - The minimum execution priority of the lower
+ *   priority worker thread. Default: 100
+ * CONFIG_SCHED_USRWORKPERIOD - How often the lower priority worker thread
+ *   checks for work in units of microseconds. Default: 100*1000 (100 MS).
+ * CONFIG_SCHED_USRWORKSTACKSIZE - The stack size allocated for the lower
+ *   priority worker thread. Default: 2048.
  */
 
 /* Is this a protected build (CONFIG_BUILD_PROTECTED=y) */
@@ -147,22 +163,20 @@
 
 #ifdef CONFIG_SCHED_WORKQUEUE
 
-/* We are building work queues... Work queues need signal support */
-
-#ifdef CONFIG_DISABLE_SIGNALS
-# warning "Worker thread support requires signals"
-#endif
-
 /* High priority, kernel work queue configuration ***************************/
 
 #ifdef CONFIG_SCHED_HPWORK
 
 # ifndef CONFIG_SCHED_WORKPRIORITY
-# define CONFIG_SCHED_WORKPRIORITY 192
+# define CONFIG_SCHED_WORKPRIORITY 224
 # endif
 
 # ifndef CONFIG_SCHED_WORKPERIOD
-# define CONFIG_SCHED_WORKPERIOD (50*1000) /* 50 milliseconds */
+# ifdef CONFIG_SCHED_LPWORK
+#   define CONFIG_SCHED_WORKPERIOD (100*1000) /* 100 milliseconds */
+# else
+#   define CONFIG_SCHED_WORKPERIOD (50*1000)  /* 50 milliseconds */
+# endif
 # endif
 
 # ifndef CONFIG_SCHED_WORKSTACKSIZE
@@ -223,11 +237,11 @@
 #ifdef CONFIG_SCHED_USRWORK
 
 # ifndef CONFIG_SCHED_USRWORKPRIORITY
-# define CONFIG_SCHED_USRWORKPRIORITY 50
+# define CONFIG_SCHED_USRWORKPRIORITY 100
 # endif
 
 # ifndef CONFIG_SCHED_USRWORKPERIOD
-# define CONFIG_SCHED_USRWORKPERIOD (50*1000) /* 50 milliseconds */
+# define CONFIG_SCHED_USRWORKPERIOD (100*1000) /* 100 milliseconds */
 # endif
 
 # ifndef CONFIG_SCHED_USRWORKSTACKSIZE
@@ -286,8 +300,9 @@
 
 struct wqueue_s
 {
-  pid_t             pid;    /* The task ID of the worker thread */
-  struct dq_queue_s q;      /* The queue of pending work */
+  uint32_t          delay;  /* Delay between polling cycles (ticks) */
+  struct dq_queue_s q;      /* The queue of pending work */
+  pid_t             pid[1]; /* The task ID of the worker thread(s) */
 };
 
 /* Defines the work callback */
diff --git a/libc/Kconfig b/libc/Kconfig
index eba38e0b6d..d899bae0d9 100644
--- a/libc/Kconfig
+++ b/libc/Kconfig
@@ -421,7 +421,7 @@ if SCHED_HPWORK
 
 config SCHED_WORKPRIORITY
     int "High priority worker thread priority"
-    default 192
+    default 224
     ---help---
        The execution priority of the higher priority worker thread.
 
@@ -440,10 +440,14 @@ config SCHED_WORKPRIORITY
 
 config SCHED_WORKPERIOD
     int "High priority worker thread period"
-    default 50000
+    default 100000 if SCHED_LPWORK
+    default 50000 if !SCHED_LPWORK
     ---help---
        How often the worker thread checks for work in units of microseconds.
-       Default: 50*1000 (50 MS).
+       Default: If the high priority worker thread is performing garbage
+       collection, then the default is 50*1000 (50 MS). Otherwise, if the
+       lower priority worker thread is performing garbage collection, the
+       default is 100*1000.
 
 config SCHED_WORKSTACKSIZE
     int "High priority worker thread stack size"
@@ -452,6 +456,8 @@ config SCHED_WORKSTACKSIZE
     ---help---
        The stack size allocated for the worker thread. Default: 2K.
 
+endif # SCHED_HPWORK
+
 config SCHED_LPWORK
     bool "Low priority (kernel) worker thread"
     default n
@@ -533,7 +539,6 @@ config SCHED_LPWORKSTACKSIZE
        The stack size allocated for the lower priority worker thread. Default: 2K.
 
 endif # SCHED_LPWORK
-endif # SCHED_HPWORK
 
 if BUILD_PROTECTED
 
@@ -545,20 +550,20 @@ config SCHED_USRWORK
 
 if SCHED_USRWORK
 
-config SCHED_LPWORKPRIORITY
+config SCHED_USRWORKPRIORITY
     int "User mode priority worker thread priority"
-    default 50
+    default 100
     ---help---
        The execution priority of the lopwer priority worker thread. Default: 192
 
-config SCHED_LPWORKPERIOD
+config SCHED_USRWORKPERIOD
     int "User mode worker thread period"
-    default 50000
+    default 100000
     ---help---
        How often the lower priority worker thread checks for work in units
-       of microseconds. Default: 50*1000 (50 MS).
+       of microseconds. Default: 100*1000 (100 MS).
 
-config SCHED_LPWORKSTACKSIZE
+config SCHED_USRWORKSTACKSIZE
     int "User mode worker thread stack size"
     default 2048
     ---help---
diff --git a/libc/wqueue/work_process.c b/libc/wqueue/work_process.c
index d55bf1ea6b..92cfd5c876 100644
--- a/libc/wqueue/work_process.c
+++ b/libc/wqueue/work_process.c
@@ -64,9 +64,9 @@
 # define WORK_CLOCK CLOCK_REALTIME
 #endif
 
-/* The work poll period is in system ticks. */
-
-#define WORKPERIOD_TICKS (CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK)
+#ifndef MIN
+# define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
 
 /****************************************************************************
  * Private Type Declarations
@@ -121,7 +121,7 @@ void work_process(FAR struct wqueue_s *wqueue)
    * we process items in the work list.
    */
 
-  next  = WORKPERIOD_TICKS;
+  next  = wqueue->delay;
   flags = irqsave();
 
   /* Get the time that we started this polling cycle in clock ticks. */
@@ -221,18 +221,28 @@ void work_process(FAR struct wqueue_s *wqueue)
         }
     }
 
-  /* There is no work to be performed now. Check if we need to delay or if we have
-   * already exceed the duration of the polling period.
-   */
+  /* Get the delay (in clock ticks) since we started the sampling */
 
-  if (next < WORKPERIOD_TICKS)
+  elapsed = clock_systimer() - work->qtime;
+  if (elapsed <= wqueue->delay)
     {
-      /* Wait awhile to check the work list. We will wait here until either
-       * the time elapses or until we are awakened by a signal. Interrupts
-       * will be re-enabled while we wait.
+      /* How much time would we need to delay to get to the end of the
+       * sampling period? The amount of time we delay should be the smaller
+       * of the time to the end of the sampling period and the time to the
+       * next work expiry.
        */
 
-      usleep(next * USEC_PER_TICK);
+      remaining = wqueue->delay - elapsed;
+      next      = MIN(next, remaining);
+      if (next > 0)
+        {
+          /* Wait awhile to check the work list. We will wait here until
+           * either the time elapses or until we are awakened by a signal.
+           * Interrupts will be re-enabled while we wait.
+           */
+
+          usleep(next * USEC_PER_TICK);
+        }
     }
 
   irqrestore(flags);
diff --git a/libc/wqueue/work_queue.c b/libc/wqueue/work_queue.c
index edfbb166ec..3abba15994 100644
--- a/libc/wqueue/work_queue.c
+++ b/libc/wqueue/work_queue.c
@@ -127,7 +127,7 @@ int work_qqueue(FAR struct wqueue_s *wqueue, FAR struct work_s *work,
   work->qtime  = clock_systimer(); /* Time work queued */
 
   dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
-  kill(wqueue->pid, SIGWORK);    /* Wake up the worker thread */
+  kill(wqueue->pid[0], SIGWORK); /* Wake up the worker thread */
 
   irqrestore(flags);
   return OK;
diff --git a/libc/wqueue/work_signal.c b/libc/wqueue/work_signal.c
index 2d6b9e2388..288c898771 100644
--- a/libc/wqueue/work_signal.c
+++ b/libc/wqueue/work_signal.c
@@ -96,7 +96,7 @@ int work_signal(int qid)
     {
       /* Signal the worker thread */
 
-      ret = kill(g_usrwork.pid, SIGWORK);
+      ret = kill(g_usrwork.pid[0], SIGWORK);
       if (ret < 0)
         {
           int errcode = errno;
diff --git a/libc/wqueue/work_usrthread.c b/libc/wqueue/work_usrthread.c
index 4ab1f44684..dfa67735f9 100644
--- a/libc/wqueue/work_usrthread.c
+++ b/libc/wqueue/work_usrthread.c
@@ -44,6 +44,7 @@
 
 #include
 #include
+#include
 
 #if defined(CONFIG_SCHED_WORKQUEUE) && defined(CONFIG_SCHED_USRWORK) && \
     !defined(__KERNEL__)
@@ -130,20 +131,21 @@ int work_usrstart(void)
 {
   /* Initialize work queue data structures */
 
+  g_usrwork.delay = CONFIG_SCHED_USRWORKPERIOD / USEC_PER_TICK;
   dq_init(&g_usrwork.q);
 
   /* Start a user-mode worker thread for use by applications. */
 
   svdbg("Starting user-mode worker thread\n");
 
-  g_usrwork.pid = task_create("uwork",
-                              CONFIG_SCHED_USRWORKPRIORITY,
-                              CONFIG_SCHED_USRWORKSTACKSIZE,
-                              (main_t)work_usrthread,
-                              (FAR char * const *)NULL);
+  g_usrwork.pid[0] = task_create("uwork",
+                                 CONFIG_SCHED_USRWORKPRIORITY,
+                                 CONFIG_SCHED_USRWORKSTACKSIZE,
+                                 (main_t)work_usrthread,
+                                 (FAR char * const *)NULL);
 
-  DEBUGASSERT(g_usrwork.pid > 0);
-  if (g_usrwork.pid < 0)
+  DEBUGASSERT(g_usrwork.pid[0] > 0);
+  if (g_usrwork.pid[0] < 0)
     {
       int errcode = errno;
       DEBUGASSERT(errcode > 0);
@@ -152,7 +154,7 @@ int work_usrstart(void)
       return -errcode;
     }
 
-  return g_usrwork.pid;
+  return g_usrwork.pid[0];
 }
 
 #endif /* CONFIG_SCHED_WORKQUEUE && CONFIG_SCHED_USRWORK && !__KERNEL__*/
diff --git a/sched/wqueue/kwork_hpthread.c b/sched/wqueue/kwork_hpthread.c
index 8745fbfb00..5457452d31 100644
--- a/sched/wqueue/kwork_hpthread.c
+++ b/sched/wqueue/kwork_hpthread.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 
 #include "wqueue/wqueue.h"
 
@@ -151,19 +152,20 @@ int work_hpstart(void)
 {
   /* Initialize work queue data structures */
 
+  g_hpwork.delay = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
   dq_init(&g_hpwork.q);
 
   /* Start the high-priority, kernel mode worker thread */
 
   svdbg("Starting high-priority kernel worker thread\n");
 
-  g_hpwork.pid = kernel_thread(HPWORKNAME, CONFIG_SCHED_WORKPRIORITY,
-                               CONFIG_SCHED_WORKSTACKSIZE,
-                               (main_t)work_hpthread,
-                               (FAR char * const *)NULL);
+  g_hpwork.pid[0] = kernel_thread(HPWORKNAME, CONFIG_SCHED_WORKPRIORITY,
+                                  CONFIG_SCHED_WORKSTACKSIZE,
+                                  (main_t)work_hpthread,
+                                  (FAR char * const *)NULL);
 
-  DEBUGASSERT(g_hpwork.pid > 0);
-  if (g_hpwork.pid < 0)
+  DEBUGASSERT(g_hpwork.pid[0] > 0);
+  if (g_hpwork.pid[0] < 0)
     {
       int errcode = errno;
       DEBUGASSERT(errcode > 0);
@@ -172,7 +174,7 @@ int work_hpstart(void)
       return -errcode;
     }
 
-  return g_hpwork.pid;
+  return g_hpwork.pid[0];
 }
 
 #endif /* CONFIG_SCHED_WORKQUEUE && CONFIG_SCHED_HPWORK*/
diff --git a/sched/wqueue/kwork_inherit.c b/sched/wqueue/kwork_inherit.c
index ad71b1ba32..af2698f5db 100644
--- a/sched/wqueue/kwork_inherit.c
+++ b/sched/wqueue/kwork_inherit.c
@@ -91,7 +91,7 @@ void lpwork_boostpriority(uint8_t reqprio)
    * thread from the process ID.
    */
 
-  wpid = g_lpwork.pid;
+  wpid = g_lpwork.pid[0];
   wtcb = sched_gettcb(wpid);
 
   /* Prevent context switches until we get the priorities right */
@@ -214,7 +214,7 @@ void lpwork_restorepriority(uint8_t reqprio)
    * thread from the process ID.
    */
 
-  wpid = g_lpwork.pid;
+  wpid = g_lpwork.pid[0];
   wtcb = sched_gettcb(wpid);
 
   /* Prevent context switches until we get the priorities right */
diff --git a/sched/wqueue/kwork_lpthread.c b/sched/wqueue/kwork_lpthread.c
index b0e39f26a4..30e4eb50a1 100644
--- a/sched/wqueue/kwork_lpthread.c
+++ b/sched/wqueue/kwork_lpthread.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 
 #include "wqueue/wqueue.h"
 
@@ -148,19 +149,20 @@ int work_lpstart(void)
 {
   /* Initialize work queue data structures */
 
+  g_lpwork.delay = CONFIG_SCHED_LPWORKPERIOD / USEC_PER_TICK;
   dq_init(&g_lpwork.q);
 
   /* Start the low-priority, kernel mode worker thread(s) */
 
   svdbg("Starting low-priority kernel worker thread\n");
 
-  g_lpwork.pid = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
-                               CONFIG_SCHED_LPWORKSTACKSIZE,
-                               (main_t)work_lpthread,
-                               (FAR char * const *)NULL);
+  g_lpwork.pid[0] = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
+                                  CONFIG_SCHED_LPWORKSTACKSIZE,
+                                  (main_t)work_lpthread,
+                                  (FAR char * const *)NULL);
 
-  DEBUGASSERT(g_lpwork.pid > 0);
-  if (g_lpwork.pid < 0)
+  DEBUGASSERT(g_lpwork.pid[0] > 0);
+  if (g_lpwork.pid[0] < 0)
     {
       int errcode = errno;
       DEBUGASSERT(errcode > 0);
@@ -169,7 +171,7 @@ int work_lpstart(void)
       return -errcode;
     }
 
-  return g_lpwork.pid;
+  return g_lpwork.pid[0];
 }
 
 #endif /* CONFIG_SCHED_WORKQUEUE && CONFIG_SCHED_LPWORK */
diff --git a/sched/wqueue/kwork_signal.c b/sched/wqueue/kwork_signal.c
index 1f99af7b55..03253eff34 100644
--- a/sched/wqueue/kwork_signal.c
+++ b/sched/wqueue/kwork_signal.c
@@ -97,14 +97,14 @@ int work_signal(int qid)
 #ifdef CONFIG_SCHED_HPWORK
   if (qid == HPWORK)
     {
-      pid = g_hpwork.pid;
+      pid = g_hpwork.pid[0];
     }
   else
 #endif
 #ifdef CONFIG_SCHED_LPWORK
   if (qid == LPWORK)
     {
-      pid = g_lpwork.pid;
+      pid = g_lpwork.pid[0];
     }
   else
 #endif
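
Usage note (not part of the patch): the sketch below shows how a kernel driver might exercise the modified queues. It assumes the existing work_queue()/LPWORK interface and the MSEC2TICK() helper from include/nuttx/clock.h; the driver names (sample_worker, sample_interrupt) are hypothetical. The point of the change is that each wqueue_s now carries its own polling delay, so the low-priority queue can poll at CONFIG_SCHED_LPWORKPERIOD (100 ms by default) while the high-priority queue keeps its shorter 50 ms period.

#include <nuttx/config.h>
#include <nuttx/clock.h>
#include <nuttx/wqueue.h>

static struct work_s g_sample_work;   /* Must persist until the worker runs */

/* Runs later on the low-priority worker thread, outside interrupt context */

static void sample_worker(FAR void *arg)
{
  /* Deferred processing goes here */
}

/* Hypothetical interrupt handler that defers its "bottom half" */

static int sample_interrupt(int irq, FAR void *context)
{
  /* Queue the work with an extra 10 ms delay.  work_process() now sleeps
   * for at most MIN(ticks to the next work expiry, wqueue->delay), where
   * wqueue->delay was initialized from CONFIG_SCHED_LPWORKPERIOD by
   * work_lpstart().
   */

  return work_queue(LPWORK, &g_sample_work, sample_worker, NULL,
                    MSEC2TICK(10));
}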