2009-11-05 23:58:36 +01:00
|
|
|
/****************************************************************************
|
2013-03-05 19:57:51 +01:00
|
|
|
* libc/wqueue/work_thread.c
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
2013-03-05 19:57:51 +01:00
|
|
|
* Copyright (C) 2009-2013 Gregory Nutt. All rights reserved.
|
2012-06-06 03:44:57 +02:00
|
|
|
* Author: Gregory Nutt <gnutt@nuttx.org>
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* 3. Neither the name NuttX nor the names of its contributors may be
|
|
|
|
* used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
|
|
|
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
|
|
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
2009-12-14 19:39:29 +01:00
|
|
|
#include <stdint.h>
|
2009-11-05 23:58:36 +01:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <queue.h>
|
|
|
|
#include <assert.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <debug.h>
|
|
|
|
|
|
|
|
#include <nuttx/arch.h>
|
|
|
|
#include <nuttx/wqueue.h>
|
|
|
|
#include <nuttx/clock.h>
|
2013-03-05 19:57:51 +01:00
|
|
|
#include <nuttx/kmalloc.h>
|
2009-11-05 23:58:36 +01:00
|
|
|
|
|
|
|
#ifdef CONFIG_SCHED_WORKQUEUE
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Pre-processor Definitions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Private Type Declarations
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Public Variables
|
|
|
|
****************************************************************************/
|
|
|
|
|
2013-03-06 01:02:07 +01:00
|
|
|
/* The state of each work queue. */
|
|
|
|
|
|
|
|
#ifdef CONFIG_NUTTX_KERNEL
|
|
|
|
|
|
|
|
/* Play some games in the kernel mode build to assure that different
|
|
|
|
* naming is used for the global work queue data structures. This may
|
|
|
|
 * not be necessary but it is safer.
|
|
|
|
*
|
|
|
|
* In this case g_work is #define'd to be either g_kernelwork or
|
|
|
|
* g_usrwork in include/nuttx/wqueue.h
|
|
|
|
*/
|
|
|
|
|
|
|
|
# ifdef __KERNEL__
|
|
|
|
struct wqueue_s g_kernelwork[NWORKERS];
|
|
|
|
# else
|
|
|
|
struct wqueue_s g_usrwork[NWORKERS];
|
|
|
|
# endif
|
|
|
|
|
|
|
|
#else /* CONFIG_NUTTX_KERNEL */
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
struct wqueue_s g_work[NWORKERS];
|
2011-03-12 23:09:14 +01:00
|
|
|
|
2013-03-06 01:02:07 +01:00
|
|
|
#endif /* CONFIG_NUTTX_KERNEL */
|
|
|
|
|
2009-11-05 23:58:36 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Variables
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
2012-09-04 02:54:09 +02:00
|
|
|
* Name: work_process
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2012-09-04 02:54:09 +02:00
|
|
|
* This is the logic that performs actions placed on any work list.
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
|
|
|
* Input parameters:
|
2012-09-04 02:54:09 +02:00
|
|
|
* wqueue - Describes the work queue to be processed
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
|
|
|
* Returned Value:
|
2012-09-04 02:54:09 +02:00
|
|
|
* None
|
2009-11-05 23:58:36 +01:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
static void work_process(FAR struct wqueue_s *wqueue)
|
2009-11-05 23:58:36 +01:00
|
|
|
{
|
|
|
|
volatile FAR struct work_s *work;
|
2009-11-06 14:42:49 +01:00
|
|
|
worker_t worker;
|
2012-09-04 02:54:09 +02:00
|
|
|
irqstate_t flags;
|
2009-11-06 14:42:49 +01:00
|
|
|
FAR void *arg;
|
2009-12-14 19:39:29 +01:00
|
|
|
uint32_t elapsed;
|
|
|
|
uint32_t remaining;
|
|
|
|
uint32_t next;
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Then process queued work. We need to keep interrupts disabled while
|
|
|
|
* we process items in the work list.
|
|
|
|
*/
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
next = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
|
2009-11-18 01:08:41 +01:00
|
|
|
flags = irqsave();
|
2012-09-04 02:54:09 +02:00
|
|
|
work = (FAR struct work_s *)wqueue->q.head;
|
|
|
|
while (work)
|
2009-11-05 23:58:36 +01:00
|
|
|
{
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Is this work ready? It is ready if there is no delay or if
|
|
|
|
* the delay has elapsed. qtime is the time that the work was added
|
|
|
|
* to the work queue. It will always be greater than or equal to
|
|
|
|
* zero. Therefore a delay of zero will always execute immediately.
|
2009-11-05 23:58:36 +01:00
|
|
|
*/
|
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
elapsed = clock_systimer() - work->qtime;
|
|
|
|
if (elapsed >= work->delay)
|
|
|
|
{
|
|
|
|
/* Remove the ready-to-execute work from the list */
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
(void)dq_rem((struct dq_entry_s *)work, &wqueue->q);
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Extract the work description from the entry (in case the work
|
|
|
|
* instance by the re-used after it has been de-queued).
|
|
|
|
*/
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
worker = work->worker;
|
|
|
|
arg = work->arg;
|
|
|
|
|
|
|
|
/* Mark the work as no longer being queued */
|
|
|
|
|
|
|
|
work->worker = NULL;
|
|
|
|
|
|
|
|
/* Do the work. Re-enable interrupts while the work is being
|
|
|
|
* performed... we don't have any idea how long that will take!
|
|
|
|
*/
|
|
|
|
|
|
|
|
irqrestore(flags);
|
|
|
|
worker(arg);
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Now, unfortunately, since we re-enabled interrupts we don't
|
|
|
|
* know the state of the work list and we will have to start
|
|
|
|
* back at the head of the list.
|
|
|
|
*/
|
|
|
|
|
|
|
|
flags = irqsave();
|
|
|
|
work = (FAR struct work_s *)wqueue->q.head;
|
|
|
|
}
|
|
|
|
else
|
2009-11-05 23:58:36 +01:00
|
|
|
{
|
2012-09-04 02:54:09 +02:00
|
|
|
/* This one is not ready.. will it be ready before the next
|
|
|
|
* scheduled wakeup interval?
|
2009-11-06 14:42:49 +01:00
|
|
|
*/
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
remaining = elapsed - work->delay;
|
|
|
|
if (remaining < next)
|
2009-11-06 14:42:49 +01:00
|
|
|
{
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Yes.. Then schedule to wake up when the work is ready */
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
next = remaining;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Then try the next in the list. */
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
work = (FAR struct work_s *)work->dq.flink;
|
|
|
|
}
|
|
|
|
}
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Wait awhile to check the work list. We will wait here until either
|
|
|
|
* the time elapses or until we are awakened by a signal.
|
|
|
|
*/
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
usleep(next * USEC_PER_TICK);
|
|
|
|
irqrestore(flags);
|
|
|
|
}
|
2010-05-26 14:34:59 +02:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Public Functions
|
|
|
|
****************************************************************************/
|
|
|
|
/****************************************************************************
|
2013-03-06 01:02:07 +01:00
|
|
|
* Name: work_hpthread, work_lpthread, and work_usrthread
|
2012-09-04 02:54:09 +02:00
|
|
|
*
|
|
|
|
* Description:
|
2013-03-06 01:02:07 +01:00
|
|
|
* These are the worker threads that performs actions placed on the work
|
|
|
|
* lists.
|
|
|
|
*
|
|
|
|
* work_hpthread and work_lpthread: These are the kernel mode work queues
|
|
|
|
* (also build in the flat build). One of these threads also performs
|
|
|
|
* periodic garbage collection (that is otherwise performed by the idle
|
|
|
|
* thread if CONFIG_SCHED_WORKQUEUE is not defined).
|
|
|
|
*
|
|
|
|
* These worker threads are started by the OS during normal bringup.
|
|
|
|
*
|
2013-03-11 00:42:49 +01:00
|
|
|
* work_usrthread: This is a user mode work queue. It must be built into
|
|
|
|
 *   the application blob during the user phase of a kernel build.  The
|
|
|
|
* user work thread will then automatically be started when the system
|
|
|
|
* boots by calling through the pointer found in the header on the user
|
|
|
|
* space blob.
|
2013-03-06 01:02:07 +01:00
|
|
|
*
|
|
|
|
 *   All of these entrypoints are referenced by OS internally and should
|
|
|
|
* not be accessed by application logic.
|
2012-09-04 02:54:09 +02:00
|
|
|
*
|
|
|
|
* Input parameters:
|
|
|
|
* argc, argv (not used)
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* Does not return
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
2010-05-26 14:34:59 +02:00
|
|
|
|
2013-12-31 19:48:11 +01:00
|
|
|
#if defined(CONFIG_SCHED_HPWORK)
|
2013-03-06 01:02:07 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
int work_hpthread(int argc, char *argv[])
|
|
|
|
{
|
|
|
|
/* Loop forever */
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
/* First, perform garbage collection. This cleans-up memory de-allocations
|
|
|
|
* that were queued because they could not be freed in that execution
|
|
|
|
* context (for example, if the memory was freed from an interrupt handler).
|
|
|
|
* NOTE: If the work thread is disabled, this clean-up is performed by
|
2012-11-27 18:22:32 +01:00
|
|
|
* the IDLE thread (at a very, very low priority).
|
2012-09-04 02:54:09 +02:00
|
|
|
*/
|
2009-11-05 23:58:36 +01:00
|
|
|
|
2012-11-27 18:22:32 +01:00
|
|
|
#ifndef CONFIG_SCHED_LPWORK
|
2012-09-04 02:54:09 +02:00
|
|
|
sched_garbagecollection();
|
|
|
|
#endif
|
2009-11-06 14:42:49 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Then process queued work. We need to keep interrupts disabled while
|
|
|
|
* we process items in the work list.
|
|
|
|
*/
|
|
|
|
|
|
|
|
work_process(&g_work[HPWORK]);
|
|
|
|
}
|
2009-11-18 01:08:41 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
return OK; /* To keep some compilers happy */
|
|
|
|
}
|
2009-11-18 01:08:41 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
#ifdef CONFIG_SCHED_LPWORK
|
2013-03-06 01:02:07 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
int work_lpthread(int argc, char *argv[])
|
|
|
|
{
|
|
|
|
/* Loop forever */
|
2009-11-06 14:42:49 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
/* First, perform garbage collection. This cleans-up memory de-allocations
|
|
|
|
* that were queued because they could not be freed in that execution
|
|
|
|
* context (for example, if the memory was freed from an interrupt handler).
|
|
|
|
* NOTE: If the work thread is disabled, this clean-up is performed by
|
2012-11-27 18:22:32 +01:00
|
|
|
* the IDLE thread (at a very, very low priority).
|
2012-09-04 02:54:09 +02:00
|
|
|
*/
|
2009-11-18 01:08:41 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
sched_garbagecollection();
|
2009-11-18 01:08:41 +01:00
|
|
|
|
2012-09-04 02:54:09 +02:00
|
|
|
/* Then process queued work. We need to keep interrupts disabled while
|
|
|
|
* we process items in the work list.
|
|
|
|
*/
|
|
|
|
|
|
|
|
work_process(&g_work[LPWORK]);
|
2009-11-05 23:58:36 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return OK; /* To keep some compilers happy */
|
|
|
|
}
|
2012-09-04 02:54:09 +02:00
|
|
|
|
|
|
|
#endif /* CONFIG_SCHED_LPWORK */
|
2013-03-06 01:02:07 +01:00
|
|
|
#endif /* CONFIG_SCHED_HPWORK */
|
|
|
|
|
2013-12-31 19:48:11 +01:00
|
|
|
#if defined(CONFIG_SCHED_USRWORK) && !defined(__KERNEL__)
|
2013-03-06 01:02:07 +01:00
|
|
|
|
|
|
|
int work_usrthread(int argc, char *argv[])
|
|
|
|
{
|
|
|
|
/* Loop forever */
|
|
|
|
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
/* Then process queued work. We need to keep interrupts disabled while
|
|
|
|
* we process items in the work list.
|
|
|
|
*/
|
|
|
|
|
|
|
|
work_process(&g_work[USRWORK]);
|
|
|
|
}
|
|
|
|
|
|
|
|
return OK; /* To keep some compilers happy */
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_SCHED_USRWORK */
|
2012-09-04 02:54:09 +02:00
|
|
|
|
2009-11-05 23:58:36 +01:00
|
|
|
#endif /* CONFIG_SCHED_WORKQUEUE */
|