2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
2018-05-29 21:21:26 +02:00
|
|
|
* libs/libc/wqueue/work_usrthread.c
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2018-08-25 22:58:07 +02:00
|
|
|
* Copyright (C) 2009-2018 Gregory Nutt. All rights reserved.
|
2014-10-10 14:22:51 +02:00
|
|
|
* Author: Gregory Nutt <gnutt@nuttx.org>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* 3. Neither the name NuttX nor the names of its contributors may be
|
|
|
|
* used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
|
|
|
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
|
|
|
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <pthread.h>
|
2018-09-16 19:07:24 +02:00
|
|
|
#include <signal.h>
|
2014-10-10 22:52:04 +02:00
|
|
|
#include <sched.h>
|
2014-10-10 20:27:11 +02:00
|
|
|
#include <errno.h>
|
2014-10-10 22:52:04 +02:00
|
|
|
#include <assert.h>
|
2014-10-10 20:27:11 +02:00
|
|
|
#include <queue.h>
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2017-10-08 19:52:32 +02:00
|
|
|
#include <nuttx/semaphore.h>
|
2014-10-10 21:21:37 +02:00
|
|
|
#include <nuttx/clock.h>
|
2017-10-08 19:52:32 +02:00
|
|
|
#include <nuttx/wqueue.h>
|
2014-10-10 14:22:51 +02:00
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#include "wqueue/wqueue.h"
|
|
|
|
|
2014-10-11 23:59:40 +02:00
|
|
|
#if defined(CONFIG_LIB_USRWORK) && !defined(__KERNEL__)
|
2014-10-10 14:22:51 +02:00
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Pre-processor Definitions
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
/* Use CLOCK_MONOTONIC if it is available. CLOCK_REALTIME can cause bad
|
|
|
|
* delays if the time is changed.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifdef CONFIG_CLOCK_MONOTONIC
|
|
|
|
# define WORK_CLOCK CLOCK_MONOTONIC
|
|
|
|
#else
|
|
|
|
# define WORK_CLOCK CLOCK_REALTIME
|
|
|
|
#endif
|
2014-10-10 16:35:58 +02:00
|
|
|
|
2018-08-25 22:58:07 +02:00
|
|
|
#ifdef CONFIG_SYSTEM_TIME64
|
|
|
|
# define WORK_DELAY_MAX UINT64_MAX
|
|
|
|
#else
|
|
|
|
# define WORK_DELAY_MAX UINT32_MAX
|
|
|
|
#endif
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#ifndef MIN
|
|
|
|
# define MIN(a,b) ((a) < (b) ? (a) : (b))
|
|
|
|
#endif
|
2014-10-10 16:35:58 +02:00
|
|
|
|
2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Type Declarations
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Public Data
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
/* The state of the user mode work queue. */
|
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
struct usr_wqueue_s g_usrwork;
|
2014-10-10 22:52:04 +02:00
|
|
|
|
|
|
|
/* This semaphore supports exclusive access to the user-mode work queue */
|
|
|
|
|
|
|
|
#ifdef CONFIG_BUILD_PROTECTED
|
2014-10-12 00:27:24 +02:00
|
|
|
sem_t g_usrsem;
|
2014-10-10 22:52:04 +02:00
|
|
|
#else
|
2014-10-12 00:27:24 +02:00
|
|
|
pthread_mutex_t g_usrmutex;
|
2014-10-10 22:52:04 +02:00
|
|
|
#endif
|
|
|
|
|
2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Name: work_process
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* This is the logic that performs actions placed on any work list. This
|
|
|
|
* logic is the common underlying logic to all work queues. This logic is
|
|
|
|
* part of the internal implementation of each work queue; it should not
|
|
|
|
* be called from application level logic.
|
|
|
|
*
|
2018-02-01 17:00:02 +01:00
|
|
|
* Input Parameters:
|
2014-10-10 22:52:04 +02:00
|
|
|
* wqueue - Describes the work queue to be processed
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* None
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
void work_process(FAR struct usr_wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  sigset_t sigset;
  sigset_t oldset;
  worker_t worker;
  FAR void *arg;
  clock_t elapsed;
  clock_t remaining;
  clock_t stick;
  clock_t ctick;
  clock_t next;
  int ret;

  /* Then process queued work.  Lock the work queue while we process items
   * in the work list.
   */

  next = WORK_DELAY_MAX;
  ret = work_lock();
  if (ret < 0)
    {
      /* Break out earlier if we were awakened by a signal */

      return;
    }

  /* Set up the signal mask (only SIGWORK) used both to block the signal
   * around the unlock below and to wait for new work at the bottom.
   */

  sigemptyset(&sigset);
  sigaddset(&sigset, SIGWORK);

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock();

  /* And check each entry in the work queue.  Since we have locked the
   * work queue we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed.  qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick = clock();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance may be re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before unlocking the work queue) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Unlock the work queue while the work is being
               * performed... we don't have any idea how long this will take!
               */

              work_unlock();
              worker(arg);

              /* Now, unfortunately, since we unlocked the work queue we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              ret = work_lock();
              if (ret < 0)
                {
                  /* Break out earlier if we were awakened by a signal */

                  return;
                }

              work = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Canceled.. Just move to the next work in the list with
               * the work queue still locked.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Unlock the work queue before waiting.  In order to assure that we do
   * not lose the SIGWORK signal before waiting, we block the SIGWORK
   * signals before unlocking the work queue.  That will cause in SIGWORK
   * signals directed to the worker thread to pend.
   */

  (void)sigprocmask(SIG_BLOCK, &sigset, &oldset);
  work_unlock();

  if (next == WORK_DELAY_MAX)
    {
      /* Wait indefinitely until signaled with SIGWORK */

      sigwaitinfo(&sigset, NULL);
    }
  else
    {
      struct timespec rqtp;
      time_t sec;

      /* Wait awhile to check the work list.  We will wait here until
       * either the time elapses or until we are awakened by a signal.
       * Interrupts will be re-enabled while we wait.
       */

      sec          = next / 1000000;
      rqtp.tv_sec  = sec;
      rqtp.tv_nsec = (next - (sec * 1000000)) * 1000;

      sigtimedwait(&sigset, NULL, &rqtp);
    }

  /* Restore the original signal mask (re-enabling SIGWORK delivery) */

  (void)sigprocmask(SIG_SETMASK, &oldset, NULL);
}
|
|
|
|
|
2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Name: work_usrthread
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* This is the worker thread that performs the actions placed on the user
|
|
|
|
* work queue.
|
|
|
|
*
|
|
|
|
* This is a user mode work queue. It must be used by applications for
|
|
|
|
* miscellaneous operations. The user work thread must be started by
|
|
|
|
* application start-up logic by calling work_usrstart().
|
|
|
|
*
|
2018-02-01 17:00:02 +01:00
|
|
|
* Input Parameters:
|
2014-10-10 14:22:51 +02:00
|
|
|
* argc, argv (not used)
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* Does not return
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#ifdef CONFIG_BUILD_PROTECTED
|
2014-10-10 17:34:03 +02:00
|
|
|
static int work_usrthread(int argc, char *argv[])
|
2014-10-10 22:52:04 +02:00
|
|
|
#else
|
|
|
|
static pthread_addr_t work_usrthread(pthread_addr_t arg)
|
|
|
|
#endif
|
2014-10-10 14:22:51 +02:00
|
|
|
{
|
|
|
|
/* Loop forever */
|
|
|
|
|
2015-10-12 15:45:02 +02:00
|
|
|
for (; ; )
|
2014-10-10 14:22:51 +02:00
|
|
|
{
|
2014-10-10 22:52:04 +02:00
|
|
|
/* Then process queued work. We need to keep the work queue locked
|
|
|
|
* while we process items in the work list.
|
2014-10-10 14:22:51 +02:00
|
|
|
*/
|
|
|
|
|
2014-10-10 16:35:58 +02:00
|
|
|
work_process(&g_usrwork);
|
2014-10-10 14:22:51 +02:00
|
|
|
}
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#ifdef CONFIG_BUILD_PROTECTED
|
2014-10-10 14:22:51 +02:00
|
|
|
return OK; /* To keep some compilers happy */
|
2014-10-10 22:52:04 +02:00
|
|
|
#else
|
|
|
|
return NULL; /* To keep some compilers happy */
|
|
|
|
#endif
|
2014-10-10 14:22:51 +02:00
|
|
|
}
|
|
|
|
|
2014-10-10 17:34:03 +02:00
|
|
|
/****************************************************************************
|
2014-10-10 22:52:04 +02:00
|
|
|
* Public Functions
|
2014-10-10 17:34:03 +02:00
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Name: work_usrstart
|
|
|
|
*
|
|
|
|
* Description:
|
|
|
|
* Start the user mode work queue.
|
|
|
|
*
|
2018-02-01 17:00:02 +01:00
|
|
|
* Input Parameters:
|
2014-10-10 17:34:03 +02:00
|
|
|
* None
|
|
|
|
*
|
|
|
|
* Returned Value:
|
|
|
|
* The task ID of the worker thread is returned on success. A negated
|
|
|
|
* errno value is returned on failure.
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
int work_usrstart(void)
|
|
|
|
{
|
2014-10-10 20:27:11 +02:00
|
|
|
/* Initialize work queue data structures */
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
#ifdef CONFIG_BUILD_PROTECTED
|
|
|
|
{
|
|
|
|
/* Set up the work queue lock */
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2017-10-08 19:52:32 +02:00
|
|
|
(void)nxsem_init(&g_usrsem, 0, 1);
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
/* Start a user-mode worker thread for use by applications. */
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
g_usrwork.pid = task_create("uwork",
|
2014-10-11 23:59:40 +02:00
|
|
|
CONFIG_LIB_USRWORKPRIORITY,
|
|
|
|
CONFIG_LIB_USRWORKSTACKSIZE,
|
2014-10-11 00:24:50 +02:00
|
|
|
(main_t)work_usrthread,
|
|
|
|
(FAR char * const *)NULL);
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
DEBUGASSERT(g_usrwork.pid > 0);
|
|
|
|
if (g_usrwork.pid < 0)
|
2014-10-10 22:52:04 +02:00
|
|
|
{
|
2018-01-31 00:57:36 +01:00
|
|
|
int errcode = get_errno();
|
2014-10-10 22:52:04 +02:00
|
|
|
DEBUGASSERT(errcode > 0);
|
|
|
|
return -errcode;
|
|
|
|
}
|
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
return g_usrwork.pid;
|
2014-10-10 22:52:04 +02:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
{
|
|
|
|
pthread_t usrwork;
|
|
|
|
pthread_attr_t attr;
|
|
|
|
struct sched_param param;
|
2015-07-23 21:16:32 +02:00
|
|
|
int ret;
|
2014-10-10 22:52:04 +02:00
|
|
|
|
|
|
|
/* Set up the work queue lock */
|
|
|
|
|
|
|
|
(void)pthread_mutex_init(&g_usrmutex, NULL);
|
|
|
|
|
|
|
|
/* Start a user-mode worker thread for use by applications. */
|
|
|
|
|
|
|
|
(void)pthread_attr_init(&attr);
|
2014-10-11 23:59:40 +02:00
|
|
|
(void)pthread_attr_setstacksize(&attr, CONFIG_LIB_USRWORKSTACKSIZE);
|
2014-10-10 22:52:04 +02:00
|
|
|
|
2015-09-05 17:10:48 +02:00
|
|
|
#ifdef CONFIG_SCHED_SPORADIC
|
2015-07-23 21:16:32 +02:00
|
|
|
/* Get the current sporadic scheduling parameters. Those will not be
|
|
|
|
* modified.
|
|
|
|
*/
|
|
|
|
|
|
|
|
ret = set_getparam(pid, ¶m);
|
|
|
|
if (ret < 0)
|
|
|
|
{
|
|
|
|
int erroode = get_errno();
|
|
|
|
return -errcode;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-10-11 23:59:40 +02:00
|
|
|
param.sched_priority = CONFIG_LIB_USRWORKPRIORITY;
|
2014-10-10 22:52:04 +02:00
|
|
|
(void)pthread_attr_setschedparam(&attr, ¶m);
|
|
|
|
|
2015-07-23 21:16:32 +02:00
|
|
|
ret = pthread_create(&usrwork, &attr, work_usrthread, NULL);
|
|
|
|
if (ret != 0)
|
2014-10-10 22:52:04 +02:00
|
|
|
{
|
2015-07-23 21:16:32 +02:00
|
|
|
return -ret;
|
2014-10-10 22:52:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Detach because the return value and completion status will not be
|
|
|
|
* requested.
|
|
|
|
*/
|
|
|
|
|
|
|
|
(void)pthread_detach(usrwork);
|
2014-10-10 17:34:03 +02:00
|
|
|
|
2014-10-11 00:24:50 +02:00
|
|
|
g_usrwork.pid = (pid_t)usrwork;
|
|
|
|
return g_usrwork.pid;
|
2014-10-10 22:52:04 +02:00
|
|
|
}
|
|
|
|
#endif
|
2014-10-10 17:34:03 +02:00
|
|
|
}
|
|
|
|
|
2014-10-11 23:59:40 +02:00
|
|
|
#endif /* CONFIG_LIB_USRWORK && !__KERNEL__*/
|