/****************************************************************************
 * sched/wqueue/kwork_thread.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <unistd.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/queue.h>
#include <nuttx/wqueue.h>
#include <nuttx/kthread.h>
#include <nuttx/semaphore.h>

#include "sched/sched.h"
#include "wqueue/wqueue.h"

#if defined(CONFIG_SCHED_WORKQUEUE)

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE
#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE 0
#endif
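
/* CALL_WORKER() invokes a worker callback.  When the critical section
 * monitor limit is enabled, it also measures how long the worker ran and
 * raises CRITMONITOR_PANIC() if the elapsed time exceeds
 * CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE.
 */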

#if CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE > 0
#  define CALL_WORKER(worker, arg) \
     do \
       { \
         clock_t start; \
         clock_t elapsed; \
         start = perf_gettime(); \
         worker(arg); \
         elapsed = perf_gettime() - start; \
         if (elapsed > CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE) \
           { \
             CRITMONITOR_PANIC("WORKER %p execute too long %ju\n", \
                               worker, (uintmax_t)elapsed); \
           } \
       } \
     while (0)
#else
#  define CALL_WORKER(worker, arg) worker(arg)
#endif

/****************************************************************************
 * Public Data
 ****************************************************************************/

#if defined(CONFIG_SCHED_HPWORK)
/* The state of the kernel mode, high priority work queue(s). */

struct hp_wqueue_s g_hpwork =
{
  {NULL, NULL},
  SEM_INITIALIZER(0),
};

#endif /* CONFIG_SCHED_HPWORK */

#if defined(CONFIG_SCHED_LPWORK)
/* The state of the kernel mode, low priority work queue(s). */

struct lp_wqueue_s g_lpwork =
{
  {NULL, NULL},
  SEM_INITIALIZER(0),
};

#endif /* CONFIG_SCHED_LPWORK */

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: work_thread
 *
 * Description:
 *   These are the worker threads that perform the actions placed on the
 *   kernel work queues (both the high and the low priority queues).
 *
 *   Together these threads implement the kernel mode work queues (also
 *   built in the flat build).
 *
 *   All kernel mode worker threads are started by the OS during normal
 *   bring up.  This entry point is referenced by the OS internally and
 *   should not be accessed by application logic.
 *
 * Input Parameters:
 *   argc, argv - Standard thread arguments.  argv[] carries the addresses
 *                of the work queue and of this thread's worker structure,
 *                encoded as strings.
 *
 * Returned Value:
 *   Does not return.
 *
 ****************************************************************************/

static int work_thread(int argc, FAR char *argv[])
{
  FAR struct kwork_wqueue_s *wqueue;
  FAR struct kworker_s *kworker;
  FAR struct work_s *work;
  worker_t worker;
  irqstate_t flags;
  FAR void *arg;
  int semcount;

  /* Get the work queue and worker handles from argv */

  wqueue = (FAR struct kwork_wqueue_s *)
           ((uintptr_t)strtoul(argv[1], NULL, 0));
  kworker = (FAR struct kworker_s *)
            ((uintptr_t)strtoul(argv[2], NULL, 0));

  flags = enter_critical_section();

  /* Loop forever */

  for (; ; )
    {
      /* Check each entry in the work queue.  Since we have disabled
       * interrupts we know:  (1) we will not be suspended unless we do
       * so ourselves, and (2) there will be no changes to the work queue.
       */

      /* Remove the ready-to-execute work from the list */

      while ((work = (FAR struct work_s *)dq_remfirst(&wqueue->q)) != NULL)
        {
          if (work->worker == NULL)
            {
              continue;
            }

          /* Extract the work description from the entry (in case the work
           * instance will be re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Extract the work argument (before re-enabling interrupts) */

          arg = work->arg;

          /* Mark the work as no longer being queued */

          work->worker = NULL;

          /* Mark the thread busy */

          kworker->work = work;

          /* Do the work.  Re-enable interrupts while the work is being
           * performed... we don't have any idea how long this will take!
           */

          leave_critical_section(flags);
          CALL_WORKER(worker, arg);
          flags = enter_critical_section();

          /* Mark the thread un-busy */

          kworker->work = NULL;

          /* Check if anyone is waiting for this work to complete and, if
           * so, wake them up.  A negative semaphore count gives the number
           * of blocked waiters, so post once for each of them.
           */

          nxsem_get_value(&kworker->wait, &semcount);
          while (semcount++ < 0)
            {
              nxsem_post(&kworker->wait);
            }
        }

      /* All ready-to-execute work has been processed.  Wait here until the
       * work queue semaphore is posted, indicating that new work has been
       * queued.
       */

      nxsem_wait_uninterruptible(&wqueue->sem);
    }

  leave_critical_section(flags);

  return OK; /* To keep some compilers happy */
}
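
/* Illustrative sketch (not part of this module): work normally reaches
 * these kernel worker threads through the public work_queue() interface,
 * for example from a driver.  The names my_work and my_worker below are
 * hypothetical:
 *
 *   static struct work_s my_work;
 *
 *   static void my_worker(FAR void *arg)
 *     {
 *       ... deferred processing runs on a kernel worker thread ...
 *     }
 *
 *   work_queue(HPWORK, &my_work, my_worker, NULL, 0);
 */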

/****************************************************************************
 * Name: work_thread_create
 *
 * Description:
 *   This function creates and activates a work thread task with kernel-
 *   mode privileges.
 *
 * Input Parameters:
 *   name       - Name of the new task
 *   priority   - Priority of the new task
 *   stack_size - Size (in bytes) of the stack needed
 *   nthread    - Number of worker threads to be created
 *   wqueue     - Work queue instance
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.
 *
 ****************************************************************************/

static int work_thread_create(FAR const char *name, int priority,
                              int stack_size, int nthread,
                              FAR struct kwork_wqueue_s *wqueue)
{
  FAR char *argv[3];
  char arg0[32];
  char arg1[32];
  int wndx;
  int pid;

  /* Don't permit any of the threads to run until we have fully initialized
   * g_hpwork and g_lpwork.
   */

  sched_lock();

  for (wndx = 0; wndx < nthread; wndx++)
    {
      nxsem_init(&wqueue->worker[wndx].wait, 0, 0);
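
      /* Pass the work queue and the worker slot to the new thread by
       * encoding their addresses as strings in argv; work_thread()
       * converts them back with strtoul().
       */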

      snprintf(arg0, sizeof(arg0), "%p", wqueue);
      snprintf(arg1, sizeof(arg1), "%p", &wqueue->worker[wndx]);
      argv[0] = arg0;
      argv[1] = arg1;
      argv[2] = NULL;

      pid = kthread_create(name, priority, stack_size,
                           work_thread, argv);

      DEBUGASSERT(pid > 0);
      if (pid < 0)
        {
          serr("ERROR: work_thread_create %d failed: %d\n", wndx, pid);
          sched_unlock();
          return pid;
        }

      wqueue->worker[wndx].pid = pid;
    }

  sched_unlock();
  return OK;
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: work_foreach
 *
 * Description:
 *   Enumerate over each work queue thread and provide the pid of each
 *   thread to a user callback function.
 *
 * Input Parameters:
 *   qid     - The work queue ID
 *   handler - The function to be called with the pid of each thread
 *   arg     - An argument passed to the handler
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void work_foreach(int qid, work_foreach_t handler, FAR void *arg)
{
  FAR struct kwork_wqueue_s *wqueue;
  int nthread;
  int wndx;

#ifdef CONFIG_SCHED_HPWORK
  if (qid == HPWORK)
    {
      wqueue  = (FAR struct kwork_wqueue_s *)&g_hpwork;
      nthread = CONFIG_SCHED_HPNTHREADS;
    }
  else
#endif
#ifdef CONFIG_SCHED_LPWORK
  if (qid == LPWORK)
    {
      wqueue  = (FAR struct kwork_wqueue_s *)&g_lpwork;
      nthread = CONFIG_SCHED_LPNTHREADS;
    }
  else
#endif
    {
      return;
    }

  for (wndx = 0; wndx < nthread; wndx++)
    {
      handler(wqueue->worker[wndx].pid, arg);
    }
}
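
/* Illustrative sketch (not part of this module): a caller can use
 * work_foreach() to visit every worker thread of one queue.  The handler
 * below is hypothetical; it assumes the work_foreach_t callback receives
 * the thread pid and the caller-supplied argument, as the call above
 * shows:
 *
 *   static void count_worker(pid_t pid, FAR void *arg)
 *     {
 *       (*(FAR int *)arg)++;
 *     }
 *
 *   int nworkers = 0;
 *   work_foreach(LPWORK, count_worker, &nworkers);
 */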

/****************************************************************************
 * Name: work_start_highpri
 *
 * Description:
 *   Start the high-priority, kernel-mode worker thread(s).
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.
 *
 ****************************************************************************/

#if defined(CONFIG_SCHED_HPWORK)
int work_start_highpri(void)
{
  /* Start the high-priority, kernel mode worker thread(s) */

  sinfo("Starting high-priority kernel worker thread(s)\n");

  return work_thread_create(HPWORKNAME, CONFIG_SCHED_HPWORKPRIORITY,
                            CONFIG_SCHED_HPWORKSTACKSIZE,
                            CONFIG_SCHED_HPNTHREADS,
                            (FAR struct kwork_wqueue_s *)&g_hpwork);
}
#endif /* CONFIG_SCHED_HPWORK */

/****************************************************************************
 * Name: work_start_lowpri
 *
 * Description:
 *   Start the low-priority, kernel-mode worker thread(s).
 *
 * Input Parameters:
 *   None
 *
 * Returned Value:
 *   Zero (OK) on success; a negated errno value on failure.
 *
 ****************************************************************************/

#if defined(CONFIG_SCHED_LPWORK)
int work_start_lowpri(void)
{
  /* Start the low-priority, kernel mode worker thread(s) */

  sinfo("Starting low-priority kernel worker thread(s)\n");

  return work_thread_create(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
                            CONFIG_SCHED_LPWORKSTACKSIZE,
                            CONFIG_SCHED_LPNTHREADS,
                            (FAR struct kwork_wqueue_s *)&g_lpwork);
}
#endif /* CONFIG_SCHED_LPWORK */

#endif /* CONFIG_SCHED_WORKQUEUE */