2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
2014-10-10 16:35:58 +02:00
|
|
|
* sched/wqueue/kwork_queue.c
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2024-09-11 13:45:11 +02:00
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*
|
2020-03-11 21:23:38 +01:00
|
|
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
* contributor license agreements. See the NOTICE file distributed with
|
|
|
|
* this work for additional information regarding copyright ownership. The
|
|
|
|
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
|
|
|
* "License"); you may not use this file except in compliance with the
|
|
|
|
* License. You may obtain a copy of the License at
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2020-03-11 21:23:38 +01:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2020-03-11 21:23:38 +01:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
|
|
|
* Included Files
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
#include <nuttx/config.h>
|
|
|
|
|
2014-10-10 16:35:58 +02:00
|
|
|
#include <stdint.h>
|
2014-10-10 22:52:04 +02:00
|
|
|
#include <assert.h>
|
2014-10-10 16:35:58 +02:00
|
|
|
#include <errno.h>
|
|
|
|
|
2016-02-14 15:17:46 +01:00
|
|
|
#include <nuttx/irq.h>
|
2014-10-10 22:52:04 +02:00
|
|
|
#include <nuttx/arch.h>
|
|
|
|
#include <nuttx/clock.h>
|
2022-09-25 17:08:38 +02:00
|
|
|
#include <nuttx/queue.h>
|
2014-10-10 14:22:51 +02:00
|
|
|
#include <nuttx/wqueue.h>
|
|
|
|
|
|
|
|
#include "wqueue/wqueue.h"
|
|
|
|
|
2014-10-10 16:35:58 +02:00
|
|
|
#ifdef CONFIG_SCHED_WORKQUEUE
|
2014-10-10 14:22:51 +02:00
|
|
|
|
sched/wqueue: Do as much work as possible in work_thread
Decouple the semcount and the work queue length.
Previous Problem:
If a work is queued and cancelled in high priority threads (or queued
by timer and cancelled by another high priority thread) before
work_thread runs, the queue operation will mark work_thread as ready to
run, but the cancel operation minus the semcount back to -1 and makes
wqueue->q empty. Then the work_thread still runs, found empty queue,
and wait sem again, then semcount becomes -2 (being minused by 1)
This can be done multiple times, then semcount can become very small
value. Test case to produce incorrect semcount:
high_priority_task()
{
for (int i = 0; i < 10000; i++)
{
work_queue(LPWORK, &work, worker, NULL, 0);
work_cancel(LPWORK, &work);
usleep(1);
}
/* Now the g_lpwork.sem.semcount is a value near -10000 */
}
With incorrect semcount, any queue operation when the work_thread is
busy, will only increase semcount and push work into queue, but cannot
trigger work_thread (semcount is negative but work_thread is not
waiting), then there will be more and more works left in queue while
the work_thread is waiting sem and cannot call them.
Signed-off-by: Zhe Weng <wengzhe@xiaomi.com>
2023-03-15 14:55:28 +01:00
|
|
|
/****************************************************************************
|
|
|
|
* Pre-processor Definitions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/* Append the work to the tail of the wqueue and wake up one worker thread,
 * but only if at least one thread is currently blocked on the queue
 * semaphore (sem_count < 0).  The semaphore count is deliberately
 * decoupled from the queue length: posting unconditionally would let the
 * count drift negative when works are queued and then cancelled before a
 * worker thread runs.  Must be called inside a critical section.
 */

#define queue_work(wqueue, work) \
  do \
    { \
      int sem_count; \
      dq_addlast((FAR dq_entry_t *)(work), &(wqueue)->q); \
      nxsem_get_value(&(wqueue)->sem, &sem_count); \
      if (sem_count < 0) /* There are threads waiting for sem. */ \
        { \
          nxsem_post(&(wqueue)->sem); \
        } \
    } \
  while (0)
|
|
|
|
|
2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
2014-10-10 22:52:04 +02:00
|
|
|
/****************************************************************************
|
2024-06-25 15:01:48 +02:00
|
|
|
* Name: work_timer_expiry
|
2014-10-10 22:52:04 +02:00
|
|
|
****************************************************************************/
|
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
static void work_timer_expiry(wdparm_t arg)
|
2014-10-10 22:52:04 +02:00
|
|
|
{
|
2024-06-25 15:01:48 +02:00
|
|
|
FAR struct work_s *work = (FAR struct work_s *)arg;
|
2021-06-19 11:29:30 +02:00
|
|
|
irqstate_t flags = enter_critical_section();
|
2014-10-10 22:52:04 +02:00
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
queue_work(work->wq, work);
|
2016-02-14 15:17:46 +01:00
|
|
|
leave_critical_section(flags);
|
2014-10-10 22:52:04 +02:00
|
|
|
}
|
|
|
|
|
2023-12-07 06:00:41 +01:00
|
|
|
/* Return true if any worker thread of this queue is currently blocked
 * waiting for the given work item to be cancelled.
 */

static bool work_is_canceling(FAR struct kworker_s *kworkers, int nthreads,
                              FAR struct work_s *work)
{
  int count;
  int i;

  for (i = 0; i < nthreads; i++)
    {
      /* Skip workers that are not processing this work item. */

      if (kworkers[i].work != work)
        {
          continue;
        }

      /* A negative count means someone is blocked on the wait sem. */

      nxsem_get_value(&kworkers[i].wait, &count);
      if (count < 0)
        {
          return true;
        }
    }

  return false;
}
|
|
|
|
|
2014-10-10 14:22:51 +02:00
|
|
|
/****************************************************************************
|
2014-10-10 16:35:58 +02:00
|
|
|
* Public Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************************
|
2024-06-25 15:01:48 +02:00
|
|
|
* Name: work_queue/work_queue_wq
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
|
|
|
* Description:
|
2024-06-25 15:01:48 +02:00
|
|
|
* Queue work to be performed at a later time. All queued work will be
|
|
|
|
* performed on the worker thread of execution (not the caller's).
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2017-08-28 15:46:48 +02:00
|
|
|
* The work structure is allocated and must be initialized to all zero by
|
|
|
|
* the caller. Otherwise, the work structure is completely managed by the
|
|
|
|
* work queue logic. The caller should never modify the contents of the
|
|
|
|
* work queue structure directly. If work_queue() is called before the
|
2021-07-05 23:19:30 +02:00
|
|
|
* previous work has been performed and removed from the queue, then any
|
2017-08-28 15:46:48 +02:00
|
|
|
* pending work will be canceled and lost.
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
2018-02-01 17:00:02 +01:00
|
|
|
* Input Parameters:
|
2024-06-25 15:01:48 +02:00
|
|
|
* qid - The work queue ID (must be HPWORK or LPWORK)
|
|
|
|
* wqueue - The work queue handle
|
2014-10-10 16:35:58 +02:00
|
|
|
* work - The work structure to queue
|
2021-07-05 23:19:30 +02:00
|
|
|
* worker - The worker callback to be invoked. The callback will be
|
|
|
|
* invoked on the worker thread of execution.
|
|
|
|
* arg - The argument that will be passed to the worker callback when
|
2024-06-25 15:01:48 +02:00
|
|
|
* it is invoked.
|
2014-10-10 16:35:58 +02:00
|
|
|
* delay - Delay (in clock ticks) from the time queue until the worker
|
|
|
|
* is invoked. Zero means to perform the work immediately.
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
|
|
|
* Returned Value:
|
2014-10-10 16:35:58 +02:00
|
|
|
* Zero on success, a negated errno on failure
|
2014-10-10 14:22:51 +02:00
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
int work_queue_wq(FAR struct kwork_wqueue_s *wqueue,
|
|
|
|
FAR struct work_s *work, worker_t worker,
|
|
|
|
FAR void *arg, clock_t delay)
|
2014-10-10 16:35:58 +02:00
|
|
|
{
|
2021-06-19 11:29:30 +02:00
|
|
|
irqstate_t flags;
|
2021-12-22 06:26:10 +01:00
|
|
|
int ret = OK;
|
2021-06-19 11:29:30 +02:00
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
if (wqueue == NULL || work == NULL || worker == NULL)
|
|
|
|
{
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2021-06-19 11:29:30 +02:00
|
|
|
/* Interrupts are disabled so that this logic can be called from with
|
|
|
|
* task logic or from interrupt handling logic.
|
|
|
|
*/
|
|
|
|
|
|
|
|
flags = enter_critical_section();
|
|
|
|
|
2022-08-17 09:42:00 +02:00
|
|
|
/* Remove the entry from the timer and work queue. */
|
|
|
|
|
2023-01-11 07:12:20 +01:00
|
|
|
if (work->worker != NULL)
|
|
|
|
{
|
2024-06-25 15:01:48 +02:00
|
|
|
work_cancel_wq(wqueue, work);
|
2023-01-11 07:12:20 +01:00
|
|
|
}
|
2022-08-17 09:42:00 +02:00
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))
|
2023-12-07 06:00:41 +01:00
|
|
|
{
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize the work structure. */
|
2021-06-19 11:29:30 +02:00
|
|
|
|
2023-12-07 06:00:41 +01:00
|
|
|
work->worker = worker; /* Work callback. non-NULL means queued */
|
2024-06-25 15:01:48 +02:00
|
|
|
work->arg = arg; /* Callback argument */
|
|
|
|
work->wq = wqueue; /* Work queue */
|
2023-12-07 06:00:41 +01:00
|
|
|
|
|
|
|
/* Queue the new work */
|
|
|
|
|
|
|
|
if (!delay)
|
|
|
|
{
|
|
|
|
queue_work(wqueue, work);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2024-06-25 15:01:48 +02:00
|
|
|
wd_start(&work->u.timer, delay, work_timer_expiry, (wdparm_t)work);
|
2023-12-07 06:00:41 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
leave_critical_section(flags);
|
2021-12-22 06:26:10 +01:00
|
|
|
return ret;
|
2014-10-10 14:22:51 +02:00
|
|
|
}
|
|
|
|
|
2024-06-25 15:01:48 +02:00
|
|
|
/* Convenience wrapper: resolve the work queue ID to its handle and
 * delegate to work_queue_wq().
 */

int work_queue(int qid, FAR struct work_s *work, worker_t worker,
               FAR void *arg, clock_t delay)
{
  FAR struct kwork_wqueue_s *wqueue = work_qid2wq(qid);

  return work_queue_wq(wqueue, work, worker, arg, delay);
}
|
|
|
|
|
2014-10-10 16:35:58 +02:00
|
|
|
#endif /* CONFIG_SCHED_WORKQUEUE */
|