/****************************************************************************
 * sched/wqueue/kwork_inherit.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership. The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sched.h>
#include <debug.h>

#include <nuttx/irq.h>
#include <nuttx/wqueue.h>

#include "sched/sched.h"
#include "wqueue/wqueue.h"

#if defined(CONFIG_SCHED_WORKQUEUE) && defined(CONFIG_SCHED_LPWORK) && \
    defined(CONFIG_PRIORITY_INHERITANCE)

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: lpwork_boostworker
 *
 * Description:
 *   Called by the work queue client to assure that the priority of the low-
 *   priority worker thread is at least at the requested level, reqprio. This
 *   function would normally be called just before calling work_queue().
 *
 * Input Parameters:
 *   wpid    - Process ID of the low-priority worker thread to be boosted
 *   reqprio - Requested minimum worker thread priority
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

static void lpwork_boostworker(pid_t wpid, uint8_t reqprio)
{
  FAR struct tcb_s *wtcb;

  /* Get the TCB of the low priority worker thread from the process ID. */

  wtcb = nxsched_get_tcb(wpid);
  DEBUGASSERT(wtcb);

#if CONFIG_SEM_NNESTPRIO > 0
  /* If the priority of the client thread is greater than the base
   * priority of the worker thread, then we may need to adjust the worker
   * thread's priority now or later to that priority.
   */

  if (reqprio > wtcb->base_priority)
    {
      /* If the new priority is greater than the current, possibly already
       * boosted priority of the worker thread, then we will have to raise
       * the worker thread's priority now.
       */

      if (reqprio > wtcb->sched_priority)
        {
          /* If the current priority of the worker thread has already been
           * boosted, then add the boost priority to the list of restoration
           * priorities. When the higher priority waiter thread gets its
           * count, then we need to revert the worker thread to this saved
           * priority (not to its base priority).
           */

          if (wtcb->sched_priority > wtcb->base_priority)
            {
              /* Save the current, boosted priority of the worker thread. */

              if (wtcb->npend_reprio < CONFIG_SEM_NNESTPRIO)
                {
                  wtcb->pend_reprios[wtcb->npend_reprio] =
                    wtcb->sched_priority;
                  wtcb->npend_reprio++;
                }
              else
                {
                  serr("ERROR: CONFIG_SEM_NNESTPRIO exceeded\n");
                  DEBUGASSERT(wtcb->npend_reprio < CONFIG_SEM_NNESTPRIO);
                }
            }

          /* Raise the priority of the worker. This cannot cause a context
           * switch because we have preemption disabled. The worker thread
           * may be marked "pending" and the switch may occur during
           * sched_unlock() processing.
           */

          nxsched_set_priority(wtcb, reqprio);
        }
      else
        {
          /* The new priority is above the base priority of the worker,
           * but not as high as its current working priority. Just put it
           * in the list of pending restoration priorities so that when the
           * higher priority thread gets its count, we can revert to this
           * saved priority and not to the base priority.
           */

          if (wtcb->npend_reprio < CONFIG_SEM_NNESTPRIO)
            {
              wtcb->pend_reprios[wtcb->npend_reprio] = reqprio;
              wtcb->npend_reprio++;
            }
          else
            {
              serr("ERROR: CONFIG_SEM_NNESTPRIO exceeded\n");
              DEBUGASSERT(wtcb->npend_reprio < CONFIG_SEM_NNESTPRIO);
            }
        }
    }
#else
  /* If the priority of the client thread is less than or equal to the
   * priority of the worker thread, then do nothing because the thread is
   * already running at a sufficient priority.
   */

  if (reqprio > wtcb->sched_priority)
    {
      /* Raise the priority of the worker thread. This cannot cause a
       * context switch because we have preemption disabled. The task
       * will be marked "pending" and the switch will occur during
       * sched_unlock() processing.
       */

      nxsched_set_priority(wtcb, reqprio);
    }
#endif
}

/****************************************************************************
 * Name: lpwork_restoreworker
 *
 * Description:
 *   This function is called to restore the priority after it was previously
 *   boosted. This is often done by client logic on the worker thread when
 *   the scheduled work completes. It will check if we need to drop the
 *   priority of the worker thread.
 *
 * Input Parameters:
 *   wpid    - Process ID of the low-priority worker thread to be restored
 *   reqprio - Previously requested minimum worker thread priority to be
 *             "unboosted"
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

static void lpwork_restoreworker(pid_t wpid, uint8_t reqprio)
{
  FAR struct tcb_s *wtcb;
#if CONFIG_SEM_NNESTPRIO > 0
  uint8_t wpriority;
  int index;
  int selected;
#endif

  /* Get the TCB of the low priority worker thread from the process ID. */

  wtcb = nxsched_get_tcb(wpid);
  DEBUGASSERT(wtcb);

  /* Was the priority of the worker thread boosted? If so, then drop its
   * priority back to the correct level. What is the correct level?
   */

  if (wtcb->sched_priority != wtcb->base_priority)
    {
#if CONFIG_SEM_NNESTPRIO > 0
      /* Are there other, pending priority levels to revert to? */

      if (wtcb->npend_reprio < 1)
        {
          /* No... the worker thread has only been boosted once.
           * npend_reprio should be 0 and the boosted priority should be the
           * priority of the client task (reqprio)
           *
           * That latter assumption may not be true if the client's priority
           * was also boosted so that it no longer matches the wtcb's
           * sched_priority. Or if CONFIG_SEM_NNESTPRIO is too small (so
           * that we do not have a proper record of the reprioritizations).
           */

          DEBUGASSERT(/* wtcb->sched_priority == reqprio && */
                      wtcb->npend_reprio == 0);

          /* Reset the worker's priority back to the base priority. */

          nxsched_reprioritize(wtcb, wtcb->base_priority);
        }

      /* There are multiple pending priority levels. The worker thread's
       * "boosted" priority could be greater than or equal to "reqprio" (it
       * could be greater if its priority was boosted because it also holds
       * some semaphore).
       */

      else if (wtcb->sched_priority <= reqprio)
        {
          /* The worker thread has been boosted to the same priority as the
           * waiter thread that just received the count. We will simply
           * reprioritize to the next highest pending priority.
           */

          /* Find the highest pending priority and remove it from the list */

          for (index = 1, selected = 0; index < wtcb->npend_reprio; index++)
            {
              if (wtcb->pend_reprios[index] > wtcb->pend_reprios[selected])
                {
                  selected = index;
                }
            }

          /* Remove the highest priority pending priority from the list */

          wpriority = wtcb->pend_reprios[selected];
          index = wtcb->npend_reprio - 1;
          if (index > 0)
            {
              wtcb->pend_reprios[selected] = wtcb->pend_reprios[index];
            }

          wtcb->npend_reprio = index;

          /* And apply that priority to the thread (while retaining the
           * base_priority)
           */

          nxsched_set_priority(wtcb, wpriority);
        }
      else
        {
          /* The worker thread has been boosted to a higher priority than the
           * waiter task. The pending priority should be in the list (unless
           * it was lost because of list overflow or because the worker was
           * reprioritized again unbeknownst to the priority inheritance
           * logic).
           *
           * Search the list for the matching priority.
           */

          for (index = 0; index < wtcb->npend_reprio; index++)
            {
              /* Does this pending priority match the priority of the thread
               * that just received the count?
               */

              if (wtcb->pend_reprios[index] == reqprio)
                {
                  /* Yes, remove it from the list */

                  selected = wtcb->npend_reprio - 1;
                  if (selected > 0)
                    {
                      wtcb->pend_reprios[index] =
                        wtcb->pend_reprios[selected];
                    }

                  wtcb->npend_reprio = selected;
                  break;
                }
            }
        }
#else
      /* There are no alternative priorities to restore, so drop the
       * priority of the worker thread all the way back to the thread's
       * "base" priority.
       */

      nxsched_reprioritize(wtcb, wtcb->base_priority);
#endif
    }
}

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: lpwork_boostpriority
 *
 * Description:
 *   Called by the work queue client to assure that the priority of the low-
 *   priority worker thread is at least at the requested level, reqprio. This
 *   function would normally be called just before calling work_queue().
 *
 * Input Parameters:
 *   reqprio - Requested minimum worker thread priority
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void lpwork_boostpriority(uint8_t reqprio)
{
  irqstate_t flags;
  int wndx;

  /* Clip to the configured maximum priority */

  if (reqprio > CONFIG_SCHED_LPWORKPRIOMAX)
    {
      reqprio = CONFIG_SCHED_LPWORKPRIOMAX;
    }

  /* Prevent context switches until we get the priorities right */

  flags = enter_critical_section();
  sched_lock();

  /* Adjust the priority of every worker thread */

  for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
    {
      lpwork_boostworker(g_lpwork.worker[wndx].pid, reqprio);
    }

  sched_unlock();
  leave_critical_section(flags);
}

/****************************************************************************
 * Name: lpwork_restorepriority
 *
 * Description:
 *   This function is called to restore the priority after it was previously
 *   boosted. This is often done by client logic on the worker thread when
 *   the scheduled work completes. It will check if we need to drop the
 *   priority of the worker thread.
 *
 * Input Parameters:
 *   reqprio - Previously requested minimum worker thread priority to be
 *             "unboosted"
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

void lpwork_restorepriority(uint8_t reqprio)
{
  irqstate_t flags;
  int wndx;

  /* Clip to the configured maximum priority */

  if (reqprio > CONFIG_SCHED_LPWORKPRIOMAX)
    {
      reqprio = CONFIG_SCHED_LPWORKPRIOMAX;
    }

  /* Prevent context switches until we get the priorities right */

  flags = enter_critical_section();
  sched_lock();

  /* Adjust the priority of every worker thread */

  for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
    {
      lpwork_restoreworker(g_lpwork.worker[wndx].pid, reqprio);
    }

  sched_unlock();
  leave_critical_section(flags);
}
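
/* Usage note (illustrative sketch only, not code used by this file): per
 * the descriptions of lpwork_boostpriority() and lpwork_restorepriority()
 * above, a client would typically boost the worker thread just before
 * calling work_queue() and restore the priority from the work callback
 * once the work completes.  The names client_work, client_worker, and
 * client_schedule below are hypothetical.
 *
 *   static struct work_s client_work;
 *
 *   static void client_worker(FAR void *arg)
 *   {
 *     uint8_t client_prio = (uint8_t)(uintptr_t)arg;
 *
 *     ... do the actual work, then undo the earlier boost ...
 *
 *     lpwork_restorepriority(client_prio);
 *   }
 *
 *   static void client_schedule(uint8_t client_prio)
 *   {
 *     lpwork_boostpriority(client_prio);
 *     work_queue(LPWORK, &client_work, client_worker,
 *                (FAR void *)(uintptr_t)client_prio, 0);
 *   }
 */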

#endif /* CONFIG_SCHED_WORKQUEUE && CONFIG_SCHED_LPWORK && \
        * CONFIG_PRIORITY_INHERITANCE */