/****************************************************************************
 * mm/iob/iob_free.c
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <stdbool.h>
#include <assert.h>
#include <debug.h>

#include <nuttx/irq.h>
#include <nuttx/arch.h>
#ifdef CONFIG_IOB_ALLOC
#  include <nuttx/kmalloc.h>
#endif
#include <nuttx/mm/iob.h>

#include "iob.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#ifdef CONFIG_IOB_NOTIFIER
#  if !defined(CONFIG_IOB_NOTIFIER_DIV) || CONFIG_IOB_NOTIFIER_DIV < 2
#    define IOB_DIVIDER 1
#  elif CONFIG_IOB_NOTIFIER_DIV < 4
#    define IOB_DIVIDER 2
#  elif CONFIG_IOB_NOTIFIER_DIV < 8
#    define IOB_DIVIDER 4
#  elif CONFIG_IOB_NOTIFIER_DIV < 16
#    define IOB_DIVIDER 8
#  elif CONFIG_IOB_NOTIFIER_DIV < 32
#    define IOB_DIVIDER 16
#  elif CONFIG_IOB_NOTIFIER_DIV < 64
#    define IOB_DIVIDER 32
#  else
#    define IOB_DIVIDER 64
#  endif
#endif
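
/* IOB_DIVIDER is rounded down to a power of two (and capped at 64) so that
 * IOB_MASK can serve as an inexpensive modulo:  the notifier logic below
 * signals waiters only when the count of available IOBs is a positive
 * multiple of IOB_DIVIDER.
 */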

#define IOB_MASK (IOB_DIVIDER - 1)

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: iob_free
 *
 * Description:
 *   Free the I/O buffer at the head of a buffer chain, returning it to the
 *   free list.  The link to the next I/O buffer in the chain is returned.
 *
 ****************************************************************************/

FAR struct iob_s *iob_free(FAR struct iob_s *iob)
{
  FAR struct iob_s *next = iob->io_flink;
  irqstate_t flags;
#ifdef CONFIG_IOB_NOTIFIER
  int16_t navail;
#endif
#if CONFIG_IOB_THROTTLE > 0
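  /* True:  the freed IOB was committed to a waiter blocked on the
   * throttled semaphore
   */
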
  bool committed_throttled = false;
#endif

  iobinfo("iob=%p io_pktlen=%u io_len=%u next=%p\n",
          iob, iob->io_pktlen, iob->io_len, next);

  /* Copy the data that only exists in the head of an I/O buffer chain into
   * the next entry.
   */

  if (next != NULL)
    {
      /* Copy and decrement the total packet length, being careful to
       * do nothing too crazy.
       */

      if (iob->io_pktlen > iob->io_len)
        {
          /* Adjust the packet length and move it to the next entry */

          next->io_pktlen = iob->io_pktlen - iob->io_len;
          DEBUGASSERT(next->io_pktlen >= next->io_len);
        }
      else
        {
          /* This can only happen if the freed entry is not the first
           * entry in the chain...
           */

          next->io_pktlen = 0;
        }

      iobinfo("next=%p io_pktlen=%u io_len=%u\n",
              next, next->io_pktlen, next->io_len);
    }

#ifdef CONFIG_IOB_ALLOC
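  /* An IOB with an io_free callback does not belong to the pre-allocated
   * pool:  invoke the callback to release its data buffer, then free the
   * dynamically allocated IOB structure itself instead of returning it to
   * the free list.
   */
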
  if (iob->io_free != NULL)
    {
      iob->io_free(iob->io_data);
      kmm_free(iob);
      return next;
    }
#endif

  /* Free the I/O buffer by adding it to the head of the free or the
   * committed list.  We don't know what context we are called from so
   * we use extreme measures to protect the free list:  We disable
   * interrupts very briefly.
   */

  flags = spin_lock_irqsave(&g_iob_lock);

  /* Which list?  If there is a task waiting for an IOB, put the IOB on
   * the committed list, where it is reserved for that allocation (and not
   * available to iob_tryalloc()); otherwise, put it on the free list.
   * This is true for both throttled and non-throttled cases.
   */

#if CONFIG_IOB_THROTTLE > 0
  if ((g_iob_sem.semcount < 0) ||
      ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
       (g_throttle_sem.semcount < 0)))
#else
  if (g_iob_sem.semcount < 0)
#endif
    {
      iob->io_flink   = g_iob_committed;
      g_iob_committed = iob;
#if CONFIG_IOB_THROTTLE > 0
      if ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
          (g_throttle_sem.semcount < 0))
        {
          committed_throttled = true;
        }
#endif
    }
  else
    {
      iob->io_flink  = g_iob_freelist;
      g_iob_freelist = iob;
    }

  spin_unlock_irqrestore(&g_iob_lock, flags);

  /* Signal that an IOB is available.  This is done with the scheduler
   * locked to make sure that both g_iob_sem and g_throttle_sem are
   * incremented together (if applicable).  After the scheduler is
   * unlocked, if there is a thread blocked waiting for an IOB, this will
   * wake up exactly one thread.  The semaphore count will correctly
   * indicate that the awakened task owns an IOB and should find it in the
   * committed list.
   */
2014-06-22 19:27:57 +02:00
|
|
|
|
2024-06-12 22:18:17 +02:00
|
|
|
sched_lock();
|
|
|
|
|
2017-10-03 23:35:24 +02:00
|
|
|
nxsem_post(&g_iob_sem);
|
2018-09-10 19:32:09 +02:00
|
|
|
DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);
|
|
|
|

#if CONFIG_IOB_THROTTLE > 0
  flags = spin_lock_irqsave(&g_iob_lock);

  if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
    {
      /* If posting to the throttled semaphore is going to awaken a
       * waiting task, then the g_iob_sem count should be decremented
       * because an I/O buffer (from the head of the g_iob_committed list)
       * will be allocated to this waiting task.
       *
       * Decrementing g_throttle_sem (when posting to g_iob_sem) is not
       * necessary because this condition can only occur when
       * g_throttle_sem is less than or equal to zero.  On the other hand,
       * if g_iob_sem is greater than CONFIG_IOB_THROTTLE and there is a
       * waiting thread, then the I/O buffer just freed will be committed
       * to a waiting task and is not available for general use.
       */

      if (committed_throttled)
        {
          g_iob_sem.semcount--;
        }

      spin_unlock_irqrestore(&g_iob_lock, flags);

      nxsem_post(&g_throttle_sem);
      DEBUGASSERT(g_throttle_sem.semcount <=
                  (CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE));
    }
  else
    {
      spin_unlock_irqrestore(&g_iob_lock, flags);
    }
#endif

  sched_unlock();

#ifdef CONFIG_IOB_NOTIFIER
  /* Check if the IOB was claimed by a thread that is blocked waiting
   * for an IOB.
   */

  navail = iob_navail(false);
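
  /* Rate-limit the notifications:  only signal when the number of
   * available IOBs is a positive multiple of IOB_DIVIDER.
   */
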
  if (navail > 0 && (navail & IOB_MASK) == 0)
    {
      /* Signal any threads that have requested a signal notification
       * when an IOB becomes available.
       */

      iob_notifier_signal();
    }
#endif

  /* And return the I/O buffer after the one that was freed */

  return next;
}