mm/iob: Simplify IOB alloc/free logic

- `g_iob_sem.semcount` must always equal the total number of free IOBs. It can
be negative when there are no free IOBs and threads are waiting for one.
- `g_throttle_sem.semcount` represents the number of IOBs available for
throttled IOB allocations. Like any other semaphore, it only goes negative
when a thread is waiting on it.
- Both semaphores track the same resource (free IOBs), so they must be
incremented/decremented together:
  - Whenever an IOB is freed, if a thread is waiting for a non-throttled IOB,
or a thread is waiting for a throttled IOB and at least `CONFIG_IOB_THROTTLE`
buffers are available, the IOB is placed on the committed list
(`g_iob_committed`). Otherwise, it is placed on the common free list
(`g_iob_freelist`). A simplified model of this free path is sketched after
this list.
  - `g_iob_sem` is always incremented when an IOB is freed, but
`g_throttle_sem` is incremented only if at least `CONFIG_IOB_THROTTLE`
buffers are free.
  - Both semaphores are posted with the scheduler locked to avoid any
mismatch between the two counts.
  - A task blocked on an IOB semaphore (in `iob_allocwait`) is awakened and
then takes its buffer from `g_iob_committed`. The highest-priority task
waiting on the semaphore is awakened first.
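
As referenced above, here is a minimal standalone sketch of the free-path
accounting. It is not the NuttX implementation: the semaphores are modeled as
plain counters, all locking is omitted, and `model_iob_free`,
`g_iob_semcount`, and `g_throttle_semcount` are invented names for
illustration.

```c
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_IOB_NBUFFERS 8
#define CONFIG_IOB_THROTTLE 2

/* Plain counters standing in for g_iob_sem.semcount and
 * g_throttle_sem.semcount.
 */

static int g_iob_semcount      = CONFIG_IOB_NBUFFERS;
static int g_throttle_semcount = CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE;

static void model_iob_free(void)
{
  /* Commit the buffer if a non-throttled waiter exists, or if a throttled
   * waiter exists and the throttle reserve is satisfied.
   */

  bool commit = (g_iob_semcount < 0) ||
                (g_iob_semcount >= CONFIG_IOB_THROTTLE &&
                 g_throttle_semcount < 0);

  printf("freed IOB -> %s list\n",
         commit ? "committed (g_iob_committed)" : "free (g_iob_freelist)");

  /* g_iob_sem is always posted; g_throttle_sem is posted only once more
   * than CONFIG_IOB_THROTTLE buffers are accounted for.
   */

  g_iob_semcount++;
  if (g_iob_semcount > CONFIG_IOB_THROTTLE)
    {
      g_throttle_semcount++;
    }
}

int main(void)
{
  g_iob_semcount      = -1; /* One thread blocked on g_iob_sem... */
  g_throttle_semcount = 0;  /* ...and no throttled buffers left.  */

  model_iob_free();         /* The freed buffer is committed to the waiter */

  printf("g_iob_semcount=%d g_throttle_semcount=%d\n",
         g_iob_semcount, g_throttle_semcount);
  return 0;
}
```

The point to notice is that both counters move within one indivisible step,
which is what the real code achieves with the spinlock plus `sched_lock()`.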
Author: Tiago Medicci Serrano, 2024-06-12 17:18:17 -03:00 (committed by Xiang Xiao)
parent 69d8a17dda
commit 50aeea2dc0
2 changed files with 68 additions and 45 deletions

mm/iob/iob_alloc.c

@@ -163,36 +163,6 @@ static FAR struct iob_s *iob_allocwait(bool throttled, unsigned int timeout)
        */

       iob = iob_alloc_committed();
-      if (iob == NULL)
-        {
-          /* We need release our count so that it is available to
-           * iob_tryalloc(), perhaps allowing another thread to take our
-           * count. In that event, iob_tryalloc() will fail above and
-           * we will have to wait again.
-           */
-
-          sem->semcount++;
-          iob = iob_tryalloc(throttled);
-        }
-
-      /* REVISIT: I think this logic should be moved inside of
-       * iob_alloc_committed, so that it can exist inside of the critical
-       * section along with all other sem count changes.
-       */
-
-#if CONFIG_IOB_THROTTLE > 0
-      else
-        {
-          if (throttled)
-            {
-              g_iob_sem.semcount--;
-            }
-          else
-            {
-              g_throttle_sem.semcount--;
-            }
-        }
-#endif
     }
 }
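
For context, a standalone model of the wait path that remains after this
hunk, assuming the semaphore wait has already succeeded. All `model_*` names
are hypothetical; the real code also handles timeouts and runs inside a
critical section.

```c
#include <stdio.h>

/* Hypothetical stand-ins for struct iob_s, g_iob_committed, and
 * g_iob_sem.semcount.
 */

struct model_iob
{
  struct model_iob *flink;
};

static struct model_iob  g_buf;
static struct model_iob *g_committed = NULL;
static int               g_semcount  = 0;

/* Models iob_alloc_committed(): pop the buffer reserved by iob_free(). */

static struct model_iob *model_alloc_committed(void)
{
  struct model_iob *iob = g_committed;

  if (iob != NULL)
    {
      g_committed = iob->flink;
    }

  return iob;
}

/* Models the post-change wait path: once the semaphore wait succeeds, the
 * awakened task owns exactly one reserved buffer, so no retry or count
 * re-post is needed.
 */

static struct model_iob *model_allocwait(void)
{
  g_semcount--;                   /* The (elided) wait consumed one count */
  return model_alloc_committed();
}

int main(void)
{
  /* Pretend iob_free() just committed one buffer and posted the count. */

  g_buf.flink = NULL;
  g_committed = &g_buf;
  g_semcount  = 0;                /* Was -1 with one waiter, then posted */

  printf("allocwait -> %p (expected %p)\n",
         (void *)model_allocwait(), (void *)&g_buf);
  return 0;
}
```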
@@ -297,8 +267,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
 #if CONFIG_IOB_THROTTLE > 0
       /* If there are free I/O buffers for this allocation */

-      if (sem->semcount > 0 ||
-          (throttled && g_iob_sem.semcount - CONFIG_IOB_THROTTLE > 0))
+      if (sem->semcount > 0)
 #endif
         {
           /* Take the I/O buffer from the head of the free list */
@@ -324,14 +293,17 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
       DEBUGASSERT(g_iob_sem.semcount >= 0);

 #if CONFIG_IOB_THROTTLE > 0
-      /* The throttle semaphore is a little more complicated because
-       * it can be negative! Decrementing is still safe, however.
-       *
-       * Note: usually g_throttle_sem.semcount >= -CONFIG_IOB_THROTTLE.
-       * But it can be smaller than that if there are blocking threads.
+      /* The throttle semaphore is used to throttle the number of
+       * free buffers that are available. It is used to prevent
+       * the overrunning of the free buffer list. Please note that
+       * it can only be decremented to zero, which indicates no
+       * throttled buffers are available.
        */

-      g_throttle_sem.semcount--;
+      if (g_throttle_sem.semcount > 0)
+        {
+          g_throttle_sem.semcount--;
+        }
 #endif

       spin_unlock_irqrestore(&g_iob_lock, flags);
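
A short usage sketch of the non-blocking allocator touched by these hunks. It
assumes a NuttX build environment (`<nuttx/mm/iob.h>`); `try_alloc_fallback`
is a hypothetical helper, while `iob_tryalloc()` and `struct iob_s` are the
interfaces shown in the diff.

```c
#include <nuttx/config.h>
#include <nuttx/mm/iob.h>

#include <errno.h>

static int try_alloc_fallback(FAR struct iob_s **out)
{
  /* A throttled attempt keeps CONFIG_IOB_THROTTLE buffers in reserve;
   * with the change above it simply returns NULL once g_throttle_sem
   * has been decremented to zero.
   */

  FAR struct iob_s *iob = iob_tryalloc(true);

  if (iob == NULL)
    {
      /* Fall back to the unthrottled pool, e.g. for critical traffic
       * that is allowed to consume the reserved buffers.
       */

      iob = iob_tryalloc(false);
    }

  if (iob == NULL)
    {
      return -ENOMEM;
    }

  *out = iob;
  return 0;
}
```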

mm/iob/iob_free.c

@@ -81,6 +81,9 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
 #ifdef CONFIG_IOB_NOTIFIER
   int16_t navail;
 #endif
+#if CONFIG_IOB_THROTTLE > 0
+  bool committed_thottled = false;
+#endif

   iobinfo("iob=%p io_pktlen=%u io_len=%u next=%p\n",
           iob, iob->io_pktlen, iob->io_len, next);
@@ -135,13 +138,27 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       /* Which list? If there is a task waiting for an IOB, then put
        * the IOB on either the free list or on the committed list where
        * it is reserved for that allocation (and not available to
-       * iob_tryalloc()).
+       * iob_tryalloc()). This is true for both throttled and non-throttled
+       * cases.
        */

+#if CONFIG_IOB_THROTTLE > 0
+      if ((g_iob_sem.semcount < 0) ||
+          ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
+           (g_throttle_sem.semcount < 0)))
+#else
       if (g_iob_sem.semcount < 0)
+#endif
         {
           iob->io_flink = g_iob_committed;
           g_iob_committed = iob;
+#if CONFIG_IOB_THROTTLE > 0
+          if ((g_iob_sem.semcount >= CONFIG_IOB_THROTTLE) &&
+              (g_throttle_sem.semcount < 0))
+            {
+              committed_thottled = true;
+            }
+#endif
         }
       else
         {
@@ -151,21 +168,55 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
   spin_unlock_irqrestore(&g_iob_lock, flags);

-  /* Signal that an IOB is available. If there is a thread blocked,
-   * waiting for an IOB, this will wake up exactly one thread. The
-   * semaphore count will correctly indicated that the awakened task
-   * owns an IOB and should find it in the committed list.
+  /* Signal that an IOB is available. This is done with the scheduler locked
+   * to make sure that both g_iob_sem and g_throttle_sem are incremented
+   * together (if applicable). After the scheduler is unlocked, if there
+   * is a thread blocked, waiting for an IOB, this will wake up exactly
+   * one thread. The semaphore count will correctly indicate that the
+   * awakened task owns an IOB and should find it in the committed list.
    */

+  sched_lock();
+
   nxsem_post(&g_iob_sem);
   DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);

 #if CONFIG_IOB_THROTTLE > 0
-  nxsem_post(&g_throttle_sem);
-  DEBUGASSERT(g_throttle_sem.semcount <=
-              (CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE));
+  flags = spin_lock_irqsave(&g_iob_lock);
+
+  if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
+    {
+      /* If posting to the throttled semaphore is going to awake a
+       * waiting task, then the g_iob_sem count should be decremented
+       * because an I/O buffer (from the head of the g_iob_committed list)
+       * will be allocated to this waiting task.
+       *
+       * Decrementing the g_throttle_sem (when posting to the g_iob_sem)
+       * is not necessary because this condition can only occur when the
+       * g_throttle_sem is less than or equal to zero. On the other hand,
+       * if the g_iob_sem is greater than CONFIG_IOB_THROTTLE and there
+       * is a waiting thread, then the I/O buffer just freed will be
+       * committed to a waiting task and is not available for general use.
+       */
+
+      if (committed_thottled)
+        {
+          g_iob_sem.semcount--;
+        }
+
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+
+      nxsem_post(&g_throttle_sem);
+      DEBUGASSERT(g_throttle_sem.semcount <=
+                  (CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE));
+    }
+  else
+    {
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+    }
 #endif

+  sched_unlock();
+
 #ifdef CONFIG_IOB_NOTIFIER
   /* Check if the IOB was claimed by a thread that is blocked waiting
    * for an IOB.