mm/iob: Replace the critical section with spin lock

Based on the discussion: https://github.com/apache/nuttx/issues/10981

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
Author:    Xiang Xiao 2023-10-21 17:20:29 +08:00
Committer: Petro Karashchenko
Parent:    d5d4006c6b
Commit:    9e4a1be8d4
9 changed files with 34 additions and 21 deletions
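All nine files below apply the same mechanical substitution: the global critical section that guarded the IOB free and committed lists becomes a subsystem-private spinlock. A minimal sketch of the two patterns side by side (g_demo_lock and the function name are illustrative stand-ins, not code from the patch):

    #include <nuttx/config.h>
    #include <nuttx/irq.h>
    #include <nuttx/spinlock.h>

    static spinlock_t g_demo_lock = SP_UNLOCKED;

    static void locking_pattern_sketch(void)
    {
      irqstate_t flags;

      /* Old pattern: serializes against every other critical section
       * in the system (and all CPUs under SMP).
       */

      flags = enter_critical_section();
      /* ... manipulate a shared IOB list ... */
      leave_critical_section(flags);

      /* New pattern: disables interrupts on the local CPU and takes
       * only the subsystem-private lock, so unrelated code paths are
       * not stalled.
       */

      flags = spin_lock_irqsave(&g_demo_lock);
      /* ... manipulate a shared IOB list ... */
      spin_unlock_irqrestore(&g_demo_lock, flags);
    }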


@@ -30,6 +30,7 @@
 #include <debug.h>
 
 #include <nuttx/mm/iob.h>
+#include <nuttx/spinlock.h>
 #include <nuttx/semaphore.h>
 
 #ifdef CONFIG_MM_IOB
@@ -80,6 +81,8 @@ extern sem_t g_throttle_sem; /* Counts available I/O buffers when throttled */
 extern sem_t g_qentry_sem;   /* Counts free I/O buffer queue containers */
 #endif
 
+extern spinlock_t g_iob_lock;
+
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
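Declaring the lock extern in the subsystem's private header lets every mm/iob compilation unit share a single lock; the matching definition appears in a later hunk of this diff. In sketch form (names are from the patch, comments are editorial):

    #include <nuttx/spinlock.h>

    /* In the shared private header: a declaration only */

    extern spinlock_t g_iob_lock;

    /* In exactly one source file: the definition. SP_UNLOCKED is a
     * static initializer, so the lock is usable before any
     * initialization code runs.
     */

    spinlock_t g_iob_lock = SP_UNLOCKED;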


@@ -61,7 +61,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
   qentry->qe_flink = NULL;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   if (!iobq->qh_head)
     {
       iobq->qh_head = qentry;
@@ -74,7 +74,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
       iobq->qh_tail = qentry;
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return 0;
 }


@@ -73,7 +73,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
    * to protect the committed list: We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
@@ -92,7 +92,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
       iob->io_pktlen = 0; /* Total length of the packet */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return iob;
 }
@@ -272,7 +272,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
    * to protect the free list: We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
 #if CONFIG_IOB_THROTTLE > 0
   /* If there are free I/O buffers for this allocation */
@@ -314,7 +314,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
           g_throttle_sem.semcount--;
 #endif
 
-          leave_critical_section(flags);
+          spin_unlock_irqrestore(&g_iob_lock, flags);
 
           /* Put the I/O buffer in a known state */
@@ -326,6 +326,6 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
   return NULL;
 }
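iob_tryalloc() keeps its non-blocking contract: when no buffer is free it returns NULL instead of waiting, which is why both exit paths above must release the lock before returning. A hypothetical caller, to show the intended usage (the function name and error handling are assumptions, not part of the patch):

    #include <nuttx/config.h>

    #include <errno.h>
    #include <stdbool.h>

    #include <nuttx/mm/iob.h>

    static int my_rx_sketch(void)
    {
      FAR struct iob_s *iob;

      /* Non-throttled attempt; safe where blocking is forbidden
       * because iob_tryalloc() returns NULL instead of sleeping.
       */

      iob = iob_tryalloc(false);
      if (iob == NULL)
        {
          return -ENOMEM;  /* No buffer available right now */
        }

      /* ... fill the buffer, then hand it off or release it ... */

      iob_free(iob);
      return 0;
    }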


@@ -57,7 +57,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
    * to protect the committed list: We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Take the I/O buffer from the head of the committed list */
@@ -73,7 +73,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
       iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return iobq;
 }
@@ -199,7 +199,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
    * to protect the free list: We disable interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
   iobq = g_iob_freeqlist;
   if (iobq)
     {
@@ -225,7 +225,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
      iobq->qe_head = NULL; /* Nothing is contained */
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 
   return iobq;
 }
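Both committed-list allocators above pop the head of a committed list while holding the lock. A condensed sketch of that handoff (the list-head name g_iob_qcommitted is assumed from the surrounding NuttX sources; it does not appear in this diff):

    #include <nuttx/config.h>

    #include <nuttx/irq.h>
    #include <nuttx/mm/iob.h>
    #include <nuttx/spinlock.h>

    extern FAR struct iob_qentry_s *g_iob_qcommitted;  /* assumed name */
    extern spinlock_t g_iob_lock;

    static FAR struct iob_qentry_s *iob_pop_qcommitted_sketch(void)
    {
      FAR struct iob_qentry_s *iobq;
      irqstate_t flags;

      flags = spin_lock_irqsave(&g_iob_lock);

      /* Take the container from the head of the committed list */

      iobq = g_iob_qcommitted;
      if (iobq != NULL)
        {
          g_iob_qcommitted = iobq->qe_flink;
          iobq->qe_head = NULL;  /* Nothing is contained */
        }

      spin_unlock_irqrestore(&g_iob_lock, flags);
      return iobq;
    }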


@@ -118,7 +118,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list? If there is a task waiting for an IOB, then put
    * the IOB on either the free list or on the committed list where
@@ -137,6 +137,8 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
       g_iob_freelist = iob;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an IOB is available. If there is a thread blocked,
    * waiting for an IOB, this will wake up exactly one thread. The
    * semaphore count will correctly indicated that the awakened task
@@ -168,8 +170,6 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
     }
 #endif
 
-  leave_critical_section(flags);
-
   /* And return the I/O buffer after the one that was freed */
 
   return next;
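Note the ordering change in this file: the unlock now happens before nxsem_post() rather than after it. A spinlock should be held only across the list manipulation itself; the post can wake a waiter, and a waiter that immediately re-enters the allocator would otherwise spin on a lock that is still held. The resulting shape (a sketch; g_iob_sem is assumed to be the counting semaphore that pairs with g_throttle_sem and g_qentry_sem above):

    #include <nuttx/config.h>

    #include <nuttx/irq.h>
    #include <nuttx/mm/iob.h>
    #include <nuttx/semaphore.h>
    #include <nuttx/spinlock.h>

    extern sem_t g_iob_sem;  /* assumed: counts available I/O buffers */
    extern spinlock_t g_iob_lock;

    static void iob_free_tail_sketch(FAR struct iob_s *iob)
    {
      irqstate_t flags = spin_lock_irqsave(&g_iob_lock);

      (void)iob;  /* ... return the IOB to the free or committed list ... */

      spin_unlock_irqrestore(&g_iob_lock, flags);  /* drop the lock first */
      nxsem_post(&g_iob_sem);                      /* then wake any waiter */
    }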


@@ -58,7 +58,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    * interrupts very briefly.
    */
 
-  flags = enter_critical_section();
+  flags = spin_lock_irqsave(&g_iob_lock);
 
   /* Which list? If there is a task waiting for an IOB chain, then put
    * the IOB chain on either the free list or on the committed list where
@@ -77,6 +77,8 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
       g_iob_freeqlist = iobq;
     }
 
+  spin_unlock_irqrestore(&g_iob_lock, flags);
+
   /* Signal that an I/O buffer chain container is available. If there
    * is a thread waiting for an I/O buffer chain container, this will
    * wake up exactly one thread. The semaphore count will correctly
@@ -85,7 +87,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
    */
 
   nxsem_post(&g_qentry_sem);
-  leave_critical_section(flags);
 
   /* And return the I/O buffer chain container after the one that was freed */


@@ -51,7 +51,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
   FAR struct iob_qentry_s *prev = NULL;
   FAR struct iob_qentry_s *qentry;
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   for (qentry = iobq->qh_head; qentry != NULL;
        prev = qentry, qentry = qentry->qe_flink)
     {
@@ -73,6 +73,8 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
               iobq->qh_tail = prev;
             }
 
+          spin_unlock_irqrestore(&g_iob_lock, flags);
+
           /* Remove the queue container */
 
           iob_free_qentry(qentry);
@@ -81,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
           iob_free_chain(iob);
-          break;
+          return;
         }
     }
 
-  leave_critical_section(flags);
+  spin_unlock_irqrestore(&g_iob_lock, flags);
 }
 
 #endif /* CONFIG_IOB_NCHAINS > 0 */
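The break-to-return change above is not cosmetic: the lock is now released inside the match branch, so leaving the loop via break would fall through to the final unlock and release g_iob_lock twice. Schematically (a sketch of the new control flow; the match test is assumed):

    #include <nuttx/config.h>

    #include <nuttx/irq.h>
    #include <nuttx/mm/iob.h>
    #include <nuttx/spinlock.h>

    extern spinlock_t g_iob_lock;

    static void free_queue_qentry_sketch(FAR struct iob_s *iob,
                                         FAR struct iob_queue_s *iobq)
    {
      FAR struct iob_qentry_s *prev = NULL;
      FAR struct iob_qentry_s *qentry;
      irqstate_t flags = spin_lock_irqsave(&g_iob_lock);

      for (qentry = iobq->qh_head; qentry != NULL;
           prev = qentry, qentry = qentry->qe_flink)
        {
          if (qentry->qe_head == iob)  /* assumed match test */
            {
              /* ... unlink qentry (using prev) from the queue ... */

              spin_unlock_irqrestore(&g_iob_lock, flags);
              iob_free_qentry(qentry);
              iob_free_chain(iob);
              return;  /* was break: falling out would unlock twice */
            }
        }

      spin_unlock_irqrestore(&g_iob_lock, flags);
    }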


@@ -102,6 +102,8 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
 sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
 #endif
 
+spinlock_t g_iob_lock = SP_UNLOCKED;
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/


@@ -56,7 +56,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
   /* Remove the I/O buffer chain from the head of the queue */
 
-  irqstate_t flags = enter_critical_section();
+  irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
   qentry = iobq->qh_head;
   if (qentry)
     {
@@ -66,6 +66,8 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
           iobq->qh_tail = NULL;
         }
 
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+
       /* Extract the I/O buffer chain from the container and free the
        * container.
        */
@@ -73,8 +75,11 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
       iob = qentry->qe_head;
       iob_free_qentry(qentry);
     }
+  else
+    {
+      spin_unlock_irqrestore(&g_iob_lock, flags);
+    }
 
-  leave_critical_section(flags);
   return iob;
 }
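The new else branch exists because the two paths now release the lock at different points: the non-empty path unlocks as soon as the head container is unlinked, before freeing the container, while the empty path has nothing to unlink and unlocks immediately. Either way the lock is dropped exactly once. A condensed view (a sketch, not the literal file):

    #include <nuttx/config.h>

    #include <nuttx/irq.h>
    #include <nuttx/mm/iob.h>
    #include <nuttx/spinlock.h>

    extern spinlock_t g_iob_lock;

    static FAR struct iob_s *remove_queue_sketch(FAR struct iob_queue_s *iobq)
    {
      FAR struct iob_s *iob = NULL;
      FAR struct iob_qentry_s *qentry;
      irqstate_t flags = spin_lock_irqsave(&g_iob_lock);

      qentry = iobq->qh_head;
      if (qentry)
        {
          /* ... unlink the head container from the queue ... */

          spin_unlock_irqrestore(&g_iob_lock, flags);

          iob = qentry->qe_head;
          iob_free_qentry(qentry);
        }
      else
        {
          spin_unlock_irqrestore(&g_iob_lock, flags);
        }

      return iob;
    }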