mm/mempool: use inline list instead of queue to avoid kasan report

The crash backtrace:
kasan_report (addr=0xf3d02fd4, size=4, is_write=false) at
kasan/kasan.c:106
0x5658518d in __asan_loadN_noabort (addr=0xf3d02fd4, size=4) at
kasan/kasan.c:300
0x565851ee in __asan_load4_noabort (addr=0xf3d02fd4) at
kasan/kasan.c:334
0x56580b02 in sq_remfirst (queue=0xf3d02b08) at
queue/sq_remfirst.c:45
0x565e0e0b in mempool_alloc (pool=0xf3d02aec) at
mempool/mempool.c:161
0x566033d2 in mempool_multiple_alloc (mpool=0xf3d02a30, size=16) at
mempool/mempool_multiple.c:147

Signed-off-by: dongjiuzhu1 <dongjiuzhu1@xiaomi.com>
Author:       dongjiuzhu1 <dongjiuzhu1@xiaomi.com>
Date:         2022-10-30 08:41:08 +08:00
Committed by: Xiang Xiao
Parent:       04f15d9dc1
Commit:       adaca6a5ce

2 changed files with 30 additions and 29 deletions
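
The report fires inside sq_remfirst(): the singly-linked queue helpers are out-of-line library functions (queue/sq_remfirst.c in the backtrace), and their load of the flink pointer stored in a free block that the pool has kasan_poison()ed (see the expand path below) is instrumented and flagged. The point of the change, as the title suggests, is that the nuttx/list.h operations are inline: the same bookkeeping reads are compiled into the mempool code itself rather than into a separately built, instrumented queue routine, so they no longer trip the sanitizer. What follows is a minimal, self-contained sketch of that inline intrusive-list pattern; it mirrors the list_initialize()/list_add_head()/list_remove_head() semantics relied on by the diff, but it is not the actual include/nuttx/list.h implementation.

/* Minimal sketch of the "inline list" pattern (not the NuttX
 * implementation): the nodes live inside the free blocks themselves and
 * every operation is static inline, so the pointer accesses are compiled
 * into the caller instead of an out-of-line queue routine.
 */

#include <stddef.h>
#include <stdio.h>

struct list_node
{
  struct list_node *prev;
  struct list_node *next;
};

static inline void list_initialize(struct list_node *list)
{
  list->prev = list;          /* Empty circular list: head points to itself */
  list->next = list;
}

static inline void list_add_head(struct list_node *list,
                                 struct list_node *item)
{
  item->prev = list;
  item->next = list->next;
  list->next->prev = item;
  list->next = item;
}

static inline struct list_node *list_remove_head(struct list_node *list)
{
  struct list_node *item = list->next;

  if (item == list)
    {
      return NULL;            /* Empty list, as mempool_alloc() expects */
    }

  list->next = item->next;
  item->next->prev = list;
  return item;
}

int main(void)
{
  /* Chain three 32-byte blocks through their own first bytes, the way
   * mempool_add_list() does, then pop one as mempool_alloc() would.
   */

  static char blocks[3 * 32];
  struct list_node free_list;
  int i;

  list_initialize(&free_list);
  for (i = 0; i < 3; i++)
    {
      list_add_head(&free_list, (struct list_node *)(blocks + 32 * i));
    }

  printf("first free block: %p\n", (void *)list_remove_head(&free_list));
  return 0;
}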


@@ -27,7 +27,7 @@
 #include <sys/types.h>
-#include <nuttx/queue.h>
+#include <nuttx/list.h>
 #include <nuttx/fs/procfs.h>
 #include <nuttx/spinlock.h>
 #include <nuttx/semaphore.h>
@@ -64,9 +64,9 @@ struct mempool_s
   /* Private data for memory pool */

-  sq_queue_t list;        /* The free block list in normal mempool */
-  sq_queue_t ilist;       /* The free block list in interrupt mempool */
-  sq_queue_t elist;       /* The expand block list for normal mempool */
+  struct list_node list;  /* The free block list in normal mempool */
+  struct list_node ilist; /* The free block list in interrupt mempool */
+  struct list_node elist; /* The expand block list for normal mempool */
   size_t     nused;       /* The number of used block in mempool */
   spinlock_t lock;        /* The protect lock to mempool */
   sem_t      waitsem;     /* The semaphore of waiter get free block */
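
For context on the member change above, an abbreviated comparison of the two node types (paraphrased from nuttx/queue.h and nuttx/list.h with the FAR qualifier omitted; a simplification, not the verbatim headers):

/* Old: singly linked queue. Each free block holds one forward link and
 * the per-pool descriptor keeps separate head/tail pointers (the tail is
 * what sq_addlast() appends to).
 */

struct sq_entry_s
{
  struct sq_entry_s *flink;
};
typedef struct sq_entry_s sq_entry_t;

struct sq_queue_s
{
  sq_entry_t *head;
  sq_entry_t *tail;
};
typedef struct sq_queue_s sq_queue_t;

/* New: doubly linked circular list. The list head is itself a node, and
 * each free block stores prev/next links in its first bytes, so a block
 * must be large enough to hold two pointers.
 */

struct list_node
{
  struct list_node *prev;
  struct list_node *next;
};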


@@ -33,13 +33,14 @@
  * Private Functions
  ****************************************************************************/

-static inline void mempool_add_list(FAR sq_queue_t *list, FAR void *base,
-                                    size_t nblks, size_t bsize)
+static inline void mempool_add_list(FAR struct list_node *list,
+                                    FAR void *base, size_t nblks,
+                                    size_t bsize)
 {
   while (nblks-- > 0)
     {
-      sq_addfirst(((FAR sq_entry_t *)((FAR char *)base + bsize * nblks)),
-                  list);
+      list_add_head(list, ((FAR struct list_node *)((FAR char *)base +
+                                                    bsize * nblks)));
     }
 }
@@ -91,15 +92,15 @@ static inline void mempool_mfree(FAR struct mempool_s *pool, FAR void *addr)
 int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
 {
-  FAR sq_entry_t *base;
+  FAR struct list_node *base;
   size_t count;

   DEBUGASSERT(pool != NULL && pool->bsize != 0);

   pool->nused = 0;
-  sq_init(&pool->list);
-  sq_init(&pool->ilist);
-  sq_init(&pool->elist);
+  list_initialize(&pool->list);
+  list_initialize(&pool->ilist);
+  list_initialize(&pool->elist);

   count = pool->ninitial + pool->ninterrupt;
   if (count != 0)
@@ -111,7 +112,7 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
           return -ENOMEM;
         }

-      sq_addfirst(base, &pool->elist);
+      list_add_head(&pool->elist, base);
       mempool_add_list(&pool->ilist, base + 1,
                        pool->ninterrupt, pool->bsize);
       mempool_add_list(&pool->list, (FAR char *)(base + 1) +
@@ -151,19 +152,19 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
 FAR void *mempool_alloc(FAR struct mempool_s *pool)
 {
-  FAR sq_entry_t *blk;
+  FAR struct list_node *blk;
   irqstate_t flags;

   DEBUGASSERT(pool != NULL);

 retry:
   flags = spin_lock_irqsave(&pool->lock);
-  blk = sq_remfirst(&pool->list);
+  blk = list_remove_head(&pool->list);
   if (blk == NULL)
     {
       if (up_interrupt_context())
         {
-          blk = sq_remfirst(&pool->ilist);
+          blk = list_remove_head(&pool->ilist);
           if (blk == NULL)
             {
               goto out_with_lock;
@@ -183,10 +184,10 @@ retry:
               kasan_poison(blk + 1, pool->bsize * pool->nexpand);
               flags = spin_lock_irqsave(&pool->lock);
-              sq_addlast(blk, &pool->elist);
+              list_add_head(&pool->elist, blk);
               mempool_add_list(&pool->list, blk + 1, pool->nexpand,
                                pool->bsize);
-              blk = sq_remfirst(&pool->list);
+              blk = list_remove_head(&pool->list);
             }
           else if (!pool->wait ||
                    nxsem_wait_uninterruptible(&pool->waitsem) < 0)
@@ -229,20 +230,20 @@ void mempool_free(FAR struct mempool_s *pool, FAR void *blk)
     {
       FAR char *base;

-      base = (FAR char *)(sq_peek(&pool->elist) + 1);
+      base = (FAR char *)(list_peek_head(&pool->elist) + 1);
       if ((FAR char *)blk >= base &&
           (FAR char *)blk < base + pool->ninterrupt * pool->bsize)
         {
-          sq_addfirst(blk, &pool->ilist);
+          list_add_head(&pool->ilist, blk);
         }
       else
         {
-          sq_addfirst(blk, &pool->list);
+          list_add_head(&pool->list, blk);
         }
     }
   else
     {
-      sq_addfirst(blk, &pool->list);
+      list_add_head(&pool->list, blk);
     }

   pool->nused--;
@@ -281,8 +282,8 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
   DEBUGASSERT(pool != NULL && info != NULL);

   flags = spin_lock_irqsave(&pool->lock);
-  info->ordblks = sq_count(&pool->list);
-  info->iordblks = sq_count(&pool->ilist);
+  info->ordblks = list_length(&pool->list);
+  info->iordblks = list_length(&pool->ilist);
   info->aordblks = pool->nused;
   info->arena = (pool->nused + info->ordblks + info->iordblks) * pool->bsize;
   spin_unlock_irqrestore(&pool->lock, flags);
@@ -314,7 +315,7 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
 int mempool_deinit(FAR struct mempool_s *pool)
 {
-  FAR sq_entry_t *blk;
+  FAR struct list_node *blk;

   DEBUGASSERT(pool != NULL);
@@ -327,7 +328,7 @@ int mempool_deinit(FAR struct mempool_s *pool)
   mempool_procfs_unregister(&pool->procfs);
 #endif

-  while ((blk = sq_remfirst(&pool->elist)) != NULL)
+  while ((blk = list_remove_head(&pool->elist)) != NULL)
     {
       kasan_unpoison(blk, mm_malloc_size(blk));
       mempool_mfree(pool, blk);
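
For completeness, a hypothetical usage sketch of the converted pool. The fields assigned below (bsize, ninitial, ninterrupt, nexpand, wait) are the ones the hunks above read in mempool_init() and mempool_alloc(); the header path and the exact set of fields a caller must fill in are assumptions, not taken from this diff.

#include <nuttx/config.h>
#include <nuttx/mm/mempool.h>   /* Assumed header for struct mempool_s */

#include <stdbool.h>
#include <stddef.h>

static struct mempool_s g_pool;

int example_pool_init(void)
{
  /* Field names follow the diff above; the values are arbitrary. */

  g_pool.bsize      = 32;      /* Size of each block handed out */
  g_pool.ninitial   = 16;      /* Blocks preallocated for normal use */
  g_pool.ninterrupt = 4;       /* Blocks reserved for interrupt context */
  g_pool.nexpand    = 8;       /* Blocks added when the free list is empty */
  g_pool.wait       = false;   /* Do not block when the pool is exhausted */

  return mempool_init(&g_pool, "example");
}

void example_pool_use(void)
{
  FAR void *blk = mempool_alloc(&g_pool);

  if (blk != NULL)
    {
      /* ... use the 32-byte block ... */

      mempool_free(&g_pool, blk);
    }

  mempool_deinit(&g_pool);
}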