/****************************************************************************
 * mm/mempool/mempool.c
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <assert.h>
#include <execinfo.h>
#include <stdbool.h>
#include <stdio.h>
#include <syslog.h>

#include <nuttx/kmalloc.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/mm/mempool.h>
#include <nuttx/sched.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#undef ALIGN_UP
#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
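
/* For example, ALIGN_UP(13, 8) expands to ((13 + 7) & ~7) == 16, while
 * ALIGN_UP(16, 8) stays 16.  The alignment 'a' is assumed to be a power
 * of two.
 */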

#if CONFIG_MM_BACKTRACE >= 0
#  define MEMPOOL_MAGIC_FREE  0xAAAAAAAA
#  define MEMPOOL_MAGIC_ALLOC 0x55555555

/****************************************************************************
 * Private Types
 ****************************************************************************/

typedef void (*mempool_callback_t)(FAR struct mempool_s *pool,
                                   FAR struct mempool_backtrace_s *buf,
                                   FAR const void *input, FAR void *output);
#endif

/****************************************************************************
 * Private Functions
 ****************************************************************************/

static inline FAR sq_entry_t *
mempool_remove_queue(FAR struct mempool_s *pool, FAR sq_queue_t *queue)
{
  FAR sq_entry_t *ret = queue->head;

  if (ret)
    {
      queue->head = ret->flink;
      if (!queue->head)
        {
          queue->tail = NULL;
        }
      else
        {
          pool->check(pool, queue->head);
        }

      ret->flink = NULL;
    }

  return ret;
}

static inline void mempool_add_queue(FAR struct mempool_s *pool,
                                     FAR sq_queue_t *queue,
                                     FAR char *base, size_t nblks,
                                     size_t blocksize)
{
  while (nblks-- > 0)
    {
#if CONFIG_MM_BACKTRACE >= 0
      FAR struct mempool_backtrace_s *buf =
        (FAR struct mempool_backtrace_s *)
        (base + nblks * blocksize + pool->blocksize);

      buf->magic = MEMPOOL_MAGIC_FREE;
#endif
      sq_addlast((FAR sq_entry_t *)(base + blocksize * nblks), queue);
    }
}

#if CONFIG_MM_BACKTRACE >= 0
static inline void mempool_add_backtrace(FAR struct mempool_s *pool,
                                         FAR struct mempool_backtrace_s *buf)
{
  DEBUGASSERT(buf->magic == MEMPOOL_MAGIC_FREE);
  buf->magic = MEMPOOL_MAGIC_ALLOC;
  buf->pid = _SCHED_GETTID();
  buf->seqno = g_mm_seqno++;
#  if CONFIG_MM_BACKTRACE > 0
  if (pool->procfs.backtrace)
    {
      int result = sched_backtrace(buf->pid, buf->backtrace,
                                   CONFIG_MM_BACKTRACE,
                                   CONFIG_MM_HEAP_MEMPOOL_BACKTRACE_SKIP);
      if (result < CONFIG_MM_BACKTRACE)
        {
          buf->backtrace[result] = NULL;
        }
    }
  else
    {
      buf->backtrace[0] = NULL;
    }
#  endif
}

static void mempool_foreach(FAR struct mempool_s *pool,
                            mempool_callback_t callback,
                            FAR const void *input, FAR void *output)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  FAR struct mempool_backtrace_s *buf;
  FAR sq_entry_t *entry;
  FAR char *base;
  size_t nblks;

  if (pool->ibase != NULL)
    {
      nblks = pool->interruptsize / blocksize;
      while (nblks--)
        {
          buf = (FAR struct mempool_backtrace_s *)
                (pool->ibase + nblks * blocksize + pool->blocksize);

          callback(pool, buf, input, output);
        }
    }

  sq_for_every(&pool->equeue, entry)
    {
      nblks = (pool->expandsize - sizeof(sq_entry_t)) / blocksize;
      base = (FAR char *)entry - (nblks * blocksize);

      while (nblks--)
        {
          buf = (FAR struct mempool_backtrace_s *)
                (base + nblks * blocksize + pool->blocksize);
          callback(pool, buf, input, output);
        }
    }
}

static void mempool_info_task_callback(FAR struct mempool_s *pool,
                                       FAR struct mempool_backtrace_s *buf,
                                       FAR const void *input,
                                       FAR void *output)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  FAR const struct malltask *task = input;
  FAR struct mallinfo_task *info = output;

  if (buf->magic == MEMPOOL_MAGIC_FREE)
    {
      return;
    }

  if ((MM_DUMP_ASSIGN(task->pid, buf->pid) ||
       MM_DUMP_ALLOC(task->pid, buf->pid) ||
       MM_DUMP_LEAK(task->pid, buf->pid)) &&
      buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
    {
      info->aordblks++;
      info->uordblks += blocksize;
    }
}

static void mempool_memdump_callback(FAR struct mempool_s *pool,
                                     FAR struct mempool_backtrace_s *buf,
                                     FAR const void *input, FAR void *output)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  FAR const struct mm_memdump_s *dump = input;

  if (buf->magic == MEMPOOL_MAGIC_FREE)
    {
      return;
    }

  if ((MM_DUMP_ASSIGN(dump->pid, buf->pid) ||
       MM_DUMP_ALLOC(dump->pid, buf->pid) ||
       MM_DUMP_LEAK(dump->pid, buf->pid)) &&
      buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
    {
      char tmp[CONFIG_MM_BACKTRACE * BACKTRACE_PTR_FMT_WIDTH + 1] = "";

#  if CONFIG_MM_BACKTRACE > 0
      FAR const char *format = " %0*p";
      int i;

      for (i = 0; i < CONFIG_MM_BACKTRACE && buf->backtrace[i]; i++)
        {
          snprintf(tmp + i * BACKTRACE_PTR_FMT_WIDTH,
                   sizeof(tmp) - i * BACKTRACE_PTR_FMT_WIDTH,
                   format, BACKTRACE_PTR_FMT_WIDTH - 1,
                   buf->backtrace[i]);
        }
#  endif

      syslog(LOG_INFO, "%6d%12zu%12lu%*p%s\n",
             buf->pid, blocksize, buf->seqno,
             BACKTRACE_PTR_FMT_WIDTH,
             ((FAR char *)buf - pool->blocksize), tmp);
    }
}

static void
mempool_memdump_free_callback(FAR struct mempool_s *pool,
                              FAR struct mempool_backtrace_s *buf,
                              FAR const void *input, FAR void *output)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);

  if (buf->magic == MEMPOOL_MAGIC_FREE)
    {
      syslog(LOG_INFO, "%12zu%*p\n",
             blocksize, BACKTRACE_PTR_FMT_WIDTH,
             ((FAR char *)buf - pool->blocksize));
    }
}
#endif

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: mempool_init
 *
 * Description:
 *   Initialize a memory pool.
 *   The caller must fill in the initialization information of the mempool,
 *   including blocksize, initialsize, expandsize and interruptsize, before
 *   calling this function.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *   name - The name of the memory pool.
 *
 * Returned Value:
 *   Zero on success; A negated errno value is returned on any failure.
 *
 ****************************************************************************/

int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);

  sq_init(&pool->queue);
  sq_init(&pool->iqueue);
  sq_init(&pool->equeue);
  pool->nalloc = 0;
  if (pool->interruptsize >= blocksize)
    {
      size_t ninterrupt = pool->interruptsize / blocksize;
      size_t size = ninterrupt * blocksize;

      pool->ibase = pool->alloc(pool, size);
      if (pool->ibase == NULL)
        {
          return -ENOMEM;
        }

      mempool_add_queue(pool, &pool->iqueue,
                        pool->ibase, ninterrupt, blocksize);
      kasan_poison(pool->ibase, size);
    }
  else
    {
      pool->ibase = NULL;
    }

  if (pool->initialsize >= blocksize + sizeof(sq_entry_t))
    {
      size_t ninitial = (pool->initialsize - sizeof(sq_entry_t)) / blocksize;
      size_t size = ninitial * blocksize + sizeof(sq_entry_t);
      FAR char *base;

      base = pool->alloc(pool, size);
      if (base == NULL)
        {
          if (pool->ibase)
            {
              pool->free(pool, pool->ibase);
            }

          return -ENOMEM;
        }

      mempool_add_queue(pool, &pool->queue,
                        base, ninitial, blocksize);
      sq_addlast((FAR sq_entry_t *)(base + ninitial * blocksize),
                 &pool->equeue);
      kasan_poison(base, size);
    }

  spin_initialize(&pool->lock, SP_UNLOCKED);
  if (pool->wait && pool->expandsize == 0)
    {
      nxsem_init(&pool->waitsem, 0, 0);
    }

#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMPOOL)
  mempool_procfs_register(&pool->procfs, name);
#  ifdef CONFIG_MM_BACKTRACE_DEFAULT
  pool->procfs.backtrace = true;
#  endif
#endif

  return 0;
}
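
/* Usage sketch (illustrative only, not part of the implementation): the
 * struct fields shown are the ones this file dereferences (blocksize,
 * initialsize, expandsize, interruptsize, wait, alloc, free, check), and
 * the callback signatures are inferred from how this file invokes them.
 * The helper names, sizes and pool name are assumptions made up for the
 * example.  A no-op check callback is supplied because
 * mempool_remove_queue() invokes pool->check unconditionally.
 *
 *   static FAR void *example_pool_alloc(FAR struct mempool_s *pool,
 *                                       size_t size)
 *   {
 *     return kmm_malloc(size);
 *   }
 *
 *   static void example_pool_free(FAR struct mempool_s *pool,
 *                                 FAR void *addr)
 *   {
 *     kmm_free(addr);
 *   }
 *
 *   static void example_pool_check(FAR struct mempool_s *pool,
 *                                  FAR sq_entry_t *blk)
 *   {
 *   }
 *
 *   static struct mempool_s g_example_pool =
 *   {
 *     .blocksize     = 32,
 *     .initialsize   = 32 * 64 + sizeof(sq_entry_t),
 *     .expandsize    = 32 * 16 + sizeof(sq_entry_t),
 *     .interruptsize = 0,
 *     .wait          = false,
 *     .alloc         = example_pool_alloc,
 *     .free          = example_pool_free,
 *     .check         = example_pool_check,
 *   };
 *
 *   int ret = mempool_init(&g_example_pool, "example");
 */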

/****************************************************************************
 * Name: mempool_allocate
 *
 * Description:
 *   Allocate a block from a specific memory pool.
 *
 *   If there are no free blocks left, this function will expand the memory
 *   pool if expandsize isn't zero.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *
 * Returned Value:
 *   The pointer to the allocated block on success; NULL on any failure.
 *
 ****************************************************************************/

FAR void *mempool_allocate(FAR struct mempool_s *pool)
{
  FAR sq_entry_t *blk;
  irqstate_t flags;

retry:
  flags = spin_lock_irqsave(&pool->lock);
  blk = mempool_remove_queue(pool, &pool->queue);
  if (blk == NULL)
    {
      if (up_interrupt_context())
        {
          blk = mempool_remove_queue(pool, &pool->iqueue);
          if (blk == NULL)
            {
              spin_unlock_irqrestore(&pool->lock, flags);
              return blk;
            }
        }
      else
        {
          size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);

          spin_unlock_irqrestore(&pool->lock, flags);
          if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
            {
              size_t nexpand = (pool->expandsize - sizeof(sq_entry_t)) /
                               blocksize;
              size_t size = nexpand * blocksize + sizeof(sq_entry_t);
              FAR char *base = pool->alloc(pool, size);

              if (base == NULL)
                {
                  return NULL;
                }

              kasan_poison(base, size);
              flags = spin_lock_irqsave(&pool->lock);
              mempool_add_queue(pool, &pool->queue,
                                base, nexpand, blocksize);
              sq_addlast((FAR sq_entry_t *)(base + nexpand * blocksize),
                         &pool->equeue);
              blk = mempool_remove_queue(pool, &pool->queue);
            }
          else if (!pool->wait ||
                   nxsem_wait_uninterruptible(&pool->waitsem) < 0)
            {
              return NULL;
            }
          else
            {
              goto retry;
            }
        }
    }

  pool->nalloc++;
  spin_unlock_irqrestore(&pool->lock, flags);
  blk = kasan_unpoison(blk, pool->blocksize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
  memset(blk, MM_ALLOC_MAGIC, pool->blocksize);
#endif

#if CONFIG_MM_BACKTRACE >= 0
  mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
                              ((FAR char *)blk + pool->blocksize));
#endif

  return blk;
}

/****************************************************************************
 * Name: mempool_release
 *
 * Description:
 *   Release a memory block to the pool.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *   blk  - The pointer to the memory block.
 *
 ****************************************************************************/

void mempool_release(FAR struct mempool_s *pool, FAR void *blk)
{
  irqstate_t flags = spin_lock_irqsave(&pool->lock);
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
#if CONFIG_MM_BACKTRACE >= 0
  FAR struct mempool_backtrace_s *buf =
    (FAR struct mempool_backtrace_s *)((FAR char *)blk + pool->blocksize);

  /* Check for double free or out of bounds */

  DEBUGASSERT(buf->magic == MEMPOOL_MAGIC_ALLOC);
  buf->magic = MEMPOOL_MAGIC_FREE;
#endif

  pool->nalloc--;

#ifdef CONFIG_MM_FILL_ALLOCATIONS
  memset(blk, MM_FREE_MAGIC, pool->blocksize);
#endif

  if (pool->interruptsize > blocksize)
    {
      if ((FAR char *)blk >= pool->ibase &&
          (FAR char *)blk < pool->ibase + pool->interruptsize - blocksize)
        {
          sq_addlast(blk, &pool->iqueue);
        }
      else
        {
          sq_addlast(blk, &pool->queue);
        }
    }
  else
    {
      sq_addlast(blk, &pool->queue);
    }

  kasan_poison(blk, pool->blocksize);
  spin_unlock_irqrestore(&pool->lock, flags);
  if (pool->wait && pool->expandsize == 0)
    {
      int semcount;

      nxsem_get_value(&pool->waitsem, &semcount);
      if (semcount < 1)
        {
          nxsem_post(&pool->waitsem);
        }
    }
}
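
/* Allocation round trip (illustrative sketch): g_example_pool refers to the
 * hypothetical pool from the sketch after mempool_init() above.  When
 * called from an interrupt handler, mempool_allocate() transparently falls
 * back to the interrupt-reserved queue.
 *
 *   FAR void *blk = mempool_allocate(&g_example_pool);
 *   if (blk != NULL)
 *     {
 *       memset(blk, 0, g_example_pool.blocksize);
 *       mempool_release(&g_example_pool, blk);
 *     }
 */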

/****************************************************************************
 * Name: mempool_info
 *
 * Description:
 *   mempool_info returns a copy of the current mempool information.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *   info - The pointer to the mempoolinfo structure to be filled.
 *
 * Returned Value:
 *   OK on success; A negated errno value on any failure.
 *
 ****************************************************************************/

int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  irqstate_t flags;

  DEBUGASSERT(pool != NULL && info != NULL);

  flags = spin_lock_irqsave(&pool->lock);
  info->ordblks = sq_count(&pool->queue);
  info->iordblks = sq_count(&pool->iqueue);
  info->aordblks = pool->nalloc;
  info->arena = sq_count(&pool->equeue) * sizeof(sq_entry_t) +
                (info->aordblks + info->ordblks + info->iordblks) *
                blocksize;
  spin_unlock_irqrestore(&pool->lock, flags);
  info->sizeblks = blocksize;
  if (pool->wait && pool->expandsize == 0)
    {
      int semcount;

      nxsem_get_value(&pool->waitsem, &semcount);
      info->nwaiter = -semcount;
    }
  else
    {
      info->nwaiter = 0;
    }

  return 0;
}
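
/* Query sketch (illustrative): the mempoolinfo_s members shown are the ones
 * filled in by mempool_info() above; the casts are only there because the
 * exact member types are not visible in this file.
 *
 *   struct mempoolinfo_s minfo;
 *
 *   if (mempool_info(&g_example_pool, &minfo) == 0)
 *     {
 *       syslog(LOG_INFO, "blocksize=%zu free=%zu ifree=%zu inuse=%zu\n",
 *              (size_t)minfo.sizeblks, (size_t)minfo.ordblks,
 *              (size_t)minfo.iordblks, (size_t)minfo.aordblks);
 *     }
 */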

/****************************************************************************
 * Name: mempool_info_task
 ****************************************************************************/

struct mallinfo_task
mempool_info_task(FAR struct mempool_s *pool,
                  FAR const struct malltask *task)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  struct mallinfo_task info =
    {
      0, 0
    };

  if (task->pid == PID_MM_FREE)
    {
      irqstate_t flags = spin_lock_irqsave(&pool->lock);
      size_t count = sq_count(&pool->queue) +
                     sq_count(&pool->iqueue);

      spin_unlock_irqrestore(&pool->lock, flags);
      info.aordblks += count;
      info.uordblks += count * blocksize;
    }
  else if (task->pid == PID_MM_ALLOC)
    {
      info.aordblks += pool->nalloc;
      info.uordblks += pool->nalloc * blocksize;
    }
#if CONFIG_MM_BACKTRACE >= 0
  else
    {
      mempool_foreach(pool, mempool_info_task_callback, task, &info);
    }
#endif

  return info;
}
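
/* Accounting sketch (illustrative): PID_MM_ALLOC and the malltask and
 * mallinfo_task members are used exactly as in the function above; the
 * seqmin/seqmax bounds (and their assumed unsigned long width) and the %d
 * formats for the counters are assumptions made for the example.
 *
 *   struct mallinfo_task used;
 *   struct malltask task;
 *
 *   task.pid    = PID_MM_ALLOC;
 *   task.seqmin = 0;
 *   task.seqmax = ULONG_MAX;
 *   used = mempool_info_task(&g_example_pool, &task);
 *   syslog(LOG_INFO, "blocks=%d bytes=%d\n", used.aordblks, used.uordblks);
 */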

/****************************************************************************
 * Name: mempool_memdump
 *
 * Description:
 *   mempool_memdump dumps memory usage information for the specified pid
 *   of a task/thread.  If pid equals -1, this function dumps all allocated
 *   nodes of this mempool and outputs a backtrace for each of them; if pid
 *   equals -2, it dumps all free nodes of this mempool; and if pid is
 *   greater than or equal to 0, it dumps the nodes allocated by that pid
 *   together with their backtraces.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *   dump - Describes what needs to be dumped.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

void mempool_memdump(FAR struct mempool_s *pool,
                     FAR const struct mm_memdump_s *dump)
{
#if CONFIG_MM_BACKTRACE >= 0
  if (dump->pid == PID_MM_FREE)
    {
      mempool_foreach(pool, mempool_memdump_free_callback, NULL, NULL);
    }
  else
    {
      mempool_foreach(pool, mempool_memdump_callback, dump, NULL);
    }
#else
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);

  /* Avoid race condition */

  syslog(LOG_INFO, "%12zu%*p skip block dump\n",
         blocksize, BACKTRACE_PTR_FMT_WIDTH, pool);
#endif
}
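
/* Dump sketch (illustrative): the mm_memdump_s members shown are the ones
 * read by the callbacks above; the sequence bounds are example values and
 * pid may instead be PID_MM_FREE or a specific task pid, as described in
 * the header comment.  Output goes to syslog in the format produced by
 * mempool_memdump_callback().
 *
 *   struct mm_memdump_s dump;
 *
 *   dump.pid    = PID_MM_ALLOC;
 *   dump.seqmin = 0;
 *   dump.seqmax = ULONG_MAX;
 *   mempool_memdump(&g_example_pool, &dump);
 */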

/****************************************************************************
 * Name: mempool_deinit
 *
 * Description:
 *   Deallocate a memory pool.
 *
 * Input Parameters:
 *   pool - Address of the memory pool to be used.
 *
 ****************************************************************************/

int mempool_deinit(FAR struct mempool_s *pool)
{
  size_t blocksize = MEMPOOL_REALBLOCKSIZE(pool);
  FAR sq_entry_t *blk;
  size_t count = 0;

  if (pool->nalloc != 0)
    {
      return -EBUSY;
    }

  if (pool->initialsize >= blocksize + sizeof(sq_entry_t))
    {
      count = (pool->initialsize - sizeof(sq_entry_t)) / blocksize;
    }

  if (count == 0)
    {
      if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
        {
          count = (pool->expandsize - sizeof(sq_entry_t)) / blocksize;
        }
    }

#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMPOOL)
  mempool_procfs_unregister(&pool->procfs);
#endif

  while ((blk = mempool_remove_queue(pool, &pool->equeue)) != NULL)
    {
      blk = (FAR sq_entry_t *)((FAR char *)blk - count * blocksize);

      blk = kasan_unpoison(blk, count * blocksize + sizeof(sq_entry_t));
      pool->free(pool, blk);
      if (pool->expandsize >= blocksize + sizeof(sq_entry_t))
        {
          count = (pool->expandsize - sizeof(sq_entry_t)) / blocksize;
        }
    }

  if (pool->ibase)
    {
      pool->ibase = kasan_unpoison(pool->ibase,
                                   pool->interruptsize /
                                   blocksize * blocksize);
      pool->free(pool, pool->ibase);
    }

  if (pool->wait && pool->expandsize == 0)
    {
      nxsem_destroy(&pool->waitsem);
    }

  return 0;
}
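
/* Teardown sketch (illustrative): every block must have been released
 * before the call; otherwise mempool_deinit() returns -EBUSY and leaves
 * the pool untouched.
 *
 *   int ret = mempool_deinit(&g_example_pool);
 *   DEBUGASSERT(ret == 0);
 */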