mm: separate mm_pool and mm_pool init

Make it possible to enable the pool feature while keeping it disabled in the
system heap, and stay compatible with extra heaps that may or may not use the
pool in the future.
MM_HEAP_MEMPOOL_THRESHOLD = -1 now indicates that the pool is disabled.

Signed-off-by: buxiasen <buxiasen@xiaomi.com>
buxiasen 2024-06-18 12:26:27 +08:00 committed by Xiang Xiao
parent ed78646798
commit 870d989d6c
14 changed files with 134 additions and 90 deletions

View File

@ -339,7 +339,7 @@ struct mempool_multiple_s;
FAR struct mempool_multiple_s *
mempool_multiple_init(FAR const char *name,
FAR size_t *poolsize, size_t npools,
FAR const size_t *poolsize, size_t npools,
mempool_multiple_alloc_t alloc,
mempool_multiple_alloc_size_t alloc_size,
mempool_multiple_free_t free, FAR void *arg,

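Because the parameter is now const-qualified, a read-only pool-size table can be handed to mempool_multiple_init() without a cast. A standalone sketch of the difference, using a stand-in function since only part of the real parameter list appears in this hunk:

#include <stddef.h>

/* Pool-size table that may be placed in read-only storage. */

static const size_t g_poolsize[] =
{
  16, 32, 48, 64
};

/* Stand-in mirroring the new "FAR const size_t *poolsize" parameter;
 * with the old "FAR size_t *" type, passing g_poolsize would have
 * required a cast or a writable copy of the table.
 */

static size_t first_poolsize(const size_t *poolsize, size_t npools)
{
  return npools > 0 ? poolsize[0] : 0;
}

int main(void)
{
  return first_poolsize(g_poolsize,
                        sizeof(g_poolsize) / sizeof(g_poolsize[0])) == 16
         ? 0 : 1;
}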
View File

@ -39,6 +39,10 @@
/* Configuration ************************************************************/
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD >= 0
# define CONFIG_MM_HEAP_MEMPOOL
#endif
/* If the MCU has a small (16-bit) address capability, then we will use
* a smaller chunk header that contains 16-bit size/offset information.
* We will also use the smaller header on MCUs with wider addresses if

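The derived symbol splits two questions that the old "#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0" test conflated: is pool support compiled in at all, and does the default heap create a pool? A preprocessor sketch of the gates that result (the threshold value is illustrative; it is normally set via Kconfig):

#define CONFIG_MM_HEAP_MEMPOOL_THRESHOLD 0   /* try -1, 0 or 64 */

#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD >= 0
#  define CONFIG_MM_HEAP_MEMPOOL             /* pool code is compiled in */
#endif

#ifdef CONFIG_MM_HEAP_MEMPOOL
/* struct mm_heap_s carries mm_threshold/mm_mpool and the allocator
 * fast paths are built, but they only run when heap->mm_mpool is
 * non-NULL.
 */
#endif

#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
/* The default umm/kmm heaps additionally size and create their own
 * pools (see the mm_initialize() hunks further down).
 */
#endif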
View File

@ -206,17 +206,20 @@ config MM_KMAP
for user.
config MM_HEAP_MEMPOOL_THRESHOLD
int "The size of threshold to avoid using multiple mempool in heap"
default 0
int "The default size of threshold to avoid using multiple mempool in heap"
default -1
---help---
If the size of the memory requested by the user is less
than the threshold, the memory will be requested from the
multiple mempool by default.
> 0 Enable pool feature, and set umm/kmm pool threshold size.
== 0 Enable pool feature, but disable the umm/kmm pool function.
< 0 Disable pool feature.
config MM_HEAP_MEMPOOL_EXPAND_SIZE
int "The expand size for each mempool in multiple mempool"
default 4096
depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
depends on MM_HEAP_MEMPOOL_THRESHOLD > 0
---help---
This size describes the size of each expansion of each memory
pool with insufficient memory in the multi-level memory pool.
@ -224,14 +227,14 @@ config MM_HEAP_MEMPOOL_EXPAND_SIZE
config MM_HEAP_MEMPOOL_DICTIONARY_EXPAND_SIZE
int "The expand size for multiple mempool's dictionary"
default MM_HEAP_MEMPOOL_EXPAND_SIZE
depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
depends on MM_HEAP_MEMPOOL_THRESHOLD > 0
---help---
This size describes the multiple mempool dictionary expand.
config MM_HEAP_MEMPOOL_CHUNK_SIZE
int "The multiples pool chunk size"
default 0
depends on MM_HEAP_MEMPOOL_THRESHOLD != 0
depends on MM_HEAP_MEMPOOL_THRESHOLD > 0
---help---
This size describes the multiple mempool chunk size.

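In board-configuration terms the three value ranges map to settings like the following; an illustrative fragment, not taken from any real defconfig:

# Pool feature fully disabled (no pool code compiled in):
CONFIG_MM_HEAP_MEMPOOL_THRESHOLD=-1

# Pool code compiled in, but the default umm/kmm heaps create no pool:
# CONFIG_MM_HEAP_MEMPOOL_THRESHOLD=0

# Pool code compiled in; the default heaps pool requests up to 64 bytes:
# CONFIG_MM_HEAP_MEMPOOL_THRESHOLD=64
# CONFIG_MM_HEAP_MEMPOOL_EXPAND_SIZE=4096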
View File

@ -370,7 +370,7 @@ mempool_multiple_get_dict(FAR struct mempool_multiple_s *mpool,
FAR struct mempool_multiple_s *
mempool_multiple_init(FAR const char *name,
FAR size_t *poolsize, size_t npools,
FAR const size_t *poolsize, size_t npools,
mempool_multiple_alloc_t alloc,
mempool_multiple_alloc_size_t alloc_size,
mempool_multiple_free_t free, FAR void *arg,
@ -774,12 +774,15 @@ mempool_multiple_info_task(FAR struct mempool_multiple_s *mpool,
0, 0
};
if (mpool != NULL)
{
for (i = 0; i < mpool->npools; i++)
{
info = mempool_info_task(mpool->pools + i, task);
ret.aordblks += info.aordblks;
ret.uordblks += info.uordblks;
}
}
return ret;
}

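Besides the const-qualified prototype, this hunk adds a NULL check so that a heap whose pool is disabled simply reports zero per-task pool usage. A caller-side sketch of what that allows, matching the mm_mallinfo_task() hunks later in this commit:

#ifdef CONFIG_MM_HEAP_MEMPOOL
  /* heap->mm_mpool may be NULL for a heap created without a pool;
   * the call then returns a zeroed mallinfo_task instead of
   * dereferencing NULL, so no extra guard is needed here.
   */

  info = mempool_multiple_info_task(heap->mm_mpool, task);
#endif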
View File

@ -261,7 +261,8 @@ struct mm_heap_s
/* The is a multiple mempool of the heap */
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
size_t mm_threshold;
FAR struct mempool_multiple_s *mm_mpool;
#endif

View File

@ -225,11 +225,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
DEBUGASSERT(mm_heapmember(heap, mem));
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
if (mempool_multiple_free(heap->mm_mpool, mem) >= 0)
{
return;
}
}
#endif
mm_delayfree(heap, mem, CONFIG_MM_FREE_DELAYCOUNT_MAX > 0);

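This two-level gate repeats in every allocator hot path touched by the commit (free, malloc, malloc_size, memalign, realloc): the code is compiled in when CONFIG_MM_HEAP_MEMPOOL is defined and used only when the particular heap owns a pool. A condensed sketch of the free path with the gates annotated (mm_free_sketch is a hypothetical name, not part of the patch):

void mm_free_sketch(FAR struct mm_heap_s *heap, FAR void *mem)
{
#ifdef CONFIG_MM_HEAP_MEMPOOL
  /* Compile-time gate: pool support built (THRESHOLD >= 0).
   * Run-time gate: this heap actually created a pool.
   */

  if (heap->mm_mpool != NULL &&
      mempool_multiple_free(heap->mm_mpool, mem) >= 0)
    {
      return;                       /* the block belonged to the pool */
    }
#endif

  /* Otherwise take the regular heap free path. */

  mm_delayfree(heap, mem, CONFIG_MM_FREE_DELAYCOUNT_MAX > 0);
}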
View File

@ -37,7 +37,7 @@
* Pre-processor Definitions
****************************************************************************/
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
# define MEMPOOL_NPOOLS (CONFIG_MM_HEAP_MEMPOOL_THRESHOLD / MM_MIN_CHUNK)
#endif
@ -45,7 +45,7 @@
* Private Functions
****************************************************************************/
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0 && CONFIG_MM_BACKTRACE >= 0
#if defined(CONFIG_MM_HEAP_MEMPOOL) && CONFIG_MM_BACKTRACE >= 0
/****************************************************************************
* Name: mempool_memalign
@ -224,7 +224,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
FAR struct mm_heap_s *mm_initialize(FAR const char *name,
FAR void *heapstart, size_t heapsize)
{
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
size_t poolsize[MEMPOOL_NPOOLS];
#endif
FAR struct mm_heap_s *heap;
@ -288,12 +288,13 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
/* Initialize the multiple mempool in heap */
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
for (i = 0; i < MEMPOOL_NPOOLS; i++)
{
poolsize[i] = (i + 1) * MM_MIN_CHUNK;
}
heap->mm_threshold = CONFIG_MM_HEAP_MEMPOOL_THRESHOLD;
heap->mm_mpool = mempool_multiple_init(name, poolsize, MEMPOOL_NPOOLS,
(mempool_multiple_alloc_t)mempool_memalign,
(mempool_multiple_alloc_size_t)mm_malloc_size,
@ -322,7 +323,7 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
void mm_uninitialize(FAR struct mm_heap_s *heap)
{
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
mempool_multiple_deinit(heap->mm_mpool);
#endif

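For a concrete feel of the table built in mm_initialize() above: assuming MM_MIN_CHUNK is 16 bytes (the real value depends on the configuration) and a threshold of 64, MEMPOOL_NPOOLS is 4 and the loop produces pools of 16, 32, 48 and 64 bytes. A standalone sketch of that arithmetic:

#include <stddef.h>
#include <stdio.h>

#define MM_MIN_CHUNK                     16   /* assumed for illustration */
#define CONFIG_MM_HEAP_MEMPOOL_THRESHOLD 64   /* assumed for illustration */
#define MEMPOOL_NPOOLS (CONFIG_MM_HEAP_MEMPOOL_THRESHOLD / MM_MIN_CHUNK)

int main(void)
{
  size_t poolsize[MEMPOOL_NPOOLS];
  int i;

  for (i = 0; i < MEMPOOL_NPOOLS; i++)
    {
      poolsize[i] = (i + 1) * MM_MIN_CHUNK;
      printf("pool %d: %zu bytes\n", i, poolsize[i]);
    }

  /* Prints 16, 32, 48, 64: requests at or below the threshold are
   * served from one of these fixed-size pools.
   */

  return 0;
}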
View File

@ -136,7 +136,7 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node,
struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
{
struct mallinfo info;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
struct mallinfo poolinfo;
#endif
@ -147,7 +147,7 @@ struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
info.uordblks += sizeof(struct mm_heap_s);
info.usmblks = heap->mm_maxused + sizeof(struct mm_heap_s);
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
poolinfo = mempool_multiple_mallinfo(heap->mm_mpool);
info.uordblks -= poolinfo.fordblks;
@ -177,7 +177,7 @@ struct mallinfo_task mm_mallinfo_task(FAR struct mm_heap_s *heap,
0, 0
};
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
info = mempool_multiple_info_task(heap->mm_mpool, task);
#endif

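The subtraction of poolinfo.fordblks exists because the pool's backing chunks are allocated from the heap itself: the heap counts them as used, yet blocks still sitting free inside the pool remain available to callers. A worked adjustment with made-up numbers:

/* Made-up numbers, for illustration only:
 *
 *   heap used before adjustment (info.uordblks)          : 10000 bytes
 *   free blocks held inside the pool (poolinfo.fordblks) :  1200 bytes
 *
 *   info.uordblks -= poolinfo.fordblks;   ->  reported used = 8800 bytes
 *
 * Memory the heap handed to the pool, but that the pool has not yet
 * handed to callers, is therefore not reported as "in use".
 */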
View File

@ -121,7 +121,7 @@ void mm_dump_handler(FAR struct tcb_s *tcb, FAR void *arg)
}
#endif
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
void mm_mempool_dump_handle(FAR struct mempool_s *pool, FAR void *arg)
{
struct mempoolinfo_s info;
@ -160,12 +160,15 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
free_delaylist(heap, false);
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
ret = mempool_multiple_alloc(heap->mm_mpool, size);
if (ret != NULL)
{
return ret;
}
}
#endif
/* Adjust the size to account for (1) the size of the allocated node and
@ -344,7 +347,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
nxsched_foreach(mm_dump_handler, heap);
mm_dump_handler(NULL, heap);
# endif
# if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
# ifdef CONFIG_MM_HEAP_MEMPOOL
mwarn("%11s%9s%9s%9s%9s%9s\n",
"bsize", "total", "nused",
"nfree", "nifree", "nwaiter");

View File

@ -38,13 +38,15 @@
size_t mm_malloc_size(FAR struct mm_heap_s *heap, FAR void *mem)
{
FAR struct mm_freenode_s *node;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
ssize_t size = mempool_multiple_alloc_size(heap->mm_mpool, mem);
if (size >= 0)
{
return size;
}
}
#endif
/* Protect against attempts to query a NULL reference */

View File

@ -72,12 +72,15 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
return NULL;
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
node = mempool_multiple_memalign(heap->mm_mpool, alignment, size);
if (node != NULL)
{
return node;
}
}
#endif
/* If this requested alinement's less than or equal to the natural

View File

@ -156,7 +156,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
mempool_multiple_memdump(heap->mm_mpool, dump);
#endif
mm_foreach(heap, memdump_handler, (FAR void *)dump);

View File

@ -83,24 +83,28 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
DEBUGASSERT(mm_heapmember(heap, oldmem));
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
newmem = mempool_multiple_realloc(heap->mm_mpool, oldmem, size);
if (newmem != NULL)
{
return newmem;
}
else if (size <= CONFIG_MM_HEAP_MEMPOOL_THRESHOLD ||
else if (size <= heap->mm_threshold ||
mempool_multiple_alloc_size(heap->mm_mpool, oldmem) >= 0)
{
newmem = mm_malloc(heap, size);
if (newmem != NULL)
{
memcpy(newmem, oldmem, MIN(size, mm_malloc_size(heap, oldmem)));
memcpy(newmem, oldmem,
MIN(size, mm_malloc_size(heap, oldmem)));
mm_free(heap, oldmem);
}
return newmem;
}
}
#endif
/* Adjust the size to account for (1) the size of the allocated node and

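mm_realloc() now compares against the per-heap mm_threshold captured at initialization instead of the global Kconfig constant, so the decision stays correct for heaps created with a different (or zero) threshold. A simplified sketch of the decision flow, assuming the field is set as in the mm_initialize() hunk above:

#ifdef CONFIG_MM_HEAP_MEMPOOL
  if (heap->mm_mpool)
    {
      /* 1. Try to resize the block inside the pool itself. */

      newmem = mempool_multiple_realloc(heap->mm_mpool, oldmem, size);
      if (newmem != NULL)
        {
          return newmem;
        }

      /* 2. If the new size still falls in this heap's pool range, or
       *    the old block lives in the pool, fall back to
       *    malloc + copy + free so the block can migrate between the
       *    pool and the regular heap.
       */

      if (size <= heap->mm_threshold ||
          mempool_multiple_alloc_size(heap->mm_mpool, oldmem) >= 0)
        {
          newmem = mm_malloc(heap, size);
          if (newmem != NULL)
            {
              memcpy(newmem, oldmem,
                     MIN(size, mm_malloc_size(heap, oldmem)));
              mm_free(heap, oldmem);
            }

          return newmem;
        }
    }
#endif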
View File

@ -53,7 +53,7 @@
# define MM_PTR_FMT_WIDTH 19
#endif
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
# define MEMPOOL_NPOOLS (CONFIG_MM_HEAP_MEMPOOL_THRESHOLD / tlsf_align_size())
#endif
@ -99,7 +99,8 @@ struct mm_heap_s
/* The is a multiple mempool of the heap */
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
size_t mm_threshold;
FAR struct mempool_multiple_s *mm_mpool;
#endif
@ -257,7 +258,7 @@ static bool free_delaylist(FAR struct mm_heap_s *heap, bool force)
return ret;
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0 && CONFIG_MM_BACKTRACE >= 0
#if defined(CONFIG_MM_HEAP_MEMPOOL) && CONFIG_MM_BACKTRACE >= 0
/****************************************************************************
* Name: mempool_memalign
@ -771,11 +772,14 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
DEBUGASSERT(mm_heapmember(heap, mem));
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
if (mempool_multiple_free(heap->mm_mpool, mem) >= 0)
{
return;
}
}
#endif
mm_delayfree(heap, mem, CONFIG_MM_FREE_DELAYCOUNT_MAX > 0);
@ -861,7 +865,7 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
FAR void *heapstart, size_t heapsize)
{
FAR struct mm_heap_s *heap;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
size_t poolsize[MEMPOOL_NPOOLS];
int i;
#endif
@ -904,12 +908,13 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
#endif
#endif
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD > 0
for (i = 0; i < MEMPOOL_NPOOLS; i++)
{
poolsize[i] = (i + 1) * tlsf_align_size();
}
heap->mm_threshold = CONFIG_MM_HEAP_MEMPOOL_THRESHOLD;
heap->mm_mpool = mempool_multiple_init(name, poolsize, MEMPOOL_NPOOLS,
(mempool_multiple_alloc_t)mempool_memalign,
(mempool_multiple_alloc_size_t)mm_malloc_size,
@ -933,7 +938,7 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
{
struct mallinfo info;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
struct mallinfo poolinfo;
#endif
#if CONFIG_MM_REGIONS > 1
@ -963,7 +968,7 @@ struct mallinfo mm_mallinfo(FAR struct mm_heap_s *heap)
info.uordblks = info.arena - info.fordblks;
info.usmblks = heap->mm_maxused;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
poolinfo = mempool_multiple_mallinfo(heap->mm_mpool);
info.uordblks -= poolinfo.fordblks;
@ -988,7 +993,7 @@ struct mallinfo_task mm_mallinfo_task(FAR struct mm_heap_s *heap,
#define region 0
#endif
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
info = mempool_multiple_info_task(heap->mm_mpool, task);
#endif
@ -1047,7 +1052,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
mempool_multiple_memdump(heap->mm_mpool, dump);
#endif
@ -1073,12 +1078,15 @@ void mm_memdump(FAR struct mm_heap_s *heap,
size_t mm_malloc_size(FAR struct mm_heap_s *heap, FAR void *mem)
{
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
ssize_t size = mempool_multiple_alloc_size(heap->mm_mpool, mem);
if (size >= 0)
{
return size;
}
}
#endif
#if CONFIG_MM_BACKTRACE >= 0
@ -1110,12 +1118,15 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
size = 1;
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
ret = mempool_multiple_alloc(heap->mm_mpool, size);
if (ret != NULL)
{
return ret;
}
}
#endif
/* Free the delay list first */
@ -1184,12 +1195,15 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
{
FAR void *ret;
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
ret = mempool_multiple_memalign(heap->mm_mpool, alignment, size);
if (ret != NULL)
{
return ret;
}
}
#endif
/* Free the delay list first */
@ -1280,13 +1294,15 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
size = 1;
}
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
if (heap->mm_mpool)
{
newmem = mempool_multiple_realloc(heap->mm_mpool, oldmem, size);
if (newmem != NULL)
{
return newmem;
}
else if (size <= CONFIG_MM_HEAP_MEMPOOL_THRESHOLD ||
else if (size <= heap->mm_threshold ||
mempool_multiple_alloc_size(heap->mm_mpool, oldmem) >= 0)
{
newmem = mm_malloc(heap, size);
@ -1297,6 +1313,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
return newmem;
}
}
}
#endif
#ifdef CONFIG_MM_KASAN
@ -1374,7 +1391,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
void mm_uninitialize(FAR struct mm_heap_s *heap)
{
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
#ifdef CONFIG_MM_HEAP_MEMPOOL
mempool_multiple_deinit(heap->mm_mpool);
#endif
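
The tlsf heap above applies the same pattern but sizes its pools in multiples of tlsf_align_size() rather than MM_MIN_CHUNK. Assuming, purely for illustration, an alignment of 8 bytes and a threshold of 64, its mm_initialize() loop would build eight pools:

/* Illustration only; tlsf_align_size() is configuration dependent. */

#define TLSF_ALIGN_SIZE_ASSUMED          8    /* assumed value */
#define CONFIG_MM_HEAP_MEMPOOL_THRESHOLD 64   /* assumed value */
#define MEMPOOL_NPOOLS \
  (CONFIG_MM_HEAP_MEMPOOL_THRESHOLD / TLSF_ALIGN_SIZE_ASSUMED)

/* poolsize[i] = (i + 1) * tlsf_align_size()
 *             = 8, 16, 24, 32, 40, 48, 56, 64 for i = 0 .. MEMPOOL_NPOOLS - 1
 */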