From c386a1a2d9351bc4535bb7e4536190de1e4cf1ad Mon Sep 17 00:00:00 2001
From: dongjiuzhu1
Date: Sun, 6 Nov 2022 23:27:44 +0800
Subject: [PATCH] mm/mempool: support backtrace function for mempool

Signed-off-by: dongjiuzhu1
Signed-off-by: anjiahao
---
 include/nuttx/mm/mempool.h    |  82 ++++++++++
 mm/mempool/mempool.c          | 279 ++++++++++++++++++++++++++++++----
 mm/mempool/mempool_multiple.c |  43 ++++++
 mm/mm_heap/mm_mallinfo.c      |   5 +
 mm/mm_heap/mm_memdump.c       |   3 +
 mm/tlsf/mm_tlsf.c             |   9 ++
 6 files changed, 388 insertions(+), 33 deletions(-)

diff --git a/include/nuttx/mm/mempool.h b/include/nuttx/mm/mempool.h
index 761d388c24..aff7b0dc19 100644
--- a/include/nuttx/mm/mempool.h
+++ b/include/nuttx/mm/mempool.h
@@ -25,6 +25,7 @@
  * Included Files
  ****************************************************************************/
 
+#include
 #include
 #include
 
@@ -47,6 +48,14 @@ struct mempool_procfs_entry_s
 {
   FAR const char *name;
   FAR struct mempool_procfs_entry_s *next;
+#if CONFIG_MM_BACKTRACE >= 0
+
+  /* Dynamic control flag for whether backtraces are recorded for this
+   * pool.  It can be changed at run time via /proc/mempool.
+   */
+
+  bool backtrace;
+#endif
 };
 #endif
 
@@ -67,6 +76,9 @@ struct mempool_s
   struct list_node list;   /* The free block list in normal mempool */
   struct list_node ilist;  /* The free block list in interrupt mempool */
   struct list_node elist;  /* The expand block list for normal mempool */
+#if CONFIG_MM_BACKTRACE >= 0
+  struct list_node alist;  /* The used block list in mempool */
+#endif
   size_t nused;            /* The number of used block in mempool */
   spinlock_t lock;         /* The protect lock to mempool */
   sem_t waitsem;           /* The semaphore of waiter get free block */
@@ -102,6 +114,8 @@ struct mempoolinfo_s
   unsigned long nwaiter;   /* This is the number of waiter for mempool */
 };
 
+#define mempoolinfo_task mallinfo_task
+
 /****************************************************************************
  * Public Function Prototypes
  ****************************************************************************/
@@ -182,6 +196,27 @@ void mempool_free(FAR struct mempool_s *pool, FAR void *blk);
 
 int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info);
 
+/****************************************************************************
+ * Name: mempool_memdump
+ *
+ * Description:
+ *   mempool_memdump dumps memory information for the given pid.  If pid
+ *   equals -1, this function dumps all allocated nodes of this mempool
+ *   and outputs a backtrace for each allocated node.  If pid equals -2,
+ *   it dumps all free nodes of this mempool.  If pid is greater than or
+ *   equal to 0, it dumps only the nodes allocated by that pid, together
+ *   with their backtraces.
+ *
+ * Input Parameters:
+ *   pool    - Address of the memory pool to be used.
+ *   pid     - The pid of the task.
+ *
+ * Returned Value:
+ *   None.
+ ****************************************************************************/
+
+void mempool_memdump(FAR struct mempool_s *pool, pid_t pid);
+
 /****************************************************************************
  * Name: mempool_deinit
  *
@@ -194,6 +229,20 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info);
 
 int mempool_deinit(FAR struct mempool_s *pool);
 
+/****************************************************************************
+ * Name: mempool_info_task
+ *
+ * Description:
+ *   Get the memory usage information of the memory pool.
+ *
+ * Input Parameters:
+ *   pool    - Address of the memory pool to be used.
+ *   info    - Memory info.
+ ****************************************************************************/
+
+int mempool_info_task(FAR struct mempool_s *pool,
+                      FAR struct mempoolinfo_task *info);
+
 /****************************************************************************
  * Name: mempool_procfs_register
  *
@@ -352,6 +401,26 @@ size_t mempool_multiple_alloc_size(FAR void *blk);
 FAR void *mempool_multiple_memalign(FAR struct mempool_multiple_s *mpool,
                                     size_t alignment, size_t size);
 
+/****************************************************************************
+ * Name: mempool_multiple_memdump
+ *
+ * Description:
+ *   mempool_multiple_memdump dumps memory information for the given pid
+ *   from every pool in the multiple mempool.  If pid equals -1, it dumps
+ *   all allocated nodes and outputs a backtrace for each allocated node.
+ *   If pid equals -2, it dumps all free nodes of this multiple mempool.
+ *   If pid is greater than or equal to 0, it dumps only the nodes
+ *   allocated by that pid, together with their backtraces.
+ *
+ * Input Parameters:
+ *   mpool - The handle of multiple memory pool to be used.
+ *   pid   - The pid of the task.
+ *
+ ****************************************************************************/
+
+void mempool_multiple_memdump(FAR struct mempool_multiple_s *mpool,
+                              pid_t pid);
+
 /****************************************************************************
  * Name: mempool_multiple_fixed_alloc
  *
@@ -426,6 +495,19 @@ void mempool_multiple_fixed_free(FAR struct mempool_multiple_s *mpool,
 
 int mempool_multiple_deinit(FAR struct mempool_multiple_s *mpool);
 
+/****************************************************************************
+ * Name: mempool_multiple_info_task
+ * Description:
+ *   Get the memory usage information of the multiple memory pool.
+ *
+ * Input Parameters:
+ *   mpool - The handle of multiple memory pool to be used.
+ *   info  - Memory info.
+ ****************************************************************************/
+
+void mempool_multiple_info_task(FAR struct mempool_multiple_s *mpool,
+                                FAR struct mempoolinfo_task *info);
+
 #undef EXTERN
 #if defined(__cplusplus)
 }
diff --git a/mm/mempool/mempool.c b/mm/mempool/mempool.c
index d9892ba200..04bbc0148f 100644
--- a/mm/mempool/mempool.c
+++ b/mm/mempool/mempool.c
@@ -22,13 +22,41 @@
  * Included Files
  ****************************************************************************/
 
+#include
 #include
+#include
+#include
 #include
 #include
 
 #include "kasan/kasan.h"
 
+#if UINTPTR_MAX <= UINT32_MAX
+# define MM_PTR_FMT_WIDTH 11
+#elif UINTPTR_MAX <= UINT64_MAX
+# define MM_PTR_FMT_WIDTH 19
+#endif
+
+#ifndef ALIGN_UP
+# define ALIGN_UP(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
+#endif
+
+/****************************************************************************
+ * Private Types
+ ****************************************************************************/
+
+#if CONFIG_MM_BACKTRACE >= 0
+struct mempool_backtrace_s
+{
+  FAR struct list_node node;
+  pid_t pid;
+# if CONFIG_MM_BACKTRACE > 0
+  FAR void *backtrace[CONFIG_MM_BACKTRACE];
+# endif
+};
+#endif
+
 /****************************************************************************
  * Private Functions
  ****************************************************************************/
@@ -73,6 +101,29 @@ static inline void mempool_mfree(FAR struct mempool_s *pool, FAR void *addr)
     }
 }
 
+#if CONFIG_MM_BACKTRACE >= 0
+static inline void mempool_add_backtrace(FAR struct mempool_s *pool,
+                                         FAR struct mempool_backtrace_s *buf)
+{
+  list_add_head(&pool->alist, &buf->node);
+  buf->pid = gettid();
+# if CONFIG_MM_BACKTRACE > 0
+  if (pool->procfs.backtrace)
+    {
+      int result = backtrace(buf->backtrace, CONFIG_MM_BACKTRACE);
+      while (result < CONFIG_MM_BACKTRACE)
+        {
+          buf->backtrace[result++] = NULL;
+        }
+    }
+  else
+    {
+      buf->backtrace[0] = NULL;
+    }
+# endif
+}
+#endif
+
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -96,43 +147,53 @@ static inline void mempool_mfree(FAR struct mempool_s *pool, FAR void *addr)
 
 int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
 {
+#if CONFIG_MM_BACKTRACE >= 0
+  size_t blocksize = pool->blocksize + sizeof(struct mempool_backtrace_s);
+#else
+  size_t blocksize = pool->blocksize;
+#endif
+  size_t alignment = 0;
   size_t ninterrupt;
   size_t ninitial;
   size_t count;
 
-  DEBUGASSERT(pool != NULL && pool->blocksize != 0);
+  DEBUGASSERT(pool->blocksize != 0);
 
   pool->nused = 0;
   list_initialize(&pool->list);
   list_initialize(&pool->ilist);
   list_initialize(&pool->elist);
-  ninitial = pool->initialsize / pool->blocksize;
-  ninterrupt = pool->interruptsize / pool->blocksize;
+#if CONFIG_MM_BACKTRACE >= 0
+  list_initialize(&pool->alist);
+#endif
+
+  if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+    {
+      alignment = pool->blocksize;
+      blocksize = ALIGN_UP(blocksize, alignment);
+    }
+
+  ninitial = pool->initialsize / blocksize;
+  ninterrupt = pool->interruptsize / blocksize;
   count = ninitial + ninterrupt;
   if (count != 0)
     {
-      size_t alignment = 0;
       FAR char *base;
 
-      if ((pool->blocksize & (pool->blocksize - 1)) == 0)
-        {
-          alignment = pool->blocksize;
-        }
-
-      base = mempool_malloc(pool, alignment, pool->blocksize * count +
-                                             sizeof(struct list_node));
+      base = mempool_malloc(pool, alignment,
                            blocksize * count + sizeof(struct list_node));
       if (base == NULL)
        {
          return -ENOMEM;
        }
 
-      mempool_add_list(&pool->ilist, base, ninterrupt, pool->blocksize);
-      mempool_add_list(&pool->list, base + ninterrupt * pool->blocksize,
-                       ninitial, pool->blocksize);
+      mempool_add_list(&pool->ilist, base, ninterrupt, blocksize);
+      mempool_add_list(&pool->list, base + ninterrupt * blocksize,
+                       ninitial, blocksize);
       list_add_head(&pool->elist, (FAR struct list_node *)
-                                  (base + count * pool->blocksize));
-      kasan_poison(base, pool->blocksize * count);
+                                  (base + count * blocksize));
+      kasan_poison(base, blocksize * count);
     }
 
   if (pool->wait && pool->expandsize == 0)
@@ -142,6 +203,9 @@ int mempool_init(FAR struct mempool_s *pool, FAR const char *name)
 
 #if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMPOOL)
   mempool_procfs_register(&pool->procfs, name);
+# ifdef CONFIG_MM_BACKTRACE_DEFAULT
+  pool->procfs.backtrace = true;
+# endif
 #endif
 
   return 0;
@@ -169,8 +233,6 @@ FAR void *mempool_alloc(FAR struct mempool_s *pool)
   FAR struct list_node *blk;
   irqstate_t flags;
 
-  DEBUGASSERT(pool != NULL);
-
 retry:
   flags = spin_lock_irqsave(&pool->lock);
   blk = list_remove_head(&pool->list);
@@ -189,26 +251,34 @@ retry:
       spin_unlock_irqrestore(&pool->lock, flags);
       if (pool->expandsize != 0)
         {
-          size_t nexpand = pool->expandsize / pool->blocksize;
+#if CONFIG_MM_BACKTRACE >= 0
+          size_t blocksize = pool->blocksize +
+                             sizeof(struct mempool_backtrace_s);
+#else
+          size_t blocksize = pool->blocksize;
+#endif
           size_t alignment = 0;
+          size_t nexpand;
 
           if ((pool->blocksize & (pool->blocksize - 1)) == 0)
             {
              alignment = pool->blocksize;
+             blocksize = ALIGN_UP(blocksize, alignment);
            }
 
+          nexpand = pool->expandsize / blocksize;
           blk = mempool_malloc(pool, alignment,
-                               pool->blocksize * nexpand + sizeof(*blk));
+                               blocksize * nexpand + sizeof(*blk));
           if (blk == NULL)
            {
              return NULL;
            }
 
-          kasan_poison(blk, pool->blocksize * nexpand);
+          kasan_poison(blk, blocksize * nexpand);
           flags = spin_lock_irqsave(&pool->lock);
-          mempool_add_list(&pool->list, blk, nexpand, pool->blocksize);
+          mempool_add_list(&pool->list, blk, nexpand, blocksize);
           list_add_head(&pool->elist, (FAR struct list_node *)
-                                      ((FAR char *)blk + nexpand * pool->blocksize));
+                                      ((FAR char *)blk + nexpand * blocksize));
           blk = list_remove_head(&pool->list);
         }
       else if (!pool->wait ||
@@ -224,6 +294,10 @@ retry:
     }
 
   pool->nused++;
+#if CONFIG_MM_BACKTRACE >= 0
+  mempool_add_backtrace(pool, (FAR struct mempool_backtrace_s *)
+                              ((FAR char *)blk + pool->blocksize));
+#endif
   kasan_unpoison(blk, pool->blocksize);
 out_with_lock:
   spin_unlock_irqrestore(&pool->lock, flags);
@@ -244,19 +318,31 @@ out_with_lock:
 void mempool_free(FAR struct mempool_s *pool, FAR void *blk)
 {
   irqstate_t flags;
+#if CONFIG_MM_BACKTRACE >= 0
+  size_t blocksize = pool->blocksize + sizeof(struct mempool_backtrace_s);
+  FAR struct mempool_backtrace_s *buf =
+    (FAR struct mempool_backtrace_s *)((FAR char *)blk + pool->blocksize);
 
-  DEBUGASSERT(pool != NULL && blk != NULL);
+  list_delete(&buf->node);
+#else
+  size_t blocksize = pool->blocksize;
+#endif
 
   flags = spin_lock_irqsave(&pool->lock);
+  if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+    {
+      blocksize = ALIGN_UP(blocksize, pool->blocksize);
+    }
+
   if (pool->interruptsize != 0)
     {
       FAR char *base;
       size_t ninterrupt;
 
       base = (FAR char *)(list_peek_head(&pool->elist) + 1);
-      ninterrupt = pool->interruptsize / pool->blocksize;
+      ninterrupt = pool->interruptsize / blocksize;
       if ((FAR char *)blk >= base &&
-          (FAR char *)blk < base + ninterrupt * pool->blocksize)
+          (FAR char *)blk < base + ninterrupt * blocksize)
         {
           list_add_head(&pool->ilist, blk);
         }
@@ -328,6 +414,125 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
   return 0;
 }
 
+/****************************************************************************
+ * Name: mempool_info_task
+ ****************************************************************************/
+
+int mempool_info_task(FAR struct mempool_s *pool,
+                      FAR struct mempoolinfo_task *info)
+{
+  DEBUGASSERT(info);
+  if (info->pid == -2)
+    {
+      size_t count = list_length(&pool->list);
+
+      info->aordblks += count;
+      info->uordblks += count * pool->blocksize;
+    }
+  else if (info->pid == -1)
+    {
+#if CONFIG_MM_BACKTRACE >= 0
+      size_t blocksize = pool->blocksize +
+                         sizeof(struct mempool_backtrace_s);
+#else
+      size_t blocksize = pool->blocksize;
+#endif
+      size_t count;
+
+      if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+        {
+          blocksize = ALIGN_UP(blocksize, pool->blocksize);
+        }
+
+      count = (pool->initialsize + pool->interruptsize) / blocksize +
+              (list_length(&pool->elist) - 1) - list_length(&pool->list);
+
+      info->aordblks += count;
+      info->uordblks += count * pool->blocksize;
+    }
+#if CONFIG_MM_BACKTRACE >= 0
+  else
+    {
+      FAR struct mempool_backtrace_s *buf;
+      list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s,
+                           node)
+        {
+          if (buf->pid == info->pid)
+            {
+              info->aordblks++;
+              info->uordblks += pool->blocksize;
+            }
+        }
+    }
+#endif
+
+  return OK;
+}
+
+/****************************************************************************
+ * Name: mempool_memdump
+ *
+ * Description:
+ *   mempool_memdump dumps memory information for the given pid.  If pid
+ *   equals -1, this function dumps all allocated nodes of this mempool
+ *   and outputs a backtrace for each allocated node.  If pid equals -2,
+ *   it dumps all free nodes of this mempool.  If pid is greater than or
+ *   equal to 0, it dumps only the nodes allocated by that pid, together
+ *   with their backtraces.
+ *
+ * Input Parameters:
+ *   pool    - Address of the memory pool to be used.
+ *   pid     - The pid of the task.
+ *
+ * Returned Value:
+ *   None.
+ ****************************************************************************/
+
+void mempool_memdump(FAR struct mempool_s *pool, pid_t pid)
+{
+  if (pid == -2)
+    {
+      FAR struct list_node *node;
+      list_for_every(&pool->list, node)
+        {
+          syslog(LOG_INFO, "%12zu%*p\n",
+                 pool->blocksize, MM_PTR_FMT_WIDTH,
+                 (FAR char *)node);
+        }
+    }
+#if CONFIG_MM_BACKTRACE >= 0
+  else
+    {
+      FAR struct mempool_backtrace_s *buf;
+      list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s,
+                           node)
+        {
+          if (buf->pid == pid || pid == -1)
+            {
+# if CONFIG_MM_BACKTRACE > 0
+              int i;
+              FAR const char *format = " %0*p";
+# endif
+              char bt[CONFIG_MM_BACKTRACE * MM_PTR_FMT_WIDTH + 1];
+
+              bt[0] = '\0';
+# if CONFIG_MM_BACKTRACE > 0
+              for (i = 0; i < CONFIG_MM_BACKTRACE && buf->backtrace[i]; i++)
+                {
+                  sprintf(bt + i * MM_PTR_FMT_WIDTH, format,
+                          MM_PTR_FMT_WIDTH - 1, buf->backtrace[i]);
+                }
+# endif
+
+              syslog(LOG_INFO, "%6d%12zu%*p%s\n",
+                     (int)buf->pid, pool->blocksize, MM_PTR_FMT_WIDTH,
+                     ((FAR char *)buf - pool->blocksize), bt);
+            }
+        }
+    }
+#endif
+}
+
 /****************************************************************************
  * Name: mempool_deinit
  *
@@ -340,13 +545,16 @@ int mempool_info(FAR struct mempool_s *pool, FAR struct mempoolinfo_s *info)
 
 int mempool_deinit(FAR struct mempool_s *pool)
 {
+#if CONFIG_MM_BACKTRACE >= 0
+  size_t blocksize = pool->blocksize + sizeof(struct mempool_backtrace_s);
+#else
+  size_t blocksize = pool->blocksize;
+#endif
   FAR struct list_node *blk;
   size_t ninterrupt;
   size_t ninitial;
   size_t count;
 
-  DEBUGASSERT(pool != NULL);
-
   if (pool->nused != 0)
     {
       return -EBUSY;
@@ -356,21 +564,26 @@ int mempool_deinit(FAR struct mempool_s *pool)
   mempool_procfs_unregister(&pool->procfs);
 #endif
 
-  ninitial = pool->initialsize / pool->blocksize;
-  ninterrupt = pool->interruptsize / pool->blocksize;
+  if ((pool->blocksize & (pool->blocksize - 1)) == 0)
+    {
+      blocksize = ALIGN_UP(blocksize, pool->blocksize);
+    }
+
+  ninitial = pool->initialsize / blocksize;
+  ninterrupt = pool->interruptsize / blocksize;
   count = ninitial + ninterrupt;
   if (count == 0)
     {
-      count = pool->expandsize / pool->blocksize;
+      count = pool->expandsize / blocksize;
     }
 
   while ((blk = list_remove_head(&pool->elist)) != NULL)
     {
       blk = (FAR struct list_node *)((FAR char *)blk -
-                                     count * pool->blocksize);
+                                     count * blocksize);
       kasan_unpoison(blk, mm_malloc_size(blk));
       mempool_mfree(pool, blk);
-      count = pool->expandsize / pool->blocksize;
+      count = pool->expandsize / blocksize;
     }
 
   if (pool->wait && pool->expandsize == 0)
diff --git a/mm/mempool/mempool_multiple.c b/mm/mempool/mempool_multiple.c
index 35bdc9724d..72eceaca40 100644
--- a/mm/mempool/mempool_multiple.c
+++ b/mm/mempool/mempool_multiple.c
@@ -361,6 +361,49 @@ FAR void *mempool_multiple_memalign(FAR struct mempool_multiple_s *mpool,
   return NULL;
 }
 
+/****************************************************************************
+ * Name: mempool_multiple_info_task
+ ****************************************************************************/
+
+void mempool_multiple_info_task(FAR struct mempool_multiple_s *mpool,
+                                FAR struct mempoolinfo_task *info)
+{
+  size_t i;
+
+  for (i = 0; i < mpool->npools; i++)
+    {
+      mempool_info_task(mpool->pools + i, info);
+    }
+}
+
+/****************************************************************************
+ * Name: mempool_multiple_memdump
+ *
+ * Description:
+ *   mempool_multiple_memdump dumps memory information for the given pid
+ *   from every pool in the multiple mempool.  If pid equals -1, it dumps
+ *   all allocated nodes and outputs a backtrace for each allocated node.
+ *   If pid equals -2, it dumps all free nodes of this multiple mempool.
+ *   If pid is greater than or equal to 0, it dumps only the nodes
+ *   allocated by that pid, together with their backtraces.
+ *
+ * Input Parameters:
+ *   mpool - The handle of multiple memory pool to be used.
+ *   pid   - The pid of the task.
+ *
+ ****************************************************************************/
+
+void mempool_multiple_memdump(FAR struct mempool_multiple_s *mpool,
+                              pid_t pid)
+{
+  size_t i;
+
+  for (i = 0; i < mpool->npools; i++)
+    {
+      mempool_memdump(mpool->pools + i, pid);
+    }
+}
+
 /****************************************************************************
  * Name: mempool_multiple_fixed_alloc
  *
diff --git a/mm/mm_heap/mm_mallinfo.c b/mm/mm_heap/mm_mallinfo.c
index 3a7fc5f4c2..616983fb9e 100644
--- a/mm/mm_heap/mm_mallinfo.c
+++ b/mm/mm_heap/mm_mallinfo.c
@@ -151,6 +151,11 @@ int mm_mallinfo_task(FAR struct mm_heap_s *heap,
 
   info->uordblks = 0;
   info->aordblks = 0;
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  mempool_multiple_info_task(&heap->mm_mpool, info);
+#endif
+
   mm_foreach(heap, mallinfo_task_handler, info);
   return OK;
 }
diff --git a/mm/mm_heap/mm_memdump.c b/mm/mm_heap/mm_memdump.c
index 93b8fdb53a..b9fa2342fc 100644
--- a/mm/mm_heap/mm_memdump.c
+++ b/mm/mm_heap/mm_memdump.c
@@ -147,6 +147,9 @@ void mm_memdump(FAR struct mm_heap_s *heap, pid_t pid)
       syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  mempool_multiple_memdump(&heap->mm_mpool, pid);
+#endif
   mm_foreach(heap, memdump_handler, &pid);
 
   info.pid = pid;
diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c
index 2bbe14f028..67cffc44e8 100644
--- a/mm/tlsf/mm_tlsf.c
+++ b/mm/tlsf/mm_tlsf.c
@@ -861,6 +861,11 @@ int mm_mallinfo_task(FAR struct mm_heap_s *heap,
   DEBUGASSERT(info);
   info->uordblks = 0;
   info->aordblks = 0;
+
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  mempool_multiple_info_task(&heap->mm_mpool, info);
+#endif
+
 #if CONFIG_MM_REGIONS > 1
   for (region = 0; region < heap->mm_nregions; region++)
 #endif
@@ -913,6 +918,10 @@ void mm_memdump(FAR struct mm_heap_s *heap, pid_t pid)
       syslog(LOG_INFO, "%12s%*s\n", "Size", MM_PTR_FMT_WIDTH, "Address");
     }
 
+#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
+  mempool_multiple_memdump(&heap->mm_mpool, pid);
+#endif
+
 #if CONFIG_MM_REGIONS > 1
   for (region = 0; region < heap->mm_nregions; region++)
 #endif
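
Usage note (not part of the patch): the sketch below shows one way the new per-pool accounting API could be exercised once this change is applied.  The pool configuration, block sizes, and the g_example_pool/example() names are hypothetical; only the field and function names come from the declarations added above.

#include <nuttx/config.h>
#include <malloc.h>                 /* struct mallinfo_task, aliased as mempoolinfo_task */
#include <syslog.h>

#include <nuttx/mm/mempool.h>

static struct mempool_s g_example_pool =
{
  .blocksize     = 32,              /* user-visible block size */
  .initialsize   = 32 * 16,         /* preallocated normal blocks */
  .interruptsize = 0,               /* no interrupt-context reserve */
  .expandsize    = 32 * 8,          /* grow by this many bytes when empty */
  .wait          = false,
};

static void example(void)
{
  struct mempoolinfo_task info;
  FAR void *blk;

  mempool_init(&g_example_pool, "example");
  blk = mempool_alloc(&g_example_pool);

  /* pid >= 0: count blocks owned by that task;
   * pid == -1: count all allocated blocks;
   * pid == -2: count the free list.
   * The implementation accumulates into aordblks/uordblks, so the caller
   * zeroes them first.
   */

  info.pid      = -1;
  info.aordblks = 0;
  info.uordblks = 0;
  mempool_info_task(&g_example_pool, &info);
  syslog(LOG_INFO, "allocated: %d blocks, %d bytes\n",
         (int)info.aordblks, (int)info.uordblks);

  mempool_memdump(&g_example_pool, -1);   /* allocated nodes + backtraces */
  mempool_memdump(&g_example_pool, -2);   /* free nodes */

  mempool_free(&g_example_pool, blk);
  mempool_deinit(&g_example_pool);
}

With this pid convention the same entry points serve task-level accounting (pid >= 0), whole-pool accounting (-1), and free-list inspection (-2), matching the behavior of mm_memdump() for the main heap.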
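
For reference, the per-block overhead added by the backtrace record can be estimated with the stand-alone sketch below.  It only mirrors the ALIGN_UP rounding used in mempool_init()/mempool_alloc(); the backtrace_record_s layout is an assumption standing in for mempool_backtrace_s with CONFIG_MM_BACKTRACE set to 8, not the actual structure.

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))

/* Stand-in for struct mempool_backtrace_s (CONFIG_MM_BACKTRACE = 8) */

struct backtrace_record_s
{
  void *node[2];        /* list node (prev/next) */
  int   pid;            /* pid of the allocating task */
  void *backtrace[8];   /* saved return addresses */
};

int main(void)
{
  size_t blocksize = 32;   /* user-visible block size (power of two) */
  size_t total     = blocksize + sizeof(struct backtrace_record_s);

  /* Power-of-two pools keep their natural alignment by rounding the
   * enlarged block up to a multiple of blocksize, as the patch does.
   */

  if ((blocksize & (blocksize - 1)) == 0)
    {
      total = ALIGN_UP(total, blocksize);
    }

  printf("user block: %zu bytes, stored block: %zu bytes\n",
         blocksize, total);
  return 0;
}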