diff --git a/fs/procfs/fs_procfsmeminfo.c b/fs/procfs/fs_procfsmeminfo.c index 5153d6fe1f..c81df55947 100644 --- a/fs/procfs/fs_procfsmeminfo.c +++ b/fs/procfs/fs_procfsmeminfo.c @@ -469,7 +469,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer, FAR struct meminfo_file_s *procfile; struct mm_memdump_s dump = { - MM_BACKTRACE_ALLOC_PID, + PID_MM_ALLOC, #if CONFIG_MM_BACKTRACE >= 0 0, ULONG_MAX @@ -540,7 +540,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer, switch (buffer[0]) { case 'u': - dump.pid = MM_BACKTRACE_ALLOC_PID; + dump.pid = PID_MM_ALLOC; #if CONFIG_MM_BACKTRACE >= 0 p = (FAR char *)buffer + 4; @@ -549,7 +549,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer, break; case 'f': - dump.pid = MM_BACKTRACE_FREE_PID; + dump.pid = PID_MM_FREE; #if CONFIG_MM_BACKTRACE >= 0 p = (FAR char *)buffer + 4; diff --git a/include/malloc.h b/include/malloc.h index 41049c3373..ff6047c922 100644 --- a/include/malloc.h +++ b/include/malloc.h @@ -31,6 +31,13 @@ * Pre-processor Definitions ****************************************************************************/ +/* Special PID to query the info about alloc, free and mempool */ + +#define PID_MM_INVALID ((pid_t)-4) +#define PID_MM_MEMPOOL ((pid_t)-3) +#define PID_MM_FREE ((pid_t)-2) +#define PID_MM_ALLOC ((pid_t)-1) + /* For Linux and MacOS compatibility */ #define malloc_usable_size malloc_size diff --git a/include/nuttx/mm/mm.h b/include/nuttx/mm/mm.h index 004c65bbc2..193888f3f5 100644 --- a/include/nuttx/mm/mm.h +++ b/include/nuttx/mm/mm.h @@ -101,11 +101,6 @@ #define mm_memdump_s malltask -#define MM_BACKTRACE_INVALID_PID ((pid_t)-4) -#define MM_BACKTRACE_MEMPOOL_PID ((pid_t)-3) -#define MM_BACKTRACE_FREE_PID ((pid_t)-2) -#define MM_BACKTRACE_ALLOC_PID ((pid_t)-1) - /**************************************************************************** * Public Types 
****************************************************************************/ diff --git a/mm/mempool/mempool.c b/mm/mempool/mempool.c index 862842354d..60994326ba 100644 --- a/mm/mempool/mempool.c +++ b/mm/mempool/mempool.c @@ -401,7 +401,7 @@ mempool_info_task(FAR struct mempool_s *pool, 0, 0 }; - if (task->pid == MM_BACKTRACE_FREE_PID) + if (task->pid == PID_MM_FREE) { size_t count = mempool_queue_lenth(&pool->queue) + mempool_queue_lenth(&pool->iqueue); @@ -410,7 +410,7 @@ mempool_info_task(FAR struct mempool_s *pool, info.uordblks += count * pool->blocksize; } #if CONFIG_MM_BACKTRACE < 0 - else if (task->pid == MM_BACKTRACE_ALLOC_PID) + else if (task->pid == PID_MM_ALLOC) { size_t count = pool->nalloc; @@ -425,8 +425,8 @@ mempool_info_task(FAR struct mempool_s *pool, list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s, node) { - if (task->pid == buf->pid || task->pid == MM_BACKTRACE_ALLOC_PID || - (task->pid == MM_BACKTRACE_INVALID_PID && + if (task->pid == buf->pid || task->pid == PID_MM_ALLOC || + (task->pid == PID_MM_INVALID && nxsched_get_tcb(buf->pid) == NULL)) { if (buf->seqno >= task->seqmin && buf->seqno <= task->seqmax) @@ -465,7 +465,7 @@ mempool_info_task(FAR struct mempool_s *pool, void mempool_memdump(FAR struct mempool_s *pool, FAR const struct mm_memdump_s *dump) { - if (dump->pid == MM_BACKTRACE_FREE_PID) + if (dump->pid == PID_MM_FREE) { FAR sq_entry_t *entry; @@ -491,9 +491,8 @@ void mempool_memdump(FAR struct mempool_s *pool, list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s, node) { - if ((buf->pid == dump->pid || - dump->pid == MM_BACKTRACE_ALLOC_PID) && - buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax) + if ((buf->pid == dump->pid || dump->pid == PID_MM_ALLOC) && + buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax) { # if CONFIG_MM_BACKTRACE > 0 int i; diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c index 5055eb3f01..7bc8629216 100644 --- a/mm/mm_heap/mm_initialize.c 
+++ b/mm/mm_heap/mm_initialize.c @@ -66,7 +66,7 @@ static FAR void *mempool_memalign(FAR void *arg, size_t alignment, { node = (FAR struct mm_allocnode_s *) ((FAR char *)ret - SIZEOF_MM_ALLOCNODE); - node->pid = MM_BACKTRACE_MEMPOOL_PID; + node->pid = PID_MM_MEMPOOL; } return ret; diff --git a/mm/mm_heap/mm_mallinfo.c b/mm/mm_heap/mm_mallinfo.c index 83bb4d7168..539ab75769 100644 --- a/mm/mm_heap/mm_mallinfo.c +++ b/mm/mm_heap/mm_mallinfo.c @@ -96,15 +96,15 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node, { DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE); #if CONFIG_MM_BACKTRACE < 0 - if (handle->task->pid == MM_BACKTRACE_ALLOC_PID) + if (handle->task->pid == PID_MM_ALLOC) { handle->info->aordblks++; handle->info->uordblks += nodesize; } #else - if (handle->task->pid == MM_BACKTRACE_ALLOC_PID || + if (handle->task->pid == PID_MM_ALLOC || handle->task->pid == node->pid || - (handle->task->pid == MM_BACKTRACE_INVALID_PID && + (handle->task->pid == PID_MM_INVALID && nxsched_get_tcb(node->pid) == NULL)) { if (node->seqno >= handle->task->seqmin && @@ -116,7 +116,7 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node, } #endif } - else if (handle->task->pid == MM_BACKTRACE_FREE_PID) + else if (handle->task->pid == PID_MM_FREE) { handle->info->aordblks++; handle->info->uordblks += nodesize; diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c index ea4f4a0f8d..4ab3f50ce9 100644 --- a/mm/mm_heap/mm_malloc.c +++ b/mm/mm_heap/mm_malloc.c @@ -80,7 +80,7 @@ void mm_dump_handler(FAR struct tcb_s *tcb, FAR void *arg) struct mallinfo_task info; struct malltask task; - task.pid = tcb ? tcb->pid : MM_BACKTRACE_INVALID_PID; + task.pid = tcb ? 
tcb->pid : PID_MM_INVALID; task.seqmin = 0; task.seqmax = ULONG_MAX; info = mm_mallinfo_task(arg, &task); @@ -274,7 +274,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size) # ifdef CONFIG_MM_DUMP_DETAILS_ON_FAILURE struct mm_memdump_s dump = { - MM_BACKTRACE_ALLOC_PID, 0, ULONG_MAX + PID_MM_ALLOC, 0, ULONG_MAX }; # endif #endif diff --git a/mm/mm_heap/mm_memdump.c b/mm/mm_heap/mm_memdump.c index 088bf85347..0de72465be 100644 --- a/mm/mm_heap/mm_memdump.c +++ b/mm/mm_heap/mm_memdump.c @@ -59,9 +59,9 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg) { DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE); #if CONFIG_MM_BACKTRACE < 0 - if (dump->pid == MM_BACKTRACE_ALLOC_PID) + if (dump->pid == PID_MM_ALLOC) #else - if ((dump->pid == MM_BACKTRACE_ALLOC_PID || node->pid == dump->pid) && + if ((dump->pid == PID_MM_ALLOC || node->pid == dump->pid) && node->seqno >= dump->seqmin && node->seqno <= dump->seqmax) #endif { @@ -106,7 +106,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg) SIZEOF_MM_NODE(fnode->flink) == 0 || SIZEOF_MM_NODE(fnode->flink) >= nodesize); - if (dump->pid <= MM_BACKTRACE_FREE_PID) + if (dump->pid <= PID_MM_FREE) { syslog(LOG_INFO, "%12zu%*p\n", nodesize, MM_PTR_FMT_WIDTH, diff --git a/mm/tlsf/mm_tlsf.c b/mm/tlsf/mm_tlsf.c index 02711b0780..efc7f9bab2 100644 --- a/mm/tlsf/mm_tlsf.c +++ b/mm/tlsf/mm_tlsf.c @@ -240,7 +240,7 @@ static FAR void *mempool_memalign(FAR void *arg, size_t alignment, if (ret) { buf = ret + mm_malloc_size(arg, ret); - buf->pid = MM_BACKTRACE_MEMPOOL_PID; + buf->pid = PID_MM_MEMPOOL; } return ret; @@ -292,15 +292,15 @@ static void mallinfo_task_handler(FAR void *ptr, size_t size, int used, { if (used) { #if CONFIG_MM_BACKTRACE < 0 - if (handler->task->pid = MM_BACKTRACE_ALLOC_PID) + if (handler->task->pid == PID_MM_ALLOC) { handler->info->aordblks++; handler->info->uordblks += size; } #else - if (handler->task->pid == MM_BACKTRACE_ALLOC_PID || + if (handler->task->pid
== PID_MM_ALLOC || handler->task->pid == buf->pid || - (handler->task->pid == MM_BACKTRACE_INVALID_PID && + (handler->task->pid == PID_MM_INVALID && nxsched_get_tcb(buf->pid) == NULL)) { if (buf->seqno >= handler->task->seqmin && @@ -312,7 +312,7 @@ static void mallinfo_task_handler(FAR void *ptr, size_t size, int used, } #endif } - else if (handler->task->pid == MM_BACKTRACE_FREE_PID) + else if (handler->task->pid == PID_MM_FREE) { handler->info->aordblks++; handler->info->uordblks += size; @@ -417,9 +417,9 @@ static void memdump_handler(FAR void *ptr, size_t size, int used, if (used) { #if CONFIG_MM_BACKTRACE < 0 - if (pid == MM_BACKTRACE_ALLOC_PID) + if (pid == PID_MM_ALLOC) #else - if ((dump->pid == MM_BACKTRACE_ALLOC_PID || + if ((dump->pid == PID_MM_ALLOC || buf->pid == dump->pid) && buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax) @@ -450,7 +450,7 @@ static void memdump_handler(FAR void *ptr, size_t size, int used, #endif } } - else if (dump->pid <= MM_BACKTRACE_FREE_PID) + else if (dump->pid <= PID_MM_FREE) { syslog(LOG_INFO, "%12zu%*p\n", size, MM_PTR_FMT_WIDTH, ptr); } @@ -962,7 +962,7 @@ void mm_memdump(FAR struct mm_heap_s *heap, #endif struct mallinfo_task info; - if (dump->pid >= MM_BACKTRACE_ALLOC_PID) + if (dump->pid >= PID_MM_ALLOC) { syslog(LOG_INFO, "Dump all used memory node info:\n"); #if CONFIG_MM_BACKTRACE < 0