mm: Rename MM_BACKTRACE_XXX_PID to PID_MM_XXX and move the definition to malloc.h

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>

commit f01deff80f (parent ddbe9eb6ab)
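Apart from moving the macro definitions from mm.h into malloc.h, the hunks below only swap names at existing call sites, so runtime behaviour is unchanged. As a minimal sketch of what such a call site looks like after the rename (assuming CONFIG_MM_BACKTRACE >= 0, a heap handle supplied by the caller, and that the prototypes come from <nuttx/mm/mm.h>; this helper is illustrative and not part of the commit):

    #include <limits.h>
    #include <malloc.h>
    #include <nuttx/mm/mm.h>

    /* Illustrative helper: log every currently allocated node of "heap"
     * through the in-kernel mm_memdump() that appears later in this diff.
     */

    static void dump_used_nodes(FAR struct mm_heap_s *heap)
    {
      struct mm_memdump_s dump =
      {
        PID_MM_ALLOC,   /* Was MM_BACKTRACE_ALLOC_PID: match every owner */
        0,              /* seqmin: start of the sequence-number window */
        ULONG_MAX       /* seqmax: end of the window (no restriction) */
      };

      mm_memdump(heap, &dump);
    }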
@@ -469,7 +469,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer,
   FAR struct meminfo_file_s *procfile;
   struct mm_memdump_s dump =
   {
-    MM_BACKTRACE_ALLOC_PID,
+    PID_MM_ALLOC,
 #if CONFIG_MM_BACKTRACE >= 0
     0,
     ULONG_MAX
@@ -540,7 +540,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer,
   switch (buffer[0])
     {
       case 'u':
-        dump.pid = MM_BACKTRACE_ALLOC_PID;
+        dump.pid = PID_MM_ALLOC;

 #if CONFIG_MM_BACKTRACE >= 0
         p = (FAR char *)buffer + 4;
@@ -549,7 +549,7 @@ static ssize_t memdump_write(FAR struct file *filep, FAR const char *buffer,
         break;

       case 'f':
-        dump.pid = MM_BACKTRACE_FREE_PID;
+        dump.pid = PID_MM_FREE;

 #if CONFIG_MM_BACKTRACE >= 0
         p = (FAR char *)buffer + 4;
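The write handler above keys on the first character of the string written to the procfs entry, so selecting a dump is a matter of writing a "used" or "free" command to it. A hypothetical sketch follows; the /proc/memdump path and the exact command strings are assumptions inferred from buffer[0] and the buffer + 4 offset, not shown in this diff:

    #include <fcntl.h>
    #include <unistd.h>

    /* Illustrative only: ask the kernel to dump used and free heap nodes. */

    static void request_memdump(void)
    {
      int fd = open("/proc/memdump", O_WRONLY);  /* Assumed mount point */
      if (fd >= 0)
        {
          write(fd, "used", 4);   /* 'u' -> dump.pid = PID_MM_ALLOC */
          write(fd, "free", 4);   /* 'f' -> dump.pid = PID_MM_FREE  */
          close(fd);
        }
    }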
@@ -31,6 +31,13 @@
  * Pre-processor Definitions
  ****************************************************************************/

+/* Special PID to query the info about alloc, free and mempool */
+
+#define PID_MM_INVALID ((pid_t)-4)
+#define PID_MM_MEMPOOL ((pid_t)-3)
+#define PID_MM_FREE    ((pid_t)-2)
+#define PID_MM_ALLOC   ((pid_t)-1)
+
 /* For Linux and MacOS compatibility */

 #define malloc_usable_size malloc_size
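With the constants now in <malloc.h>, user code can name them without pulling in the private mm headers. Below is a sketch of an application-side query, assuming CONFIG_MM_BACKTRACE >= 0 and that mallinfo_task() and struct malltask are likewise visible through <malloc.h>; only the PID_MM_* macros are shown moving in this diff:

    #include <limits.h>
    #include <malloc.h>

    /* Illustrative helper: bytes still held by tasks that have exited,
     * i.e. allocations whose owner pid no longer resolves to a TCB.
     */

    static size_t bytes_held_by_dead_tasks(void)
    {
      struct malltask task;
      struct mallinfo_task info;

      task.pid    = PID_MM_INVALID;  /* Owner task no longer exists */
      task.seqmin = 0;
      task.seqmax = ULONG_MAX;
      info        = mallinfo_task(&task);

      return info.uordblks;
    }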
@@ -101,11 +101,6 @@

 #define mm_memdump_s malltask

-#define MM_BACKTRACE_INVALID_PID ((pid_t)-4)
-#define MM_BACKTRACE_MEMPOOL_PID ((pid_t)-3)
-#define MM_BACKTRACE_FREE_PID    ((pid_t)-2)
-#define MM_BACKTRACE_ALLOC_PID   ((pid_t)-1)
-
 /****************************************************************************
  * Public Types
  ****************************************************************************/
@@ -401,7 +401,7 @@ mempool_info_task(FAR struct mempool_s *pool,
     0, 0
   };

-  if (task->pid == MM_BACKTRACE_FREE_PID)
+  if (task->pid == PID_MM_FREE)
     {
       size_t count = mempool_queue_lenth(&pool->queue) +
                      mempool_queue_lenth(&pool->iqueue);
@@ -410,7 +410,7 @@ mempool_info_task(FAR struct mempool_s *pool,
       info.uordblks += count * pool->blocksize;
     }
 #if CONFIG_MM_BACKTRACE < 0
-  else if (task->pid == MM_BACKTRACE_ALLOC_PID)
+  else if (task->pid == PID_MM_ALLOC)
     {
       size_t count = pool->nalloc;

@@ -425,8 +425,8 @@ mempool_info_task(FAR struct mempool_s *pool,
       list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s,
                            node)
         {
-          if (task->pid == buf->pid || task->pid == MM_BACKTRACE_ALLOC_PID ||
-              (task->pid == MM_BACKTRACE_INVALID_PID &&
+          if (task->pid == buf->pid || task->pid == PID_MM_ALLOC ||
+              (task->pid == PID_MM_INVALID &&
               nxsched_get_tcb(buf->pid) == NULL))
             {
               if (buf->seqno >= task->seqmin && buf->seqno <= task->seqmax)
@@ -465,7 +465,7 @@ mempool_info_task(FAR struct mempool_s *pool,
 void mempool_memdump(FAR struct mempool_s *pool,
                      FAR const struct mm_memdump_s *dump)
 {
-  if (dump->pid == MM_BACKTRACE_FREE_PID)
+  if (dump->pid == PID_MM_FREE)
     {
       FAR sq_entry_t *entry;

@@ -491,9 +491,8 @@ void mempool_memdump(FAR struct mempool_s *pool,
       list_for_every_entry(&pool->alist, buf, struct mempool_backtrace_s,
                            node)
         {
-          if ((buf->pid == dump->pid ||
-               dump->pid == MM_BACKTRACE_ALLOC_PID) &&
-              buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
+          if ((buf->pid == dump->pid || dump->pid == PID_MM_ALLOC) &&
+              buf->seqno >= dump->seqmin && buf->seqno <= dump->seqmax)
             {
 # if CONFIG_MM_BACKTRACE > 0
               int i;
@@ -66,7 +66,7 @@ static FAR void *mempool_memalign(FAR void *arg, size_t alignment,
     {
       node = (FAR struct mm_allocnode_s *)
              ((FAR char *)ret - SIZEOF_MM_ALLOCNODE);
-      node->pid = MM_BACKTRACE_MEMPOOL_PID;
+      node->pid = PID_MM_MEMPOOL;
     }

   return ret;
@@ -96,15 +96,15 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node,
     {
       DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE);
 #if CONFIG_MM_BACKTRACE < 0
-      if (handle->task->pid == MM_BACKTRACE_ALLOC_PID)
+      if (handle->task->pid == PID_MM_ALLOC)
         {
           handle->info->aordblks++;
           handle->info->uordblks += nodesize;
         }
 #else
-      if (handle->task->pid == MM_BACKTRACE_ALLOC_PID ||
+      if (handle->task->pid == PID_MM_ALLOC ||
           handle->task->pid == node->pid ||
-          (handle->task->pid == MM_BACKTRACE_INVALID_PID &&
+          (handle->task->pid == PID_MM_INVALID &&
           nxsched_get_tcb(node->pid) == NULL))
         {
           if (node->seqno >= handle->task->seqmin &&
@@ -116,7 +116,7 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node,
         }
 #endif
     }
-  else if (handle->task->pid == MM_BACKTRACE_FREE_PID)
+  else if (handle->task->pid == PID_MM_FREE)
     {
       handle->info->aordblks++;
       handle->info->uordblks += nodesize;
@@ -80,7 +80,7 @@ void mm_dump_handler(FAR struct tcb_s *tcb, FAR void *arg)
   struct mallinfo_task info;
   struct malltask task;

-  task.pid = tcb ? tcb->pid : MM_BACKTRACE_INVALID_PID;
+  task.pid = tcb ? tcb->pid : PID_MM_INVALID;
   task.seqmin = 0;
   task.seqmax = ULONG_MAX;
   info = mm_mallinfo_task(arg, &task);
@@ -274,7 +274,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 # ifdef CONFIG_MM_DUMP_DETAILS_ON_FAILURE
       struct mm_memdump_s dump =
       {
-        MM_BACKTRACE_ALLOC_PID, 0, ULONG_MAX
+        PID_MM_ALLOC, 0, ULONG_MAX
      };
 # endif
 #endif
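When the on-failure dump above shows plenty of free bytes but no block large enough, the free list itself can be dumped by passing PID_MM_FREE instead. A short sketch under the same assumptions as before (heap handle supplied by the caller, CONFIG_MM_BACKTRACE >= 0; the helper is illustrative, not part of the commit):

    /* Illustrative helper: log every node on the free list to gauge
     * fragmentation after an allocation failure.
     */

    static void dump_free_nodes(FAR struct mm_heap_s *heap)
    {
      struct mm_memdump_s dump =
      {
        PID_MM_FREE,    /* Was MM_BACKTRACE_FREE_PID: walk the free list */
        0,
        ULONG_MAX
      };

      mm_memdump(heap, &dump);
    }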
@@ -59,9 +59,9 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
     {
       DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE);
 #if CONFIG_MM_BACKTRACE < 0
-      if (dump->pid == MM_BACKTRACE_ALLOC_PID)
+      if (dump->pid == PID_MM_ALLOC)
 #else
-      if ((dump->pid == MM_BACKTRACE_ALLOC_PID || node->pid == dump->pid) &&
+      if ((dump->pid == PID_MM_ALLOC || node->pid == dump->pid) &&
           node->seqno >= dump->seqmin && node->seqno <= dump->seqmax)
 #endif
         {
@@ -106,7 +106,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
                   SIZEOF_MM_NODE(fnode->flink) == 0 ||
                   SIZEOF_MM_NODE(fnode->flink) >= nodesize);

-      if (dump->pid <= MM_BACKTRACE_FREE_PID)
+      if (dump->pid <= PID_MM_FREE)
         {
           syslog(LOG_INFO, "%12zu%*p\n",
                  nodesize, MM_PTR_FMT_WIDTH,
@@ -240,7 +240,7 @@ static FAR void *mempool_memalign(FAR void *arg, size_t alignment,
   if (ret)
     {
       buf = ret + mm_malloc_size(arg, ret);
-      buf->pid = MM_BACKTRACE_MEMPOOL_PID;
+      buf->pid = PID_MM_MEMPOOL;
     }

   return ret;
@@ -292,15 +292,15 @@ static void mallinfo_task_handler(FAR void *ptr, size_t size, int used,
   if (used)
     {
 #if CONFIG_MM_BACKTRACE < 0
-      if (handler->task->pid = MM_BACKTRACE_ALLOC_PID)
+      if (handler->task->pid = PID_MM_ALLOC)
        {
          handler->info->aordblks++;
          handler->info->uordblks += size;
        }
 #else
-      if (handler->task->pid == MM_BACKTRACE_ALLOC_PID ||
+      if (handler->task->pid == PID_MM_ALLOC ||
           handler->task->pid == buf->pid ||
-          (handler->task->pid == MM_BACKTRACE_INVALID_PID &&
+          (handler->task->pid == PID_MM_INVALID &&
          nxsched_get_tcb(buf->pid) == NULL))
         {
           if (buf->seqno >= handler->task->seqmin &&
@@ -312,7 +312,7 @@ static void mallinfo_task_handler(FAR void *ptr, size_t size, int used,
         }
 #endif
     }
-  else if (handler->task->pid == MM_BACKTRACE_FREE_PID)
+  else if (handler->task->pid == PID_MM_FREE)
     {
       handler->info->aordblks++;
       handler->info->uordblks += size;
@@ -417,9 +417,9 @@ static void memdump_handler(FAR void *ptr, size_t size, int used,
   if (used)
     {
 #if CONFIG_MM_BACKTRACE < 0
-      if (pid == MM_BACKTRACE_ALLOC_PID)
+      if (pid == PID_MM_ALLOC)
 #else
-      if ((dump->pid == MM_BACKTRACE_ALLOC_PID ||
+      if ((dump->pid == PID_MM_ALLOC ||
            buf->pid == dump->pid) &&
           buf->seqno >= dump->seqmin &&
           buf->seqno <= dump->seqmax)
@@ -450,7 +450,7 @@ static void memdump_handler(FAR void *ptr, size_t size, int used,
 #endif
         }
     }
-  else if (dump->pid <= MM_BACKTRACE_FREE_PID)
+  else if (dump->pid <= PID_MM_FREE)
     {
       syslog(LOG_INFO, "%12zu%*p\n", size, MM_PTR_FMT_WIDTH, ptr);
     }
@@ -962,7 +962,7 @@ void mm_memdump(FAR struct mm_heap_s *heap,
 #endif
   struct mallinfo_task info;

-  if (dump->pid >= MM_BACKTRACE_ALLOC_PID)
+  if (dump->pid >= PID_MM_ALLOC)
     {
       syslog(LOG_INFO, "Dump all used memory node info:\n");
 #if CONFIG_MM_BACKTRACE < 0
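The range comparisons in the dump paths (dump->pid >= PID_MM_ALLOC above, dump->pid <= PID_MM_FREE earlier) depend on the special values staying below every real pid and keeping their relative order. A small, purely illustrative compile-time check makes that invariant explicit; it is not part of the commit:

    #include <assert.h>
    #include <malloc.h>

    /* PID_MM_INVALID (-4) < PID_MM_MEMPOOL (-3) < PID_MM_FREE (-2)
     * < PID_MM_ALLOC (-1) < 0 <= any real pid, so ">= PID_MM_ALLOC"
     * selects real tasks plus the "all allocations" query, while
     * "<= PID_MM_FREE" selects the free, mempool and invalid queries.
     */

    static_assert(PID_MM_INVALID < PID_MM_MEMPOOL &&
                  PID_MM_MEMPOOL < PID_MM_FREE &&
                  PID_MM_FREE    < PID_MM_ALLOC &&
                  PID_MM_ALLOC   < 0,
                  "special memory-query pids must stay below real pids");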