note: add memory tracing event support
Record every memory allocation and free, and save the events to the RAM note buffer. The data can be used to analyze the memory allocation rate and per-task memory usage. The per-task counter is not an absolute truth, because memory may be allocated in thread A and freed in thread B.

Example trace output:

netinit-5 [0] 0.105984392: tracing_mark_write: C|5|Heap Usage|96|free: heap: 0x606000000020 size:24, address: 0x603000000370
netinit-5 [0] 0.105996874: tracing_mark_write: C|5|Heap Usage|24|free: heap: 0x606000000020 size:72, address: 0x6070000008e0
nsh_main-4 [0] 3.825169408: tracing_mark_write: C|4|Heap Usage|2177665|free: heap: 0x606000000020 size:424, address: 0x614000000840
nsh_main-4 [0] 3.825228525: tracing_mark_write: C|4|Heap Usage|14977|free: heap: 0x606000000020 size:2162688, address: 0x7f80a639f800
nsh_main-4 [0] 3.825298789: tracing_mark_write: C|4|Heap Usage|15189|malloc: heap: 0x606000000020 size:20, address: 0x6030000003a0

Signed-off-by: yinshengkai <yinshengkai@xiaomi.com>
Signed-off-by: Neo Xu <neo.xu1990@gmail.com>
parent 803489b546
commit 2c0e2ac36b
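The change funnels every allocator event through a single hook, sched_note_heap(bool alloc, FAR void *heap, FAR void *mem, size_t size), guarded by CONFIG_SCHED_INSTRUMENTATION_HEAP. As an illustration only (not part of this patch), here is a minimal sketch of how an out-of-tree allocator could report its own blocks through the same hook; my_pool_alloc()/my_pool_free() and pool_take()/pool_give() are hypothetical names, while sched_note_heap() is the interface added by the diff below:

#include <nuttx/config.h>
#include <stddef.h>
#include <nuttx/sched_note.h>

/* pool_take()/pool_give() are hypothetical stand-ins for whatever a private
 * allocator really does; they are not NuttX interfaces.
 */

FAR void *pool_take(FAR void *pool, size_t size);
void pool_give(FAR void *pool, FAR void *mem, size_t size);

static FAR void *my_pool_alloc(FAR void *pool, size_t size)
{
  FAR void *mem = pool_take(pool, size);

  if (mem != NULL)
    {
      /* alloc == true: report the allocation and its size */

      sched_note_heap(true, pool, mem, size);
    }

  return mem;
}

static void my_pool_free(FAR void *pool, FAR void *mem, size_t size)
{
  /* alloc == false: report the free before the block is recycled */

  sched_note_heap(false, pool, mem, size);
  pool_give(pool, mem, size);
}

When CONFIG_SCHED_INSTRUMENTATION_HEAP is disabled, sched_note_heap() compiles away to a no-op macro, so such call sites cost nothing.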
@@ -33,6 +33,7 @@
#include <nuttx/atomic.h>
#include <nuttx/fs/procfs.h>
#include <nuttx/mm/mm.h>
#include <nuttx/sched_note.h>

#include "sim_internal.h"

@@ -185,6 +186,7 @@ static void mm_delayfree(struct mm_heap_s *heap, void *mem, bool delay)
int size = host_mallocsize(mem);
atomic_fetch_sub(&heap->aordblks, 1);
atomic_fetch_sub(&heap->uordblks, size);
sched_note_heap(false, heap, mem, size);
host_free(mem);
}
}
@@ -364,6 +366,7 @@ void *mm_realloc(struct mm_heap_s *heap, void *oldmem,
int uordblks;
int usmblks;
int newsize;
int oldsize;

free_delaylist(heap, false);

@@ -373,13 +376,23 @@ void *mm_realloc(struct mm_heap_s *heap, void *oldmem,
return NULL;
}

atomic_fetch_sub(&heap->uordblks, host_mallocsize(oldmem));
oldsize = host_mallocsize(oldmem);
atomic_fetch_sub(&heap->uordblks, oldsize);
mem = host_realloc(oldmem, size);

atomic_fetch_add(&heap->aordblks, oldmem == NULL && mem != NULL);
newsize = host_mallocsize(mem ? mem : oldmem);
atomic_fetch_add(&heap->uordblks, newsize);
usmblks = atomic_load(&heap->usmblks);
if (mem != NULL)
{
if (oldmem != NULL)
{
sched_note_heap(false, heap, oldmem, oldsize);
}

sched_note_heap(true, heap, mem, newsize);
}

do
{
@@ -470,6 +483,7 @@ void *mm_memalign(struct mm_heap_s *heap, size_t alignment, size_t size)
}

size = host_mallocsize(mem);
sched_note_heap(true, heap, mem, size);
atomic_fetch_add(&heap->aordblks, 1);
atomic_fetch_add(&heap->uordblks, size);
usmblks = atomic_load(&heap->usmblks);
@@ -92,6 +92,8 @@
#define note_irqhandler(drv, irq, handler, enter) \
((drv)->ops->irqhandler && \
((drv)->ops->irqhandler(drv, irq, handler, enter), true))
#define note_heap(drv, alloc, data, mem, size) \
((drv)->ops->heap && ((drv)->ops->heap(drv, alloc, data, mem, size), true))
#define note_string(drv, ip, buf) \
((drv)->ops->string && ((drv)->ops->string(drv, ip, buf), true))
#define note_event(drv, ip, event, buf, len) \
@@ -1352,6 +1354,50 @@ void sched_note_irqhandler(int irq, FAR void *handler, bool enter)
}
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
void sched_note_heap(bool alloc, FAR void *heap, FAR void *mem, size_t size)
{
FAR struct note_driver_s **driver;
struct note_heap_s note;
bool formatted = false;
FAR struct tcb_s *tcb = this_task();

#ifdef CONFIG_SCHED_INSTRUMENTATION_FILTER
if (!note_isenabled())
{
return;
}
#endif

for (driver = g_note_drivers; *driver; driver++)
{
if (note_heap(*driver, alloc, heap, mem, size))
{
continue;
}

if ((*driver)->ops->add == NULL)
{
continue;
}

if (!formatted)
{
enum note_type_e type = alloc ? NOTE_ALLOC : NOTE_FREE;
formatted = true;
note_common(tcb, &note.nmm_cmn, sizeof(note), type);
note.heap = heap;
note.mem = mem;
note.size = size;
}

/* Add the note to circular buffer */

note_add(*driver, &note, sizeof(note));
}
}
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION_DUMP
void sched_note_string_ip(uint32_t tag, uintptr_t ip, FAR const char *buf)
{
@@ -31,6 +31,7 @@
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include <nuttx/spinlock.h>
#include <nuttx/sched.h>
@@ -97,9 +98,17 @@ struct noteram_dump_cpu_context_s
uint8_t next_priority; /* Task Priority of the next line */
};

struct noteram_dump_task_context_s
{
FAR struct noteram_dump_task_context_s *next;
pid_t pid;
size_t mm_used;
};

struct noteram_dump_context_s
{
struct noteram_dump_cpu_context_s cpu[NCPUS];
struct noteram_dump_task_context_s *task;
};

/****************************************************************************
@@ -419,6 +428,14 @@ static int noteram_open(FAR struct file *filep)
int noteram_close(FAR struct file *filep)
{
FAR struct noteram_dump_context_s *ctx = filep->f_priv;

while (ctx->task != NULL)
{
FAR struct noteram_dump_task_context_s *task = ctx->task;
ctx->task = task->next;
kmm_free(task);
}

kmm_free(ctx);
return OK;
}
@@ -620,6 +637,59 @@ static void noteram_dump_init_context(FAR struct noteram_dump_context_s *ctx)
}
}

/****************************************************************************
* Name: noteram_dump_find_task_context
****************************************************************************/

#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
static FAR struct noteram_dump_task_context_s *
noteram_dump_find_task_context(FAR struct noteram_dump_context_s *ctx,
pid_t pid)
{
FAR struct noteram_dump_task_context_s *task;
FAR struct noteram_dump_task_context_s *prev;

if (ctx->task == NULL)
{
ctx->task = kmm_zalloc(sizeof(*ctx->task));
if (ctx->task == NULL)
{
return NULL;
}

ctx->task->pid = pid;
ctx->task->next = NULL;
return ctx->task;
}
else
{
task = ctx->task;
}

while (task != NULL)
{
if (task->pid == pid)
{
return task;
}

prev = task;
task = task->next;
}

prev->next = kmm_zalloc(sizeof(*prev));
if (prev->next == NULL)
{
return NULL;
}

task = prev->next;
task->pid = pid;
task->next = NULL;
return task;
}
#endif

/****************************************************************************
* Name: get_task_name
****************************************************************************/
@@ -1015,7 +1085,34 @@ static int noteram_dump_one(FAR uint8_t *p, FAR struct lib_outstream_s *s,
}
break;
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
case NOTE_ALLOC:
case NOTE_FREE:
{
FAR struct note_heap_s *nmm = (FAR struct note_heap_s *)p;
FAR struct noteram_dump_task_context_s *tctx;
int used = 0;
FAR const char *name[] =
{
"malloc", "free"
};

tctx = noteram_dump_find_task_context(ctx, pid);
if (tctx != NULL)
{
tctx->mm_used += note->nc_type == NOTE_FREE ?
-nmm->size : nmm->size;
used = tctx->mm_used;
}

ret += noteram_dump_header(s, &nmm->nmm_cmn, ctx);
ret += lib_sprintf(s, "tracing_mark_write: C|%d|Heap Usage|%d|%s"
": heap: %p size:%" PRIiPTR ", address: %p\n",
pid, used, name[note->nc_type - NOTE_ALLOC],
nmm->heap, nmm->size, nmm->mem);
}
break;
#endif
default:
break;
}
@@ -89,6 +89,10 @@ struct note_driver_ops_s
CODE void (*irqhandler)(FAR struct note_driver_s *drv, int irq,
FAR void *handler, bool enter);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
CODE void (*heap)(FAR struct note_driver_s *drv, bool alloc,
FAR void *heap, FAR void *mem, size_t size);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_DUMP
CODE void (*string)(FAR struct note_driver_s *drv, uintptr_t ip,
FAR const char *buf);
@@ -164,34 +164,40 @@

enum note_type_e
{
NOTE_START = 0,
NOTE_STOP = 1,
NOTE_SUSPEND = 2,
NOTE_RESUME = 3,
NOTE_CPU_START = 4,
NOTE_CPU_STARTED = 5,
NOTE_CPU_PAUSE = 6,
NOTE_CPU_PAUSED = 7,
NOTE_CPU_RESUME = 8,
NOTE_CPU_RESUMED = 9,
NOTE_PREEMPT_LOCK = 10,
NOTE_PREEMPT_UNLOCK = 11,
NOTE_CSECTION_ENTER = 12,
NOTE_CSECTION_LEAVE = 13,
NOTE_SPINLOCK_LOCK = 14,
NOTE_SPINLOCK_LOCKED = 15,
NOTE_SPINLOCK_UNLOCK = 16,
NOTE_SPINLOCK_ABORT = 17,
NOTE_SYSCALL_ENTER = 18,
NOTE_SYSCALL_LEAVE = 19,
NOTE_IRQ_ENTER = 20,
NOTE_IRQ_LEAVE = 21,
NOTE_DUMP_STRING = 22,
NOTE_DUMP_BINARY = 23,
NOTE_DUMP_BEGIN = 24,
NOTE_DUMP_END = 25,
NOTE_DUMP_MARK = 28,
NOTE_DUMP_COUNTER = 29,
NOTE_START,
NOTE_STOP,
NOTE_SUSPEND,
NOTE_RESUME,
NOTE_CPU_START,
NOTE_CPU_STARTED,
NOTE_CPU_PAUSE,
NOTE_CPU_PAUSED,
NOTE_CPU_RESUME,
NOTE_CPU_RESUMED,
NOTE_PREEMPT_LOCK,
NOTE_PREEMPT_UNLOCK,
NOTE_CSECTION_ENTER,
NOTE_CSECTION_LEAVE,
NOTE_SPINLOCK_LOCK,
NOTE_SPINLOCK_LOCKED,
NOTE_SPINLOCK_UNLOCK,
NOTE_SPINLOCK_ABORT,
NOTE_SYSCALL_ENTER,
NOTE_SYSCALL_LEAVE,
NOTE_IRQ_ENTER,
NOTE_IRQ_LEAVE,
NOTE_ALLOC,
NOTE_FREE,
NOTE_REALLOC,
NOTE_DUMP_STRING,
NOTE_DUMP_BINARY,
NOTE_DUMP_BEGIN,
NOTE_DUMP_END,
NOTE_DUMP_MARK,
NOTE_DUMP_COUNTER,

/* Always last */

NOTE_TYPE_LAST
};

@@ -397,6 +403,14 @@ struct note_binary_s
#define SIZEOF_NOTE_BINARY(n) (sizeof(struct note_binary_s) + \
((n) - 1) * sizeof(uint8_t))

struct note_heap_s
{
struct note_common_s nmm_cmn; /* Common note parameters */
FAR void *heap;
FAR void *mem;
size_t size;
};

struct note_counter_s
{
long int value;
@@ -539,6 +553,12 @@ void sched_note_irqhandler(int irq, FAR void *handler, bool enter);
# define sched_note_irqhandler(i,h,e)
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
void sched_note_heap(bool alloc, FAR void *heap, FAR void *mem, size_t size);
#else
# define sched_note_heap(a,h,m,s)
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION_DUMP
void sched_note_string_ip(uint32_t tag, uintptr_t ip, FAR const char *buf);
void sched_note_event_ip(uint32_t tag, uintptr_t ip, uint8_t event,
@@ -33,6 +33,7 @@
#include <nuttx/sched.h>
#include <nuttx/mm/mm.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/sched_note.h>

#include "mm_heap/mm.h"

@@ -99,11 +100,12 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)
return;
}

nodesize = mm_malloc_size(heap, mem);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(mem, MM_FREE_MAGIC, mm_malloc_size(heap, mem));
memset(mem, MM_FREE_MAGIC, nodesize);
#endif

kasan_poison(mem, mm_malloc_size(heap, mem));
kasan_poison(mem, nodesize);

if (delay)
{
@@ -126,6 +128,7 @@ void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem, bool delay)
/* Update heap statistics */

heap->mm_curused -= nodesize;
sched_note_heap(false, heap, mem, nodesize);

/* Check if the following node is free and, if so, merge it */

@@ -34,6 +34,7 @@
#include <nuttx/mm/mm.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/sched.h>
#include <nuttx/sched_note.h>

#include "mm_heap/mm.h"

@@ -306,7 +307,8 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)

/* Update heap statistics */

heap->mm_curused += MM_SIZEOF_NODE(node);
nodesize = MM_SIZEOF_NODE(node);
heap->mm_curused += nodesize;
if (heap->mm_curused > heap->mm_maxused)
{
heap->mm_maxused = heap->mm_curused;
@@ -324,7 +326,8 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
if (ret)
{
MM_ADD_BACKTRACE(heap, node);
ret = kasan_unpoison(ret, mm_malloc_size(heap, ret));
ret = kasan_unpoison(ret, nodesize - MM_ALLOCNODE_OVERHEAD);
sched_note_heap(true, heap, ret, nodesize);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(ret, MM_ALLOC_MAGIC, alignsize - MM_ALLOCNODE_OVERHEAD);
#endif
@@ -30,6 +30,7 @@

#include <nuttx/mm/mm.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/sched_note.h>

#include "mm_heap/mm.h"

@@ -267,7 +268,8 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,

/* Update heap statistics */

heap->mm_curused += MM_SIZEOF_NODE(node);
size = MM_SIZEOF_NODE(node);
heap->mm_curused += size;
if (heap->mm_curused > heap->mm_maxused)
{
heap->mm_maxused = heap->mm_curused;
@@ -277,11 +279,9 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,

MM_ADD_BACKTRACE(heap, node);

alignedchunk = (uintptr_t)kasan_unpoison
((FAR const void *)alignedchunk,
mm_malloc_size(heap,
(FAR void *)alignedchunk));

alignedchunk = (uintptr_t)kasan_unpoison((FAR const void *)alignedchunk,
size - MM_ALLOCNODE_OVERHEAD);
sched_note_heap(true, heap, (FAR void *)alignedchunk, size);
DEBUGASSERT(alignedchunk % alignment == 0);
return (FAR void *)alignedchunk;
}
@@ -34,6 +34,7 @@

#include <nuttx/mm/mm.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/sched_note.h>

#include "mm_heap/mm.h"

@@ -382,10 +383,13 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
heap->mm_maxused = heap->mm_curused;
}

sched_note_heap(false, heap, oldmem, oldsize);
sched_note_heap(true, heap, newmem, newsize);
mm_unlock(heap);
MM_ADD_BACKTRACE(heap, (FAR char *)newmem - MM_SIZEOF_ALLOCNODE);

newmem = kasan_unpoison(newmem, mm_malloc_size(heap, newmem));
newmem = kasan_unpoison(newmem, MM_SIZEOF_NODE(oldnode) -
MM_ALLOCNODE_OVERHEAD);
if (kasan_reset_tag(newmem) != kasan_reset_tag(oldmem))
{
/* Now we have to move the user contents 'down' in memory. memcpy
@@ -42,6 +42,7 @@
#include <nuttx/mm/mm.h>
#include <nuttx/mm/kasan.h>
#include <nuttx/mm/mempool.h>
#include <nuttx/sched_note.h>

#include "tlsf/tlsf.h"

@@ -492,15 +493,16 @@ static void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem,
{
if (mm_lock(heap) == 0)
{
size_t size = mm_malloc_size(heap, mem);
#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(mem, MM_FREE_MAGIC, mm_malloc_size(heap, mem));
memset(mem, MM_FREE_MAGIC, size);
#endif

kasan_poison(mem, mm_malloc_size(heap, mem));
kasan_poison(mem, size);

/* Update heap statistics */

heap->mm_curused -= mm_malloc_size(heap, mem);
heap->mm_curused -= size;

/* Pass, return to the tlsf pool */

@@ -510,6 +512,7 @@ static void mm_delayfree(FAR struct mm_heap_s *heap, FAR void *mem,
}
else
{
sched_note_heap(false, heap, mem, size);
tlsf_free(heap->mm_tlsf, mem);
}
@@ -1134,6 +1137,7 @@ size_t mm_malloc_size(FAR struct mm_heap_s *heap, FAR void *mem)

FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
size_t nodesize;
FAR void *ret;

/* In case of zero-length allocations allocate the minimum size object */
@@ -1168,7 +1172,8 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
ret = tlsf_malloc(heap->mm_tlsf, size);
#endif

heap->mm_curused += mm_malloc_size(heap, ret);
nodesize = mm_malloc_size(heap, ret);
heap->mm_curused += nodesize;
if (heap->mm_curused > heap->mm_maxused)
{
heap->mm_maxused = heap->mm_curused;
@@ -1179,11 +1184,13 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
if (ret)
{
#if CONFIG_MM_BACKTRACE >= 0
FAR struct memdump_backtrace_s *buf = ret + mm_malloc_size(heap, ret);
FAR struct memdump_backtrace_s *buf = ret + nodesize;

memdump_backtrace(heap, buf);
#endif
ret = kasan_unpoison(ret, mm_malloc_size(heap, ret));

ret = kasan_unpoison(ret, nodesize);
sched_note_heap(true, heap, ret, nodesize);

#ifdef CONFIG_MM_FILL_ALLOCATIONS
memset(ret, 0xaa, nodesize);
@@ -1218,6 +1225,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
size_t size)
{
size_t nodesize;
FAR void *ret;

#ifdef CONFIG_MM_HEAP_MEMPOOL
@@ -1245,7 +1253,8 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
ret = tlsf_memalign(heap->mm_tlsf, alignment, size);
#endif

heap->mm_curused += mm_malloc_size(heap, ret);
nodesize = mm_malloc_size(heap, ret);
heap->mm_curused += nodesize;
if (heap->mm_curused > heap->mm_maxused)
{
heap->mm_maxused = heap->mm_curused;
@@ -1256,11 +1265,12 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
if (ret)
{
#if CONFIG_MM_BACKTRACE >= 0
FAR struct memdump_backtrace_s *buf = ret + mm_malloc_size(heap, ret);
FAR struct memdump_backtrace_s *buf = ret + nodesize;

memdump_backtrace(heap, buf);
#endif
ret = kasan_unpoison(ret, mm_malloc_size(heap, ret));
ret = kasan_unpoison(ret, nodesize);
sched_note_heap(true, heap, ret, nodesize);
}

#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
@@ -1302,6 +1312,8 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
size_t size)
{
FAR void *newmem;
size_t oldsize;
size_t newsize;

/* If oldmem is NULL, then realloc is equivalent to malloc */

@@ -1361,7 +1373,8 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
/* Allocate from the tlsf pool */

DEBUGVERIFY(mm_lock(heap));
heap->mm_curused -= mm_malloc_size(heap, oldmem);
oldsize = mm_malloc_size(heap, oldmem);
heap->mm_curused -= oldsize;
#if CONFIG_MM_BACKTRACE >= 0
newmem = tlsf_realloc(heap->mm_tlsf, oldmem, size +
sizeof(struct memdump_backtrace_s));
@@ -1369,7 +1382,8 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
newmem = tlsf_realloc(heap->mm_tlsf, oldmem, size);
#endif

heap->mm_curused += mm_malloc_size(heap, newmem ? newmem : oldmem);
newsize = mm_malloc_size(heap, newmem);
heap->mm_curused += newmem ? newsize : oldsize;
if (heap->mm_curused > heap->mm_maxused)
{
heap->mm_maxused = heap->mm_curused;
@@ -1377,20 +1391,21 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,

mm_unlock(heap);

#if CONFIG_MM_BACKTRACE >= 0
if (newmem)
{
FAR struct memdump_backtrace_s *buf =
newmem + mm_malloc_size(heap, newmem);

#if CONFIG_MM_BACKTRACE >= 0
FAR struct memdump_backtrace_s *buf = newmem + newsize;
memdump_backtrace(heap, buf);
}
#endif

sched_note_heap(false, heap, oldmem, oldsize);
sched_note_heap(true, heap, newmem, newsize);
}

#if CONFIG_MM_FREE_DELAYCOUNT_MAX > 0
/* Try again after free delay list */

if (newmem == NULL && free_delaylist(heap, true))
else if (free_delaylist(heap, true))
{
return mm_realloc(heap, oldmem, size);
}
@@ -1284,6 +1284,14 @@ config SCHED_INSTRUMENTATION_IRQHANDLER

void sched_note_irqhandler(int irq, FAR void *handler, bool enter);

config SCHED_INSTRUMENTATION_HEAP
bool "Heap monitor hooks"
default n
---help---
Enables additional hooks for heap allocation.

void sched_note_heap(bool alloc, FAR void* heap, FAR void *mem, size_t size)

config SCHED_INSTRUMENTATION_DUMP
bool "Use note dump for instrumentation"
default n
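For completeness, a hedged sketch of how a custom note driver could consume the new events through the heap callback added to struct note_driver_ops_s above; my_note_heap() is a hypothetical name, the include path for the driver ops header is assumed, and the driver registration boilerplate is omitted:

#include <nuttx/config.h>
#include <syslog.h>
#include <nuttx/sched_note.h>
#include <nuttx/note/note_driver.h>

#ifdef CONFIG_SCHED_INSTRUMENTATION_HEAP
/* Hypothetical driver callback: forward each heap event to syslog instead
 * of the RAM note buffer.  The signature matches the ops->heap member
 * introduced by this commit.
 */

static void my_note_heap(FAR struct note_driver_s *drv, bool alloc,
                         FAR void *heap, FAR void *mem, size_t size)
{
  syslog(LOG_INFO, "%s: heap: %p size:%zu, address: %p\n",
         alloc ? "malloc" : "free", heap, size, mem);
}
#endif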