diff --git a/mm/mm_heap/mm.h b/mm/mm_heap/mm.h
index 30e3c55ec0..990be24f3d 100644
--- a/mm/mm_heap/mm.h
+++ b/mm/mm_heap/mm.h
@@ -136,18 +136,18 @@
 
 /* What is the size of the allocnode? */
 
-#define SIZEOF_MM_ALLOCNODE sizeof(struct mm_allocnode_s)
+#define MM_SIZEOF_ALLOCNODE sizeof(struct mm_allocnode_s)
 
 /* What is the overhead of the allocnode
  * Remove the space of preceding field since it locates at the end of the
  * previous freenode
  */
 
-#define OVERHEAD_MM_ALLOCNODE (SIZEOF_MM_ALLOCNODE - sizeof(mmsize_t))
+#define MM_ALLOCNODE_OVERHEAD (MM_SIZEOF_ALLOCNODE - sizeof(mmsize_t))
 
 /* Get the node size */
 
-#define SIZEOF_MM_NODE(node) ((node)->size & (~MM_MASK_BIT))
+#define MM_SIZEOF_NODE(node) ((node)->size & (~MM_MASK_BIT))
 
 /****************************************************************************
  * Public Types
@@ -196,7 +196,7 @@ struct mm_freenode_s
   FAR struct mm_freenode_s *blink;
 };
 
-static_assert(SIZEOF_MM_ALLOCNODE <= MM_MIN_CHUNK,
+static_assert(MM_SIZEOF_ALLOCNODE <= MM_MIN_CHUNK,
               "Error size for struct mm_allocnode_s\n");
 
 static_assert(MM_ALIGN >= sizeof(uintptr_t) &&
diff --git a/mm/mm_heap/mm_addfreechunk.c b/mm/mm_heap/mm_addfreechunk.c
index 3c7f975608..5714583405 100644
--- a/mm/mm_heap/mm_addfreechunk.c
+++ b/mm/mm_heap/mm_addfreechunk.c
@@ -48,7 +48,7 @@ void mm_addfreechunk(FAR struct mm_heap_s *heap,
 {
   FAR struct mm_freenode_s *next;
   FAR struct mm_freenode_s *prev;
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
   int ndx;
 
   DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
@@ -62,7 +62,7 @@ void mm_addfreechunk(FAR struct mm_heap_s *heap,
 
   for (prev = &heap->mm_nodelist[ndx],
        next = heap->mm_nodelist[ndx].flink;
-       next && next->size && SIZEOF_MM_NODE(next) < nodesize;
+       next && next->size && MM_SIZEOF_NODE(next) < nodesize;
        prev = next, next = next->flink);
 
   /* Does it go in mid next or at the end? */
diff --git a/mm/mm_heap/mm_brkaddr.c b/mm/mm_heap/mm_brkaddr.c
index 37d6d61d1e..480026ef3d 100644
--- a/mm/mm_heap/mm_brkaddr.c
+++ b/mm/mm_heap/mm_brkaddr.c
@@ -54,5 +54,5 @@ FAR void *mm_brkaddr(FAR struct mm_heap_s *heap, int region)
 #endif
 
   brkaddr = (uintptr_t)heap->mm_heapend[region];
-  return brkaddr ? (FAR void *)(brkaddr + SIZEOF_MM_ALLOCNODE) : NULL;
+  return brkaddr ? (FAR void *)(brkaddr + MM_SIZEOF_ALLOCNODE) : NULL;
 }
diff --git a/mm/mm_heap/mm_checkcorruption.c b/mm/mm_heap/mm_checkcorruption.c
index 30df3a1e14..8e3c89bc46 100644
--- a/mm/mm_heap/mm_checkcorruption.c
+++ b/mm/mm_heap/mm_checkcorruption.c
@@ -40,11 +40,11 @@ static void checkcorruption_handler(FAR struct mm_allocnode_s *node,
                                     FAR void *arg)
 {
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
 
   if ((node->size & MM_ALLOC_BIT) != 0)
     {
-      assert(nodesize >= SIZEOF_MM_ALLOCNODE);
+      assert(nodesize >= MM_SIZEOF_ALLOCNODE);
     }
   else
     {
@@ -52,12 +52,12 @@ static void checkcorruption_handler(FAR struct mm_allocnode_s *node,
       assert(nodesize >= MM_MIN_CHUNK);
       assert(fnode->blink->flink == fnode);
-      assert(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
+      assert(MM_SIZEOF_NODE(fnode->blink) <= nodesize);
       assert(fnode->flink == NULL ||
              fnode->flink->blink == fnode);
       assert(fnode->flink == NULL ||
-             SIZEOF_MM_NODE(fnode->flink) == 0 ||
-             SIZEOF_MM_NODE(fnode->flink) >= nodesize);
+             MM_SIZEOF_NODE(fnode->flink) == 0 ||
+             MM_SIZEOF_NODE(fnode->flink) >= nodesize);
     }
 }
diff --git a/mm/mm_heap/mm_extend.c b/mm/mm_heap/mm_extend.c
index 8781f4b20e..2ad5e87d53 100644
--- a/mm/mm_heap/mm_extend.c
+++ b/mm/mm_heap/mm_extend.c
@@ -34,7 +34,7 @@
  * Pre-processor Definitions
  ****************************************************************************/
 
-#define MIN_EXTEND (2 * SIZEOF_MM_ALLOCNODE)
+#define MIN_EXTEND (2 * MM_SIZEOF_ALLOCNODE)
 
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
@@ -84,12 +84,12 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
    */
 
   oldnode = heap->mm_heapend[region];
-  DEBUGASSERT((uintptr_t)oldnode + SIZEOF_MM_ALLOCNODE == blockstart);
+  DEBUGASSERT((uintptr_t)oldnode + MM_SIZEOF_ALLOCNODE == blockstart);
 
   /* The size of the old node now extends to the new terminal node.
-   * This is the old size (SIZEOF_MM_ALLOCNODE) plus the size of
+   * This is the old size (MM_SIZEOF_ALLOCNODE) plus the size of
    * the block (size) minus the size of the new terminal node
-   * (SIZEOF_MM_ALLOCNODE) or simply:
+   * (MM_SIZEOF_ALLOCNODE) or simply:
    */
 
   oldnode->size = size | (oldnode->size & MM_MASK_BIT);
@@ -101,8 +101,8 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
   /* Get and initialize the new terminal node in the heap */
 
   newnode = (FAR struct mm_allocnode_s *)
-            (blockend - SIZEOF_MM_ALLOCNODE);
-  newnode->size = SIZEOF_MM_ALLOCNODE | MM_ALLOC_BIT;
+            (blockend - MM_SIZEOF_ALLOCNODE);
+  newnode->size = MM_SIZEOF_ALLOCNODE | MM_ALLOC_BIT;
 
   heap->mm_heapend[region] = newnode;
diff --git a/mm/mm_heap/mm_foreach.c b/mm/mm_heap/mm_foreach.c
index f9883bde0a..ddf7884ac7 100644
--- a/mm/mm_heap/mm_foreach.c
+++ b/mm/mm_heap/mm_foreach.c
@@ -78,7 +78,7 @@ void mm_foreach(FAR struct mm_heap_s *heap, mm_node_handler_t handler,
        node < heap->mm_heapend[region];
        node = (FAR struct mm_allocnode_s *)((FAR char *)node + nodesize))
     {
-      nodesize = SIZEOF_MM_NODE(node);
+      nodesize = MM_SIZEOF_NODE(node);
       minfo("region=%d node=%p size=%zu preceding=%u (%c %c)\n",
             region, node, nodesize, (unsigned int)node->preceding,
             (node->size & MM_PREVFREE_BIT) ? 'F' : 'A',
@@ -87,7 +87,7 @@ void mm_foreach(FAR struct mm_heap_s *heap, mm_node_handler_t handler,
       handler(node, arg);
 
       DEBUGASSERT((node->size & MM_PREVFREE_BIT) == 0 ||
-                  SIZEOF_MM_NODE(prev) == node->preceding);
+                  MM_SIZEOF_NODE(prev) == node->preceding);
       prev = node;
     }
diff --git a/mm/mm_heap/mm_free.c b/mm/mm_heap/mm_free.c
index 699b87d33f..9c4b33e69c 100644
--- a/mm/mm_heap/mm_free.c
+++ b/mm/mm_heap/mm_free.c
@@ -112,8 +112,8 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
 
   /* Map the memory chunk into a free node */
 
-  node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);
-  nodesize = SIZEOF_MM_NODE(node);
+  node = (FAR struct mm_freenode_s *)((FAR char *)mem - MM_SIZEOF_ALLOCNODE);
+  nodesize = MM_SIZEOF_NODE(node);
 
   /* Sanity check against double-frees */
@@ -128,7 +128,7 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
   if ((next->size & MM_ALLOC_BIT) == 0)
     {
       FAR struct mm_allocnode_s *andbeyond;
-      size_t nextsize = SIZEOF_MM_NODE(next);
+      size_t nextsize = MM_SIZEOF_NODE(next);
 
       /* Get the node following the next node (which will
        * become the new next node). We know that we can never
@@ -171,7 +171,7 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
     {
       prev = (FAR struct mm_freenode_s *)
              ((FAR char *)node - node->preceding);
-      prevsize = SIZEOF_MM_NODE(prev);
+      prevsize = MM_SIZEOF_NODE(prev);
       DEBUGASSERT((prev->size & MM_ALLOC_BIT) == 0 &&
                   node->preceding == prevsize);
diff --git a/mm/mm_heap/mm_initialize.c b/mm/mm_heap/mm_initialize.c
index 7bc8629216..414aa96f11 100644
--- a/mm/mm_heap/mm_initialize.c
+++ b/mm/mm_heap/mm_initialize.c
@@ -65,7 +65,7 @@ static FAR void *mempool_memalign(FAR void *arg, size_t alignment,
   if (ret)
     {
       node = (FAR struct mm_allocnode_s *)
-             ((FAR char *)ret - SIZEOF_MM_ALLOCNODE);
+             ((FAR char *)ret - MM_SIZEOF_ALLOCNODE);
       node->pid = PID_MM_MEMPOOL;
     }
 
@@ -137,13 +137,13 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
 
   /* Adjust the provided heap start and size.
    *
-   * Note: (uintptr_t)node + SIZEOF_MM_ALLOCNODE is what's actually
+   * Note: (uintptr_t)node + MM_SIZEOF_ALLOCNODE is what's actually
    * returned to the malloc user, which should have natural alignment.
    * (that is, in this implementation, MM_MIN_CHUNK-alignment.)
    */
 
-  heapbase = MM_ALIGN_UP((uintptr_t)heapstart + 2 * SIZEOF_MM_ALLOCNODE) -
-             2 * SIZEOF_MM_ALLOCNODE;
+  heapbase = MM_ALIGN_UP((uintptr_t)heapstart + 2 * MM_SIZEOF_ALLOCNODE) -
+             2 * MM_SIZEOF_ALLOCNODE;
   heapend  = MM_ALIGN_DOWN((uintptr_t)heapstart + (uintptr_t)heapsize);
   heapsize = heapend - heapbase;
@@ -170,14 +170,14 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
 
   heap->mm_heapstart[IDX] = (FAR struct mm_allocnode_s *)heapbase;
   MM_ADD_BACKTRACE(heap, heap->mm_heapstart[IDX]);
-  heap->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE | MM_ALLOC_BIT;
+  heap->mm_heapstart[IDX]->size = MM_SIZEOF_ALLOCNODE | MM_ALLOC_BIT;
   node = (FAR struct mm_freenode_s *)
-         (heapbase + SIZEOF_MM_ALLOCNODE);
-  DEBUGASSERT((((uintptr_t)node + SIZEOF_MM_ALLOCNODE) % MM_ALIGN) == 0);
-  node->size = heapsize - 2 * SIZEOF_MM_ALLOCNODE;
+         (heapbase + MM_SIZEOF_ALLOCNODE);
+  DEBUGASSERT((((uintptr_t)node + MM_SIZEOF_ALLOCNODE) % MM_ALIGN) == 0);
+  node->size = heapsize - 2 * MM_SIZEOF_ALLOCNODE;
   heap->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
-                          (heapend - SIZEOF_MM_ALLOCNODE);
-  heap->mm_heapend[IDX]->size = SIZEOF_MM_ALLOCNODE | MM_ALLOC_BIT |
+                          (heapend - MM_SIZEOF_ALLOCNODE);
+  heap->mm_heapend[IDX]->size = MM_SIZEOF_ALLOCNODE | MM_ALLOC_BIT |
                                 MM_PREVFREE_BIT;
   heap->mm_heapend[IDX]->preceding = node->size;
   MM_ADD_BACKTRACE(heap, heap->mm_heapend[IDX]);
@@ -238,7 +238,7 @@ FAR struct mm_heap_s *mm_initialize(FAR const char *name,
   heapsize -= sizeof(struct mm_heap_s);
   heapstart = (FAR char *)heap_adj + sizeof(struct mm_heap_s);
 
-  DEBUGASSERT(MM_MIN_CHUNK >= SIZEOF_MM_ALLOCNODE);
+  DEBUGASSERT(MM_MIN_CHUNK >= MM_SIZEOF_ALLOCNODE);
 
   /* Set up global variables */
diff --git a/mm/mm_heap/mm_mallinfo.c b/mm/mm_heap/mm_mallinfo.c
index d844bd9293..a1a47f4074 100644
--- a/mm/mm_heap/mm_mallinfo.c
+++ b/mm/mm_heap/mm_mallinfo.c
@@ -48,7 +48,7 @@ struct mm_mallinfo_handler_s
 static void mallinfo_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 {
   FAR struct mallinfo *info = arg;
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
 
   minfo("node=%p size=%zu preceding=%u (%c)\n",
         node, nodesize, (unsigned int)node->preceding,
@@ -58,7 +58,7 @@ static void mallinfo_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 
   if ((node->size & MM_ALLOC_BIT) != 0)
     {
-      DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE);
+      DEBUGASSERT(nodesize >= MM_SIZEOF_ALLOCNODE);
       info->aordblks++;
       info->uordblks += nodesize;
     }
@@ -68,12 +68,12 @@ static void mallinfo_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 
       DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
       DEBUGASSERT(fnode->blink->flink == fnode);
-      DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
+      DEBUGASSERT(MM_SIZEOF_NODE(fnode->blink) <= nodesize);
       DEBUGASSERT(fnode->flink == NULL ||
                   fnode->flink->blink == fnode);
       DEBUGASSERT(fnode->flink == NULL ||
-                  SIZEOF_MM_NODE(fnode->flink) == 0 ||
-                  SIZEOF_MM_NODE(fnode->flink) >= nodesize);
+                  MM_SIZEOF_NODE(fnode->flink) == 0 ||
+                  MM_SIZEOF_NODE(fnode->flink) >= nodesize);
 
       info->ordblks++;
       info->fordblks += nodesize;
@@ -90,13 +90,13 @@ static void mallinfo_task_handler(FAR struct mm_allocnode_s *node,
   FAR struct mm_mallinfo_handler_s *handler = arg;
   FAR const struct malltask *task = handler->task;
   FAR struct mallinfo_task *info = handler->info;
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
 
   /* Check if the node corresponds to an allocated memory chunk */
 
   if ((node->size & MM_ALLOC_BIT) != 0)
     {
-      DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE);
+      DEBUGASSERT(nodesize >= MM_SIZEOF_ALLOCNODE);
 #if CONFIG_MM_BACKTRACE < 0
       if (task->pid == PID_MM_ALLOC)
         {
diff --git a/mm/mm_heap/mm_malloc.c b/mm/mm_heap/mm_malloc.c
index e5b81aca7b..d73a9bbd6a 100644
--- a/mm/mm_heap/mm_malloc.c
+++ b/mm/mm_heap/mm_malloc.c
@@ -141,12 +141,12 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
    * least MM_MIN_CHUNK.
    */
 
-  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
     {
-      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
     }
 
-  alignsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
+  alignsize = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);
   if (alignsize < size)
     {
       /* There must have been an integer overflow */
@@ -172,7 +172,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
   for (node = heap->mm_nodelist[ndx].flink; node; node = node->flink)
     {
       DEBUGASSERT(node->blink->flink == node);
-      nodesize = SIZEOF_MM_NODE(node);
+      nodesize = MM_SIZEOF_NODE(node);
       if (nodesize >= alignsize)
         {
           break;
@@ -210,7 +210,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
 
       /* Check if we have to split the free node into one of the allocated
        * size and another smaller freenode. In some cases, the remaining
-       * bytes can be smaller (they may be SIZEOF_MM_ALLOCNODE). In that
+       * bytes can be smaller (they may be MM_SIZEOF_ALLOCNODE). In that
        * case, we will just carry the few wasted bytes at the end of the
        * allocation.
        */
@@ -249,7 +249,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       /* Handle the case of an exact size match */
 
       node->size |= MM_ALLOC_BIT;
-      ret = (FAR void *)((FAR char *)node + SIZEOF_MM_ALLOCNODE);
+      ret = (FAR void *)((FAR char *)node + MM_SIZEOF_ALLOCNODE);
     }
 
   DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
@@ -260,7 +260,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
       MM_ADD_BACKTRACE(heap, node);
       kasan_unpoison(ret, mm_malloc_size(heap, ret));
 #ifdef CONFIG_MM_FILL_ALLOCATIONS
-      memset(ret, 0xaa, alignsize - OVERHEAD_MM_ALLOCNODE);
+      memset(ret, 0xaa, alignsize - MM_ALLOCNODE_OVERHEAD);
 #endif
 #ifdef CONFIG_DEBUG_MM
       minfo("Allocated %p, size %zu\n", ret, alignsize);
diff --git a/mm/mm_heap/mm_malloc_size.c b/mm/mm_heap/mm_malloc_size.c
index 5cc7471ace..ad97aab7f4 100644
--- a/mm/mm_heap/mm_malloc_size.c
+++ b/mm/mm_heap/mm_malloc_size.c
@@ -56,11 +56,11 @@ size_t mm_malloc_size(FAR struct mm_heap_s *heap, FAR void *mem)
 
   /* Map the memory chunk into a free node */
 
-  node = (FAR struct mm_freenode_s *)((FAR char *)mem - SIZEOF_MM_ALLOCNODE);
+  node = (FAR struct mm_freenode_s *)((FAR char *)mem - MM_SIZEOF_ALLOCNODE);
 
   /* Sanity check against double-frees */
 
   DEBUGASSERT(node->size & MM_ALLOC_BIT);
 
-  return SIZEOF_MM_NODE(node) - OVERHEAD_MM_ALLOCNODE;
+  return MM_SIZEOF_NODE(node) - MM_ALLOCNODE_OVERHEAD;
 }
diff --git a/mm/mm_heap/mm_memalign.c b/mm/mm_heap/mm_memalign.c
index 5833c8dcc0..333110db3e 100644
--- a/mm/mm_heap/mm_memalign.c
+++ b/mm/mm_heap/mm_memalign.c
@@ -106,12 +106,12 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
    * alignment points within the allocated memory.
    *
    * NOTE: These are sizes given to malloc and not chunk sizes. They do
-   * not include SIZEOF_MM_ALLOCNODE.
+   * not include MM_SIZEOF_ALLOCNODE.
    */
 
-  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
     {
-      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
     }
 
   newsize = MM_ALIGN_UP(size); /* Make multiples of our granule size */
@@ -145,7 +145,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
    * the allocation.
    */
 
-  node = (FAR struct mm_allocnode_s *)(rawchunk - SIZEOF_MM_ALLOCNODE);
+  node = (FAR struct mm_allocnode_s *)(rawchunk - MM_SIZEOF_ALLOCNODE);
 
   /* Find the aligned subregion */
@@ -163,13 +163,13 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       /* Get the node the next node after the allocation. */
 
       next = (FAR struct mm_allocnode_s *)
-             ((FAR char *)node + SIZEOF_MM_NODE(node));
+             ((FAR char *)node + MM_SIZEOF_NODE(node));
 
       newnode = (FAR struct mm_allocnode_s *)
-                (alignedchunk - SIZEOF_MM_ALLOCNODE);
+                (alignedchunk - MM_SIZEOF_ALLOCNODE);
 
       /* Preceding size is full size of the new 'node,' including
-       * SIZEOF_MM_ALLOCNODE
+       * MM_SIZEOF_ALLOCNODE
        */
 
       precedingsize = (uintptr_t)newnode - (uintptr_t)node;
@@ -186,7 +186,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
         {
           alignedchunk += alignment;
           newnode = (FAR struct mm_allocnode_s *)
-                    (alignedchunk - SIZEOF_MM_ALLOCNODE);
+                    (alignedchunk - MM_SIZEOF_ALLOCNODE);
           precedingsize = (uintptr_t)newnode - (uintptr_t)node;
         }
@@ -210,7 +210,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
               prev->flink->blink = prev->blink;
             }
 
-          precedingsize += SIZEOF_MM_NODE(prev);
+          precedingsize += MM_SIZEOF_NODE(prev);
           node = (FAR struct mm_allocnode_s *)prev;
         }
@@ -227,10 +227,10 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
       next->size &= ~MM_PREVFREE_BIT;
 
       /* Convert the newnode chunk size back into malloc-compatible size by
-       * subtracting the header size OVERHEAD_MM_ALLOCNODE.
+       * subtracting the header size MM_ALLOCNODE_OVERHEAD.
        */
 
-      allocsize = newnodesize - OVERHEAD_MM_ALLOCNODE;
+      allocsize = newnodesize - MM_ALLOCNODE_OVERHEAD;
 
       /* Add the original, newly freed node to the free nodelist */
@@ -244,16 +244,16 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
     }
 
   /* Check if there is free space at the end of the aligned chunk. Convert
-   * malloc-compatible chunk size to include OVERHEAD_MM_ALLOCNODE as needed
+   * malloc-compatible chunk size to include MM_ALLOCNODE_OVERHEAD as needed
    * for mm_shrinkchunk.
    */
 
-  size = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
+  size = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);
   if (allocsize > size)
     {
       /* Shrink the chunk by that much -- remember, mm_shrinkchunk wants
-       * internal chunk sizes that include OVERHEAD_MM_ALLOCNODE.
+       * internal chunk sizes that include MM_ALLOCNODE_OVERHEAD.
        */
 
       mm_shrinkchunk(heap, node, size);
diff --git a/mm/mm_heap/mm_memdump.c b/mm/mm_heap/mm_memdump.c
index 7f45c2f419..3e1cc81bdd 100644
--- a/mm/mm_heap/mm_memdump.c
+++ b/mm/mm_heap/mm_memdump.c
@@ -53,11 +53,11 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 {
   FAR const struct mm_memdump_s *dump = arg;
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
 
   if ((node->size & MM_ALLOC_BIT) != 0)
     {
-      DEBUGASSERT(nodesize >= SIZEOF_MM_ALLOCNODE);
+      DEBUGASSERT(nodesize >= MM_SIZEOF_ALLOCNODE);
 #if CONFIG_MM_BACKTRACE < 0
       if (dump->pid == PID_MM_ALLOC)
 #else
@@ -70,7 +70,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 #if CONFIG_MM_BACKTRACE < 0
           syslog(LOG_INFO, "%12zu%*p\n",
                  nodesize, MM_PTR_FMT_WIDTH,
-                 ((FAR char *)node + SIZEOF_MM_ALLOCNODE));
+                 ((FAR char *)node + MM_SIZEOF_ALLOCNODE));
 #else
           char buf[CONFIG_MM_BACKTRACE * MM_PTR_FMT_WIDTH + 1] = "";
@@ -89,7 +89,7 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
           syslog(LOG_INFO, "%6d%12zu%12lu%*p%s\n",
                  node->pid, nodesize, node->seqno, MM_PTR_FMT_WIDTH,
-                 ((FAR char *)node + SIZEOF_MM_ALLOCNODE), buf);
+                 ((FAR char *)node + MM_SIZEOF_ALLOCNODE), buf);
 #endif
         }
     }
@@ -99,16 +99,16 @@ static void memdump_handler(FAR struct mm_allocnode_s *node, FAR void *arg)
 
       DEBUGASSERT(nodesize >= MM_MIN_CHUNK);
       DEBUGASSERT(fnode->blink->flink == fnode);
-      DEBUGASSERT(SIZEOF_MM_NODE(fnode->blink) <= nodesize);
+      DEBUGASSERT(MM_SIZEOF_NODE(fnode->blink) <= nodesize);
       DEBUGASSERT(fnode->flink == NULL ||
                   fnode->flink->blink == fnode);
       DEBUGASSERT(fnode->flink == NULL ||
-                  SIZEOF_MM_NODE(fnode->flink) == 0 ||
-                  SIZEOF_MM_NODE(fnode->flink) >= nodesize);
+                  MM_SIZEOF_NODE(fnode->flink) == 0 ||
+                  MM_SIZEOF_NODE(fnode->flink) >= nodesize);
 
       syslog(LOG_INFO, "%12zu%*p\n",
              nodesize, MM_PTR_FMT_WIDTH,
-             ((FAR char *)node + SIZEOF_MM_ALLOCNODE));
+             ((FAR char *)node + MM_SIZEOF_ALLOCNODE));
     }
 }
diff --git a/mm/mm_heap/mm_realloc.c b/mm/mm_heap/mm_realloc.c
index f8de8b6402..f864c2ebee 100644
--- a/mm/mm_heap/mm_realloc.c
+++ b/mm/mm_heap/mm_realloc.c
@@ -108,12 +108,12 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
    * least MM_MIN_CHUNK.
    */
 
-  if (size < MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE)
+  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
    {
-      size = MM_MIN_CHUNK - OVERHEAD_MM_ALLOCNODE;
+      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
    }
 
-  newsize = MM_ALIGN_UP(size + OVERHEAD_MM_ALLOCNODE);
+  newsize = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);
   if (newsize < size)
     {
       /* There must have been an integer overflow */
@@ -125,7 +125,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
   /* Map the memory chunk into an allocated node structure */
 
   oldnode = (FAR struct mm_allocnode_s *)
-            ((FAR char *)oldmem - SIZEOF_MM_ALLOCNODE);
+            ((FAR char *)oldmem - MM_SIZEOF_ALLOCNODE);
 
   /* We need to hold the MM mutex while we muck with the nodelist. */
@@ -134,7 +134,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
 
   /* Check if this is a request to reduce the size of the allocation. */
 
-  oldsize = SIZEOF_MM_NODE(oldnode);
+  oldsize = MM_SIZEOF_NODE(oldnode);
   if (newsize <= oldsize)
     {
       /* Handle the special case where we are not going to change the size
@@ -144,8 +144,8 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       if (newsize < oldsize)
         {
           mm_shrinkchunk(heap, oldnode, newsize);
-          kasan_poison((FAR char *)oldnode + SIZEOF_MM_NODE(oldnode) +
-                       sizeof(mmsize_t), oldsize - SIZEOF_MM_NODE(oldnode));
+          kasan_poison((FAR char *)oldnode + MM_SIZEOF_NODE(oldnode) +
+                       sizeof(mmsize_t), oldsize - MM_SIZEOF_NODE(oldnode));
         }
 
       /* Then return the original address */
@@ -165,7 +165,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
   if ((next->size & MM_ALLOC_BIT) == 0)
     {
       DEBUGASSERT((next->size & MM_PREVFREE_BIT) == 0);
-      nextsize = SIZEOF_MM_NODE(next);
+      nextsize = MM_SIZEOF_NODE(next);
     }
 
   if ((oldnode->size & MM_PREVFREE_BIT) != 0)
@@ -173,7 +173,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
       prev = (FAR struct mm_freenode_s *)
              ((FAR char *)oldnode - oldnode->preceding);
       DEBUGASSERT((prev->size & MM_ALLOC_BIT) == 0);
-      prevsize = SIZEOF_MM_NODE(prev);
+      prevsize = MM_SIZEOF_NODE(prev);
     }
 
   /* Now, check if we can extend the current allocation or not */
@@ -296,7 +296,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                           (newnode->size & MM_MASK_BIT);
         }
 
-      newmem = (FAR void *)((FAR char *)newnode + SIZEOF_MM_ALLOCNODE);
+      newmem = (FAR void *)((FAR char *)newnode + MM_SIZEOF_ALLOCNODE);
 
       /* Now we want to return newnode */
@@ -366,7 +366,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
         }
 
       mm_unlock(heap);
-      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - SIZEOF_MM_ALLOCNODE);
+      MM_ADD_BACKTRACE(heap, (FAR char *)newmem - MM_SIZEOF_ALLOCNODE);
 
       kasan_unpoison(newmem, mm_malloc_size(heap, newmem));
       if (newmem != oldmem)
@@ -375,7 +375,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
            * should be safe for this.
            */
 
-          memcpy(newmem, oldmem, oldsize - OVERHEAD_MM_ALLOCNODE);
+          memcpy(newmem, oldmem, oldsize - MM_ALLOCNODE_OVERHEAD);
         }
 
       return newmem;
@@ -395,7 +395,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
   newmem = mm_malloc(heap, size);
   if (newmem)
     {
-      memcpy(newmem, oldmem, oldsize - OVERHEAD_MM_ALLOCNODE);
+      memcpy(newmem, oldmem, oldsize - MM_ALLOCNODE_OVERHEAD);
       mm_free(heap, oldmem);
     }
diff --git a/mm/mm_heap/mm_shrinkchunk.c b/mm/mm_heap/mm_shrinkchunk.c
index ce2cf16913..bf3465245d 100644
--- a/mm/mm_heap/mm_shrinkchunk.c
+++ b/mm/mm_heap/mm_shrinkchunk.c
@@ -53,7 +53,7 @@ void mm_shrinkchunk(FAR struct mm_heap_s *heap,
                     FAR struct mm_allocnode_s *node, size_t size)
 {
   FAR struct mm_freenode_s *next;
-  size_t nodesize = SIZEOF_MM_NODE(node);
+  size_t nodesize = MM_SIZEOF_NODE(node);
 
   DEBUGASSERT((size & MM_GRAN_MASK) == 0);
@@ -67,7 +67,7 @@ void mm_shrinkchunk(FAR struct mm_heap_s *heap,
     {
       FAR struct mm_allocnode_s *andbeyond;
       FAR struct mm_freenode_s *newnode;
-      size_t nextsize = SIZEOF_MM_NODE(next);
+      size_t nextsize = MM_SIZEOF_NODE(next);
 
       /* Get the chunk next the next node (which could be the tail chunk) */