2012-07-15 01:31:12 +02:00
|
|
|
/****************************************************************************
|
2014-09-22 18:53:50 +02:00
|
|
|
* mm/mm_heap/mm_malloc.c
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
2021-02-08 13:50:10 +01:00
|
|
|
* Licensed to the Apache Software Foundation (ASF) under one or more
|
|
|
|
* contributor license agreements. See the NOTICE file distributed with
|
|
|
|
* this work for additional information regarding copyright ownership. The
|
|
|
|
* ASF licenses this file to you under the Apache License, Version 2.0 (the
|
|
|
|
* "License"); you may not use this file except in compliance with the
|
|
|
|
* License. You may obtain a copy of the License at
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
2021-02-08 13:50:10 +01:00
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
2021-02-08 13:50:10 +01:00
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
|
|
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
|
|
* License for the specific language governing permissions and limitations
|
|
|
|
* under the License.
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
2012-07-15 01:31:12 +02:00
|
|
|
****************************************************************************/
|
2007-02-18 00:21:28 +01:00
|
|
|
|
2012-07-15 01:31:12 +02:00
|
|
|
/****************************************************************************
|
2007-02-18 00:21:28 +01:00
|
|
|
* Included Files
|
2012-07-15 01:31:12 +02:00
|
|
|
****************************************************************************/
|
2007-02-18 00:21:28 +01:00
|
|
|
|
2013-03-08 21:36:18 +01:00
|
|
|
#include <nuttx/config.h>
|
2007-02-18 00:21:28 +01:00
|
|
|
|
|
|
|
#include <assert.h>
|
2013-04-18 19:40:38 +02:00
|
|
|
#include <debug.h>
|
2018-11-12 16:36:35 +01:00
|
|
|
#include <string.h>
|
2012-07-15 01:31:12 +02:00
|
|
|
|
2020-04-07 16:21:42 +02:00
|
|
|
#include <nuttx/arch.h>
|
2014-09-24 15:29:09 +02:00
|
|
|
#include <nuttx/mm/mm.h>
|
2022-06-02 07:44:14 +02:00
|
|
|
#include <nuttx/sched.h>
|
2007-02-18 00:21:28 +01:00
|
|
|
|
2021-03-02 09:03:00 +01:00
|
|
|
#include "mm_heap/mm.h"
|
2021-10-09 09:22:28 +02:00
|
|
|
#include "kasan/kasan.h"
|
2021-03-02 09:03:00 +01:00
|
|
|
|
2020-04-07 16:21:42 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Private Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
2022-11-06 00:19:44 +01:00
|
|
|
/* Drain this CPU's list of deferred deallocations back into the heap.
 * Frees queued via the delay list (e.g. from contexts that could not
 * take the heap lock) are returned here before a new allocation.
 * Only compiled in for flat builds or kernel-side code.
 */

static void free_delaylist(FAR struct mm_heap_s *heap)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
  FAR struct mm_delaynode_s *node;
  irqstate_t flags;

  /* Atomically detach the whole per-CPU delay list so that it can be
   * walked without holding the spinlock.
   */

  flags = spin_lock_irqsave(&heap->mm_spinlock);

  node = heap->mm_delaylist[up_cpu_index()];
  heap->mm_delaylist[up_cpu_index()] = NULL;

  spin_unlock_irqrestore(&heap->mm_spinlock, flags);

  /* Return each pending deallocation to the heap.  The link must be
   * read before mm_free() since the node memory is recycled by it.
   */

  while (node != NULL)
    {
      FAR struct mm_delaynode_s *next = node->flink;

      mm_free(heap, node);
      node = next;
    }
#endif
}
|
2020-04-07 16:21:42 +02:00
|
|
|
|
2022-07-04 09:41:06 +02:00
|
|
|
#if CONFIG_MM_BACKTRACE >= 0
|
2022-06-02 07:44:14 +02:00
|
|
|
void mm_dump_handler(FAR struct tcb_s *tcb, FAR void *arg)
|
|
|
|
{
|
|
|
|
struct mallinfo_task info;
|
2023-05-28 13:23:56 +02:00
|
|
|
struct malltask task;
|
2022-06-02 07:44:14 +02:00
|
|
|
|
2023-05-28 14:09:42 +02:00
|
|
|
task.pid = tcb ? tcb->pid : PID_MM_LEAK;
|
2023-05-28 13:23:56 +02:00
|
|
|
task.seqmin = 0;
|
|
|
|
task.seqmax = ULONG_MAX;
|
|
|
|
info = mm_mallinfo_task(arg, &task);
|
2022-06-02 07:44:14 +02:00
|
|
|
mwarn("pid:%5d, used:%10d, nused:%10d\n",
|
2023-05-28 13:23:56 +02:00
|
|
|
task.pid, info.uordblks, info.aordblks);
|
2022-06-02 07:44:14 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2023-05-12 13:39:27 +02:00
|
|
|
#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
/* Per-pool dump callback: print one row of statistics for a mempool.
 * Column widths match the header row emitted by mm_malloc on failure.
 * arg is unused (required by the mempool_multiple_foreach signature).
 */

void mm_mempool_dump_handle(FAR struct mempool_s *pool, FAR void *arg)
{
  struct mempoolinfo_s stats;

  mempool_info(pool, &stats);

  mwarn("%9lu%11lu%9lu%9lu%9lu%9lu\n",
        stats.sizeblks, stats.arena, stats.aordblks,
        stats.ordblks, stats.iordblks, stats.nwaiter);
}
#endif
|
|
|
|
|
2014-08-31 18:54:55 +02:00
|
|
|
/****************************************************************************
|
|
|
|
* Public Functions
|
|
|
|
****************************************************************************/
|
|
|
|
|
2012-07-15 01:31:12 +02:00
|
|
|
/****************************************************************************
|
2013-03-08 21:36:18 +01:00
|
|
|
* Name: mm_malloc
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
|
|
|
* Description:
|
2012-07-15 01:31:12 +02:00
|
|
|
* Find the smallest chunk that satisfies the request. Take the memory from
|
|
|
|
* that chunk, save the remaining, smaller chunk (if any).
|
2007-02-18 00:21:28 +01:00
|
|
|
*
|
|
|
|
* 8-byte alignment of the allocated data is assured.
|
|
|
|
*
|
2012-07-15 01:31:12 +02:00
|
|
|
****************************************************************************/
|
2007-02-18 00:21:28 +01:00
|
|
|
|
2013-03-08 21:36:18 +01:00
|
|
|
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
  FAR struct mm_freenode_s *node;
  size_t alignsize;
  size_t nodesize;
  FAR void *ret = NULL;
  int ndx;

  /* Free the delay list first so deferred deallocations are available
   * to satisfy this request.
   */

  free_delaylist(heap);

#if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
  /* Fast path: try the multiple-mempool allocator first; fall through
   * to the general heap only if it cannot serve this size.
   */

  ret = mempool_multiple_alloc(heap->mm_mpool, size);
  if (ret != NULL)
    {
      return ret;
    }
#endif

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is aligned with MM_ALIGN and its size is at
   * least MM_MIN_CHUNK.
   */

  if (size < MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD)
    {
      size = MM_MIN_CHUNK - MM_ALLOCNODE_OVERHEAD;
    }

  alignsize = MM_ALIGN_UP(size + MM_ALLOCNODE_OVERHEAD);
  if (alignsize < size)
    {
      /* There must have been an integer overflow */

      return NULL;
    }

  DEBUGASSERT(alignsize >= MM_ALIGN);

  /* We need to hold the MM mutex while we muck with the nodelist. */

  DEBUGVERIFY(mm_lock(heap));

  /* Convert the request size into a nodelist index */

  ndx = mm_size2ndx(alignsize);

  /* Search for a large enough chunk in the list of nodes. This list is
   * ordered by size, but will have occasional zero sized nodes as we visit
   * other mm_nodelist[] entries.
   */

  for (node = heap->mm_nodelist[ndx].flink; node; node = node->flink)
    {
      /* Sanity-check the doubly-linked list before trusting this node */

      DEBUGASSERT(node->blink->flink == node);
      nodesize = MM_SIZEOF_NODE(node);
      if (nodesize >= alignsize)
        {
          break;
        }
    }

  /* If we found a node with non-zero size, then this is one to use. Since
   * the list is ordered, we know that it must be the best fitting chunk
   * available.
   */

  if (node)
    {
      FAR struct mm_freenode_s *remainder;
      FAR struct mm_freenode_s *next;
      size_t remaining;

      /* Remove the node. There must be a predecessor, but there may not be
       * a successor node.
       */

      DEBUGASSERT(node->blink);
      node->blink->flink = node->flink;
      if (node->flink)
        {
          node->flink->blink = node->blink;
        }

      /* Get a pointer to the next node in physical memory */

      next = (FAR struct mm_freenode_s *)(((FAR char *)node) + nodesize);

      /* Node next must be alloced, otherwise it should be merged.
       * Its prenode(the founded node) must be free and preceding should
       * match with nodesize.
       */

      DEBUGASSERT(MM_NODE_IS_ALLOC(next) && MM_PREVNODE_IS_FREE(next) &&
                  next->preceding == nodesize);

      /* Check if we have to split the free node into one of the allocated
       * size and another smaller freenode. In some cases, the remaining
       * bytes can be smaller (they may be MM_SIZEOF_ALLOCNODE). In that
       * case, we will just carry the few wasted bytes at the end of the
       * allocation.
       */

      remaining = nodesize - alignsize;
      if (remaining >= MM_MIN_CHUNK)
        {
          /* Create the remainder node */

          remainder = (FAR struct mm_freenode_s *)
                      (((FAR char *)node) + alignsize);

          remainder->size = remaining;

          /* Adjust the size of the node under consideration */

          node->size = alignsize | (node->size & MM_MASK_BIT);

          /* Adjust the 'preceding' size of the (old) next node. */

          next->preceding = remaining;

          /* Add the remainder back into the nodelist */

          mm_addfreechunk(heap, remainder);
        }
      else
        {
          /* Previous physical memory node is alloced, so clear the previous
           * free bit in next->size.
           */

          next->size &= ~MM_PREVFREE_BIT;
        }

      /* Handle the case of an exact size match */

      node->size |= MM_ALLOC_BIT;
      ret = (FAR void *)((FAR char *)node + MM_SIZEOF_ALLOCNODE);
    }

  DEBUGASSERT(ret == NULL || mm_heapmember(heap, ret));
  mm_unlock(heap);

  if (ret)
    {
      /* Record allocator backtrace (if enabled) and tell KASan the user
       * region is now valid before handing it out.
       */

      MM_ADD_BACKTRACE(heap, node);
      kasan_unpoison(ret, mm_malloc_size(heap, ret));
#ifdef CONFIG_MM_FILL_ALLOCATIONS
      /* Poison-fill the user region to expose use of uninitialized data */

      memset(ret, 0xaa, alignsize - MM_ALLOCNODE_OVERHEAD);
#endif
#ifdef CONFIG_DEBUG_MM
      minfo("Allocated %p, size %zu\n", ret, alignsize);
#endif
    }
#ifdef CONFIG_DEBUG_MM
  else if (MM_INTERNAL_HEAP(heap))
    {
      /* Allocation failed on an internal heap: dump diagnostics so the
       * failure can be attributed to a task, pool, or leak.
       */

#ifdef CONFIG_MM_DUMP_ON_FAILURE
      struct mallinfo minfo;
# ifdef CONFIG_MM_DUMP_DETAILS_ON_FAILURE
      struct mm_memdump_s dump =
      {
        PID_MM_ALLOC, 0, ULONG_MAX
      };
# endif
#endif

      mwarn("WARNING: Allocation failed, size %zu\n", alignsize);
#ifdef CONFIG_MM_DUMP_ON_FAILURE
      minfo = mm_mallinfo(heap);
      mwarn("Total:%d, used:%d, free:%d, largest:%d, nused:%d, nfree:%d\n",
            minfo.arena, minfo.uordblks, minfo.fordblks,
            minfo.mxordblk, minfo.aordblks, minfo.ordblks);
# if CONFIG_MM_BACKTRACE >= 0
      /* Per-task usage; the extra NULL call reports leaked memory */

      nxsched_foreach(mm_dump_handler, heap);
      mm_dump_handler(NULL, heap);
# endif
# if CONFIG_MM_HEAP_MEMPOOL_THRESHOLD != 0
      /* Header row for the per-pool statistics printed by the callback */

      mwarn("%11s%9s%9s%9s%9s%9s\n",
            "bsize", "total", "nused",
            "nfree", "nifree", "nwaiter");
      mempool_multiple_foreach(heap->mm_mpool,
                               mm_mempool_dump_handle, NULL);
# endif
# ifdef CONFIG_MM_DUMP_DETAILS_ON_FAILURE
      mm_memdump(heap, &dump);
# endif
#endif

#ifdef CONFIG_MM_PANIC_ON_FAILURE
      PANIC();
#endif
    }
#endif

  DEBUGASSERT(ret == NULL || ((uintptr_t)ret) % MM_ALIGN == 0);
  return ret;
}
|