mm: Remove mm_heap_impl_s struct

It is simpler to make mm_heap_s opaque outside of mm

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
Change-Id: I5c8e435f6baba6d22b10c5f7e8d9191104fb5af2
This commit is contained in:
Xiang Xiao 2021-07-03 22:16:25 +08:00 committed by David Sidrane
parent 441b03c61c
commit 76cdd5c329
47 changed files with 271 additions and 308 deletions

View File

@ -32,7 +32,7 @@
* Public Data
****************************************************************************/
struct mm_heap_s g_ccm_heap;
FAR struct mm_heap_s *g_ccm_heap;
/****************************************************************************
* Public Functions

View File

@ -69,25 +69,25 @@
*/
#define ccm_initialize() \
mm_initialize(&g_ccm_heap, "ccm", (FAR void *)CCM_START, CCM_END-CCM_START)
g_ccm_heap = mm_initialize("ccm", (FAR void *)CCM_START, CCM_END-CCM_START)
/* The ccm_addregion interface could be used if, for example, you want to
* add some other memory region to the CCM heap. I don't really know why
* you might want to do that, but the functionality is essentially free.
*/
#define ccm_addregion(b,s) mm_addregion(&g_ccm_heap, b, s);
#define ccm_addregion(b,s) mm_addregion(g_ccm_heap, b, s);
/* Then, once g_ccm_heap has been setup by ccm_initialize(), these memory
* allocators can be used just like the standard memory allocators.
*/
#define ccm_malloc(s) mm_malloc(&g_ccm_heap, s)
#define ccm_zalloc(s) mm_zalloc(&g_ccm_heap, s)
#define ccm_calloc(n,s) mm_calloc(&g_ccm_heap, n,s)
#define ccm_free(p) mm_free(&g_ccm_heap, p)
#define ccm_realloc(p,s) mm_realloc(&g_ccm_heap, p, s)
#define ccm_memalign(a,s) mm_memalign(&g_ccm_heap, a, s)
#define ccm_malloc(s) mm_malloc(g_ccm_heap, s)
#define ccm_zalloc(s) mm_zalloc(g_ccm_heap, s)
#define ccm_calloc(n,s) mm_calloc(g_ccm_heap, n,s)
#define ccm_free(p) mm_free(g_ccm_heap, p)
#define ccm_realloc(p,s) mm_realloc(g_ccm_heap, p, s)
#define ccm_memalign(a,s) mm_memalign(g_ccm_heap, a, s)
/****************************************************************************
* Public Types
@ -107,7 +107,7 @@ extern "C"
#define EXTERN extern
#endif
EXTERN struct mm_heap_s g_ccm_heap;
EXTERN FAR struct mm_heap_s *g_ccm_heap;
/****************************************************************************
* Public Function Prototypes

View File

@ -32,7 +32,7 @@
* Public Data
****************************************************************************/
struct mm_heap_s g_dtcm_heap;
FAR struct mm_heap_s *g_dtcm_heap;
/****************************************************************************
* Public Functions

View File

@ -69,25 +69,25 @@
*/
#define dtcm_initialize() \
mm_initialize(&g_dtcm_heap, "dtcm", (FAR void *)DTCM_START, DTCM_END-DTCM_START)
g_dtcm_heap = mm_initialize("dtcm", (FAR void *)DTCM_START, DTCM_END-DTCM_START)
/* The dtcm_addregion interface could be used if, for example, you want to
* add some other memory region to the DTCM heap. I don't really know why
* you might want to do that, but the functionality is essentially free.
*/
#define dtcm_addregion(b,s) mm_addregion(&g_dtcm_heap, b, s);
#define dtcm_addregion(b,s) mm_addregion(g_dtcm_heap, b, s);
/* Then, once g_dtcm_heap has been setup by dtcm_initialize(), these memory
* allocators can be used just like the standard memory allocators.
*/
#define dtcm_malloc(s) mm_malloc(&g_dtcm_heap, s)
#define dtcm_zalloc(s) mm_zalloc(&g_dtcm_heap, s)
#define dtcm_calloc(n,s) mm_calloc(&g_dtcm_heap, n,s)
#define dtcm_free(p) mm_free(&g_dtcm_heap, p)
#define dtcm_realloc(p,s) mm_realloc(&g_dtcm_heap, p, s)
#define dtcm_memalign(a,s) mm_memalign(&g_dtcm_heap, a, s)
#define dtcm_malloc(s) mm_malloc(g_dtcm_heap, s)
#define dtcm_zalloc(s) mm_zalloc(g_dtcm_heap, s)
#define dtcm_calloc(n,s) mm_calloc(g_dtcm_heap, n,s)
#define dtcm_free(p) mm_free(g_dtcm_heap, p)
#define dtcm_realloc(p,s) mm_realloc(g_dtcm_heap, p, s)
#define dtcm_memalign(a,s) mm_memalign(g_dtcm_heap, a, s)
/****************************************************************************
* Public Types
@ -107,7 +107,7 @@ extern "C"
#define EXTERN extern
#endif
EXTERN struct mm_heap_s g_dtcm_heap;
EXTERN FAR struct mm_heap_s *g_dtcm_heap;
/****************************************************************************
* Public Function Prototypes

View File

@ -40,7 +40,7 @@
* Public Data
****************************************************************************/
struct mm_heap_s g_dtcm_heap;
FAR struct mm_heap_s *g_dtcm_heap;
/****************************************************************************
* Private Functions

View File

@ -61,25 +61,25 @@
*/
#define dtcm_initialize() \
mm_initialize(&g_dtcm_heap, "dtcm", (FAR void *)DTCM_START, DTCM_END-DTCM_START)
g_dtcm_heap = mm_initialize("dtcm", (FAR void *)DTCM_START, DTCM_END-DTCM_START)
/* The dtcm_addregion interface could be used if, for example, you want to
* add some other memory region to the DTCM heap. I don't really know why
* you might want to do that, but the functionality is essentially free.
*/
#define dtcm_addregion(b,s) mm_addregion(&g_dtcm_heap, b, s);
#define dtcm_addregion(b,s) mm_addregion(g_dtcm_heap, b, s);
/* Then, once g_dtcm_heap has been setup by dtcm_initialize(), these memory
* allocators can be used just like the standard memory allocators.
*/
#define dtcm_malloc(s) mm_malloc(&g_dtcm_heap, s)
#define dtcm_zalloc(s) mm_zalloc(&g_dtcm_heap, s)
#define dtcm_calloc(n,s) mm_calloc(&g_dtcm_heap, n,s)
#define dtcm_free(p) mm_free(&g_dtcm_heap, p)
#define dtcm_realloc(p,s) mm_realloc(&g_dtcm_heap, p, s)
#define dtcm_memalign(a,s) mm_memalign(&g_dtcm_heap, a, s)
#define dtcm_malloc(s) mm_malloc(g_dtcm_heap, s)
#define dtcm_zalloc(s) mm_zalloc(g_dtcm_heap, s)
#define dtcm_calloc(n,s) mm_calloc(g_dtcm_heap, n,s)
#define dtcm_free(p) mm_free(g_dtcm_heap, p)
#define dtcm_realloc(p,s) mm_realloc(g_dtcm_heap, p, s)
#define dtcm_memalign(a,s) mm_memalign(g_dtcm_heap, a, s)
/****************************************************************************
* Public Types
@ -99,7 +99,7 @@ extern "C"
#define EXTERN extern
#endif
EXTERN struct mm_heap_s g_dtcm_heap;
EXTERN FAR struct mm_heap_s *g_dtcm_heap;
/****************************************************************************
* Public Function Prototypes

View File

@ -34,7 +34,7 @@
* Private Data
****************************************************************************/
static struct mm_heap_s g_rtcheap;
static FAR struct mm_heap_s *g_rtcheap;
/****************************************************************************
* Public Functions
@ -62,7 +62,7 @@ void esp32c3_rtcheap_initialize(void)
start = (FAR void *)&_srtcheap;
size = (size_t)((uintptr_t)&_ertcheap - (uintptr_t)&_srtcheap);
mm_initialize(&g_rtcheap, "rtcheap", start, size);
g_rtcheap = mm_initialize("rtcheap", start, size);
}
/****************************************************************************
@ -75,7 +75,7 @@ void esp32c3_rtcheap_initialize(void)
void *esp32c3_rtcheap_malloc(size_t size)
{
return mm_malloc(&g_rtcheap, size);
return mm_malloc(g_rtcheap, size);
}
/****************************************************************************
@ -89,7 +89,7 @@ void *esp32c3_rtcheap_malloc(size_t size)
void *esp32c3_rtcheap_calloc(size_t n, size_t elem_size)
{
return mm_calloc(&g_rtcheap, n, elem_size);
return mm_calloc(g_rtcheap, n, elem_size);
}
/****************************************************************************
@ -102,7 +102,7 @@ void *esp32c3_rtcheap_calloc(size_t n, size_t elem_size)
void *esp32c3_rtcheap_realloc(void *ptr, size_t size)
{
return mm_realloc(&g_rtcheap, ptr, size);
return mm_realloc(g_rtcheap, ptr, size);
}
/****************************************************************************
@ -115,7 +115,7 @@ void *esp32c3_rtcheap_realloc(void *ptr, size_t size)
void *esp32c3_rtcheap_zalloc(size_t size)
{
return mm_zalloc(&g_rtcheap, size);
return mm_zalloc(g_rtcheap, size);
}
/****************************************************************************
@ -128,7 +128,7 @@ void *esp32c3_rtcheap_zalloc(size_t size)
void esp32c3_rtcheap_free(FAR void *mem)
{
mm_free(&g_rtcheap, mem);
mm_free(g_rtcheap, mem);
}
/****************************************************************************
@ -146,7 +146,7 @@ void esp32c3_rtcheap_free(FAR void *mem)
void *esp32c3_rtcheap_memalign(size_t alignment, size_t size)
{
return mm_memalign(&g_rtcheap, alignment, size);
return mm_memalign(g_rtcheap, alignment, size);
}
/****************************************************************************
@ -165,7 +165,7 @@ void *esp32c3_rtcheap_memalign(size_t alignment, size_t size)
bool esp32c3_rtcheap_heapmember(FAR void *mem)
{
return mm_heapmember(&g_rtcheap, mem);
return mm_heapmember(g_rtcheap, mem);
}
/****************************************************************************
@ -179,5 +179,5 @@ bool esp32c3_rtcheap_heapmember(FAR void *mem)
int esp32c3_rtcheap_mallinfo(FAR struct mallinfo *info)
{
return mm_mallinfo(&g_rtcheap, info);
return mm_mallinfo(g_rtcheap, info);
}

View File

@ -2429,16 +2429,9 @@ static int32_t esp_event_post_wrap(esp_event_base_t event_base,
uint32_t esp_get_free_heap_size(void)
{
int ret;
struct mallinfo info;
ret = mm_mallinfo(&g_mmheap, &info);
if (ret)
{
wlerr("ERROR: Failed to create task\n");
return 0;
}
info = kmm_mallinfo();
return info.fordblks;
}

View File

@ -48,7 +48,7 @@ struct mm_delaynode_s
FAR struct mm_delaynode_s *flink;
};
struct mm_heap_impl_s
struct mm_heap_s
{
#ifdef CONFIG_SMP
struct mm_delaynode_s *mm_delaylist[CONFIG_SMP_NCPUS];
@ -75,8 +75,8 @@ static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
flags = enter_critical_section();
tmp->flink = heap->mm_impl->mm_delaylist[up_cpu_index()];
heap->mm_impl->mm_delaylist[up_cpu_index()] = tmp;
tmp->flink = heap->mm_delaylist[up_cpu_index()];
heap->mm_delaylist[up_cpu_index()] = tmp;
leave_critical_section(flags);
}
@ -93,8 +93,8 @@ static void mm_free_delaylist(FAR struct mm_heap_s *heap)
flags = enter_critical_section();
tmp = heap->mm_impl->mm_delaylist[up_cpu_index()];
heap->mm_impl->mm_delaylist[up_cpu_index()] = NULL;
tmp = heap->mm_delaylist[up_cpu_index()];
heap->mm_delaylist[up_cpu_index()] = NULL;
leave_critical_section(flags);
@ -141,23 +141,24 @@ static void mm_free_delaylist(FAR struct mm_heap_s *heap)
*
****************************************************************************/
void mm_initialize(FAR struct mm_heap_s *heap, FAR const char *name,
FAR void *heap_start, size_t heap_size)
FAR struct mm_heap_s *mm_initialize(FAR const char *name,
FAR void *heap_start, size_t heap_size)
{
FAR struct mm_heap_impl_s *impl;
FAR struct mm_heap_s *heap;
impl = host_memalign(sizeof(FAR void *), sizeof(*impl));
DEBUGASSERT(impl);
heap = host_memalign(sizeof(FAR void *), sizeof(*heap));
DEBUGASSERT(heap);
memset(impl, 0, sizeof(struct mm_heap_impl_s));
heap->mm_impl = impl;
memset(heap, 0, sizeof(struct mm_heap_s));
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
impl->mm_procfs.name = name;
impl->mm_procfs.mallinfo = (FAR void *)mm_mallinfo;
impl->mm_procfs.user_data = heap;
procfs_register_meminfo(&impl->mm_procfs);
heap->mm_procfs.name = name;
heap->mm_procfs.mallinfo = (FAR void *)mm_mallinfo;
heap->mm_procfs.user_data = heap;
procfs_register_meminfo(&heap->mm_procfs);
#endif
return heap;
}
/****************************************************************************

View File

@ -41,7 +41,7 @@
* Private Data
****************************************************************************/
struct mm_heap_s g_iheap;
FAR struct mm_heap_s *g_iheap;
/****************************************************************************
* Public Functions
@ -62,7 +62,7 @@ void xtensa_imm_initialize(void)
start = (FAR void *)ESP32_IMEM_START;
size = CONFIG_XTENSA_IMEM_REGION_SIZE;
mm_initialize(&g_iheap, "esp32-imem", start, size);
g_iheap = mm_initialize("esp32-imem", start, size);
}
/****************************************************************************
@ -75,7 +75,7 @@ void xtensa_imm_initialize(void)
void *xtensa_imm_malloc(size_t size)
{
return mm_malloc(&g_iheap, size);
return mm_malloc(g_iheap, size);
}
/****************************************************************************
@ -89,7 +89,7 @@ void *xtensa_imm_malloc(size_t size)
void *xtensa_imm_calloc(size_t n, size_t elem_size)
{
return mm_calloc(&g_iheap, n, elem_size);
return mm_calloc(g_iheap, n, elem_size);
}
/****************************************************************************
@ -102,7 +102,7 @@ void *xtensa_imm_calloc(size_t n, size_t elem_size)
void *xtensa_imm_realloc(void *ptr, size_t size)
{
return mm_realloc(&g_iheap, ptr, size);
return mm_realloc(g_iheap, ptr, size);
}
/****************************************************************************
@ -115,7 +115,7 @@ void *xtensa_imm_realloc(void *ptr, size_t size)
void *xtensa_imm_zalloc(size_t size)
{
return mm_zalloc(&g_iheap, size);
return mm_zalloc(g_iheap, size);
}
/****************************************************************************
@ -128,7 +128,7 @@ void *xtensa_imm_zalloc(size_t size)
void xtensa_imm_free(FAR void *mem)
{
mm_free(&g_iheap, mem);
mm_free(g_iheap, mem);
}
/****************************************************************************
@ -146,7 +146,7 @@ void xtensa_imm_free(FAR void *mem)
void *xtensa_imm_memalign(size_t alignment, size_t size)
{
return mm_memalign(&g_iheap, alignment, size);
return mm_memalign(g_iheap, alignment, size);
}
/****************************************************************************
@ -165,7 +165,7 @@ void *xtensa_imm_memalign(size_t alignment, size_t size)
bool xtensa_imm_heapmember(FAR void *mem)
{
return mm_heapmember(&g_iheap, mem);
return mm_heapmember(g_iheap, mem);
}
/****************************************************************************
@ -179,7 +179,7 @@ bool xtensa_imm_heapmember(FAR void *mem)
int xtensa_imm_mallinfo(FAR struct mallinfo *info)
{
return mm_mallinfo(&g_iheap, info);
return mm_mallinfo(g_iheap, info);
}
#endif /* CONFIG_XTENSA_IMEM_USE_SEPARATE_HEAP */

View File

@ -34,7 +34,7 @@
* Private Data
****************************************************************************/
static struct mm_heap_s g_iramheap;
static struct mm_heap_s *g_iramheap;
/****************************************************************************
* Public Functions
@ -60,7 +60,7 @@ void esp32_iramheap_initialize(void)
start = (void *)&_siramheap;
size = (size_t)((uintptr_t)&_eiramheap - (uintptr_t)&_siramheap);
mm_initialize(&g_iramheap, "iramheap", start, size);
g_iramheap = mm_initialize("iramheap", start, size);
}
/****************************************************************************
@ -73,7 +73,7 @@ void esp32_iramheap_initialize(void)
void *esp32_iramheap_malloc(size_t size)
{
return mm_malloc(&g_iramheap, size);
return mm_malloc(g_iramheap, size);
}
/****************************************************************************
@ -87,7 +87,7 @@ void *esp32_iramheap_malloc(size_t size)
void *esp32_iramheap_calloc(size_t n, size_t elem_size)
{
return mm_calloc(&g_iramheap, n, elem_size);
return mm_calloc(g_iramheap, n, elem_size);
}
/****************************************************************************
@ -100,7 +100,7 @@ void *esp32_iramheap_calloc(size_t n, size_t elem_size)
void *esp32_iramheap_realloc(void *ptr, size_t size)
{
return mm_realloc(&g_iramheap, ptr, size);
return mm_realloc(g_iramheap, ptr, size);
}
/****************************************************************************
@ -113,7 +113,7 @@ void *esp32_iramheap_realloc(void *ptr, size_t size)
void *esp32_iramheap_zalloc(size_t size)
{
return mm_zalloc(&g_iramheap, size);
return mm_zalloc(g_iramheap, size);
}
/****************************************************************************
@ -126,7 +126,7 @@ void *esp32_iramheap_zalloc(size_t size)
void esp32_iramheap_free(void *mem)
{
mm_free(&g_iramheap, mem);
mm_free(g_iramheap, mem);
}
/****************************************************************************
@ -144,7 +144,7 @@ void esp32_iramheap_free(void *mem)
void *esp32_iramheap_memalign(size_t alignment, size_t size)
{
return mm_memalign(&g_iramheap, alignment, size);
return mm_memalign(g_iramheap, alignment, size);
}
/****************************************************************************
@ -163,7 +163,7 @@ void *esp32_iramheap_memalign(size_t alignment, size_t size)
bool esp32_iramheap_heapmember(void *mem)
{
return mm_heapmember(&g_iramheap, mem);
return mm_heapmember(g_iramheap, mem);
}
/****************************************************************************
@ -177,5 +177,5 @@ bool esp32_iramheap_heapmember(void *mem)
int esp32_iramheap_mallinfo(struct mallinfo *info)
{
return mm_mallinfo(&g_iramheap, info);
return mm_mallinfo(g_iramheap, info);
}

View File

@ -34,7 +34,7 @@
* Private Data
****************************************************************************/
static struct mm_heap_s g_rtcheap;
static struct mm_heap_s *g_rtcheap;
/****************************************************************************
* Public Functions
@ -60,7 +60,7 @@ void esp32_rtcheap_initialize(void)
start = (void *)&_srtcheap;
size = (size_t)((uintptr_t)&_ertcheap - (uintptr_t)&_srtcheap);
mm_initialize(&g_rtcheap, "rtcheap", start, size);
g_rtcheap = mm_initialize("rtcheap", start, size);
}
/****************************************************************************
@ -76,7 +76,7 @@ void esp32_rtcheap_initialize(void)
void *esp32_rtcheap_malloc(size_t size)
{
return mm_malloc(&g_rtcheap, size);
return mm_malloc(g_rtcheap, size);
}
/****************************************************************************
@ -90,7 +90,7 @@ void *esp32_rtcheap_malloc(size_t size)
void *esp32_rtcheap_calloc(size_t n, size_t elem_size)
{
return mm_calloc(&g_rtcheap, n, elem_size);
return mm_calloc(g_rtcheap, n, elem_size);
}
/****************************************************************************
@ -103,7 +103,7 @@ void *esp32_rtcheap_calloc(size_t n, size_t elem_size)
void *esp32_rtcheap_realloc(void *ptr, size_t size)
{
return mm_realloc(&g_rtcheap, ptr, size);
return mm_realloc(g_rtcheap, ptr, size);
}
/****************************************************************************
@ -116,7 +116,7 @@ void *esp32_rtcheap_realloc(void *ptr, size_t size)
void *esp32_rtcheap_zalloc(size_t size)
{
return mm_zalloc(&g_rtcheap, size);
return mm_zalloc(g_rtcheap, size);
}
/****************************************************************************
@ -129,7 +129,7 @@ void *esp32_rtcheap_zalloc(size_t size)
void esp32_rtcheap_free(void *mem)
{
mm_free(&g_rtcheap, mem);
mm_free(g_rtcheap, mem);
}
/****************************************************************************
@ -151,7 +151,7 @@ void esp32_rtcheap_free(void *mem)
void *esp32_rtcheap_memalign(size_t alignment, size_t size)
{
return mm_memalign(&g_rtcheap, alignment, size);
return mm_memalign(g_rtcheap, alignment, size);
}
/****************************************************************************
@ -170,7 +170,7 @@ void *esp32_rtcheap_memalign(size_t alignment, size_t size)
bool esp32_rtcheap_heapmember(void *mem)
{
return mm_heapmember(&g_rtcheap, mem);
return mm_heapmember(g_rtcheap, mem);
}
/****************************************************************************
@ -187,5 +187,5 @@ bool esp32_rtcheap_heapmember(void *mem)
int esp32_rtcheap_mallinfo(struct mallinfo *info)
{
return mm_mallinfo(&g_rtcheap, info);
return mm_mallinfo(g_rtcheap, info);
}

View File

@ -2310,16 +2310,9 @@ int32_t esp_event_post(esp_event_base_t event_base,
uint32_t esp_get_free_heap_size(void)
{
int ret;
struct mallinfo info;
ret = mm_mallinfo(&g_mmheap, &info);
if (ret)
{
wlerr("Failed to create task\n");
return 0;
}
info = kmm_mallinfo();
return info.fordblks;
}

View File

@ -250,7 +250,7 @@ typedef CODE void (*addrenv_sigtramp_t)(_sa_sigaction_t sighand, int signo,
struct addrenv_reserve_s
{
addrenv_sigtramp_t ar_sigtramp; /* Signal trampoline */
struct mm_heap_s ar_usrheap; /* User space heap structure */
struct mm_heap_s *ar_usrheap; /* User space heap structure */
};
/* Each instance of this structure resides at the beginning of the user-

View File

@ -96,13 +96,7 @@
* Public Types
****************************************************************************/
struct mm_heap_impl_s; /* Forward reference */
struct mm_heap_s
{
struct mm_heap_impl_s *mm_impl;
};
#define MM_IS_VALID(heap) ((heap) != NULL && (heap)->mm_impl != NULL)
struct mm_heap_s; /* Forward reference */
/****************************************************************************
* Public Data
@ -143,13 +137,13 @@ extern "C"
#if defined(CONFIG_BUILD_FLAT) || !defined(__KERNEL__)
/* Otherwise, the user heap data structures are in common .bss */
EXTERN struct mm_heap_s g_mmheap;
EXTERN FAR struct mm_heap_s *g_mmheap;
#endif
#ifdef CONFIG_MM_KERNEL_HEAP
/* This is the kernel heap */
EXTERN struct mm_heap_s g_kmmheap;
EXTERN FAR struct mm_heap_s *g_kmmheap;
#endif
/****************************************************************************
@ -158,8 +152,8 @@ EXTERN struct mm_heap_s g_kmmheap;
/* Functions contained in mm_initialize.c ***********************************/
void mm_initialize(FAR struct mm_heap_s *heap, FAR const char *name,
FAR void *heap_start, size_t heap_size);
FAR struct mm_heap_s *mm_initialize(FAR const char *name,
FAR void *heap_start, size_t heap_size);
void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
size_t heapsize);

View File

@ -98,7 +98,7 @@ struct userspace_s
/* Memory manager heap structure */
FAR struct mm_heap_s *us_heap;
FAR struct mm_heap_s **us_heap;
/* Task startup routine */

View File

@ -45,11 +45,11 @@ This directory contains the NuttX memory management logic. This include:
in memory:
include <nuttx/mm/mm.h>
static struct mm_heap_s g_myheap;
static struct mm_heap_s *g_myheap;
Then initialize the heap using:
mm_initialize(&g_myheap, myheap_start, myheap_size);
g_myheap = mm_initialize(myheap_start, myheap_size);
Where mm_initialize() and all related interfaces are prototyped in the
header file include/nuttx/mm/mm.h.

View File

@ -49,7 +49,7 @@
void kmm_addregion(FAR void *heap_start, size_t heap_size)
{
return mm_addregion(&g_kmmheap, heap_start, heap_size);
mm_addregion(g_kmmheap, heap_start, heap_size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -44,7 +44,7 @@
FAR void *kmm_brkaddr(int region)
{
return mm_brkaddr(&g_kmmheap, region);
return mm_brkaddr(g_kmmheap, region);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -42,7 +42,7 @@
FAR void *kmm_calloc(size_t n, size_t elem_size)
{
return mm_calloc(&g_kmmheap, n, elem_size);
return mm_calloc(g_kmmheap, n, elem_size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -42,7 +42,7 @@
void kmm_checkcorruption(void)
{
mm_checkcorruption(&g_kmmheap);
mm_checkcorruption(g_kmmheap);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -43,7 +43,7 @@
void kmm_extend(FAR void *mem, size_t size, int region)
{
mm_extend(&g_kmmheap, mem, size, region);
mm_extend(g_kmmheap, mem, size, region);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -53,7 +53,7 @@
void kmm_free(FAR void *mem)
{
DEBUGASSERT(kmm_heapmember(mem));
mm_free(&g_kmmheap, mem);
mm_free(g_kmmheap, mem);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -52,7 +52,7 @@
bool kmm_heapmember(FAR void *mem)
{
return mm_heapmember(&g_kmmheap, mem);
return mm_heapmember(g_kmmheap, mem);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -34,7 +34,7 @@
/* This is the kernel heap */
struct mm_heap_s g_kmmheap;
FAR struct mm_heap_s *g_kmmheap;
/****************************************************************************
* Public Functions
@ -58,7 +58,7 @@ struct mm_heap_s g_kmmheap;
void kmm_initialize(FAR void *heap_start, size_t heap_size)
{
mm_initialize(&g_kmmheap, "Kmem", heap_start, heap_size);
g_kmmheap = mm_initialize("Kmem", heap_start, heap_size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -46,7 +46,7 @@
struct mallinfo kmm_mallinfo(void)
{
struct mallinfo info;
mm_mallinfo(&g_kmmheap, &info);
mm_mallinfo(g_kmmheap, &info);
return info;
}

View File

@ -48,7 +48,7 @@
FAR void *kmm_malloc(size_t size)
{
return mm_malloc(&g_kmmheap, size);
return mm_malloc(g_kmmheap, size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -51,7 +51,7 @@
FAR void *kmm_memalign(size_t alignment, size_t size)
{
return mm_memalign(&g_kmmheap, alignment, size);
return mm_memalign(g_kmmheap, alignment, size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -49,7 +49,7 @@
FAR void *kmm_realloc(FAR void *oldmem, size_t newsize)
{
return mm_realloc(&g_kmmheap, oldmem, newsize);
return mm_realloc(g_kmmheap, oldmem, newsize);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -48,7 +48,7 @@
FAR void *kmm_zalloc(size_t size)
{
return mm_zalloc(&g_kmmheap, size);
return mm_zalloc(g_kmmheap, size);
}
#endif /* CONFIG_MM_KERNEL_HEAP */

View File

@ -113,8 +113,6 @@
* Public Types
****************************************************************************/
struct mm_heap_s;
/* Determines the size of the chunk size/offset type */
#ifdef CONFIG_MM_SMALL
@ -172,7 +170,7 @@ struct mm_delaynode_s
/* This describes one heap (possibly with multiple regions) */
struct mm_heap_impl_s
struct mm_heap_s
{
/* Mutually exclusive access to this data set is enforced with
* the following un-named semaphore.

View File

@ -46,25 +46,21 @@
void mm_addfreechunk(FAR struct mm_heap_s *heap,
FAR struct mm_freenode_s *node)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_freenode_s *next;
FAR struct mm_freenode_s *prev;
int ndx;
DEBUGASSERT(MM_IS_VALID(heap));
DEBUGASSERT(node->size >= SIZEOF_MM_FREENODE);
DEBUGASSERT((node->preceding & MM_ALLOC_BIT) == 0);
heap_impl = heap->mm_impl;
/* Convert the size to a nodelist index */
ndx = mm_size2ndx(node->size);
/* Now put the new node into the next */
for (prev = &heap_impl->mm_nodelist[ndx],
next = heap_impl->mm_nodelist[ndx].flink;
for (prev = &heap->mm_nodelist[ndx],
next = heap->mm_nodelist[ndx].flink;
next && next->size && next->size < node->size;
prev = next, next = next->flink);

View File

@ -45,18 +45,14 @@
FAR void *mm_brkaddr(FAR struct mm_heap_s *heap, int region)
{
FAR struct mm_heap_impl_s *heap_impl;
uintptr_t brkaddr;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
#if CONFIG_MM_REGIONS > 1
DEBUGASSERT(heap && region < heap_impl->mm_nregions);
DEBUGASSERT(heap && region < heap->mm_nregions);
#else
DEBUGASSERT(heap && region == 0);
#endif
brkaddr = (uintptr_t)heap_impl->mm_heapend[region];
brkaddr = (uintptr_t)heap->mm_heapend[region];
return brkaddr ? (FAR void *)(brkaddr + SIZEOF_MM_ALLOCNODE) : 0;
}

View File

@ -47,13 +47,9 @@
void mm_checkcorruption(FAR struct mm_heap_s *heap)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_allocnode_s *node;
FAR struct mm_allocnode_s *prev;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
#if CONFIG_MM_REGIONS > 1
int region;
#else
@ -63,7 +59,7 @@ void mm_checkcorruption(FAR struct mm_heap_s *heap)
/* Visit each region */
#if CONFIG_MM_REGIONS > 1
for (region = 0; region < heap_impl->mm_nregions; region++)
for (region = 0; region < heap->mm_nregions; region++)
#endif
{
prev = NULL;
@ -88,8 +84,8 @@ void mm_checkcorruption(FAR struct mm_heap_s *heap)
mm_takesemaphore(heap);
}
for (node = heap_impl->mm_heapstart[region];
node < heap_impl->mm_heapend[region];
for (node = heap->mm_heapstart[region];
node < heap->mm_heapend[region];
node = (FAR struct mm_allocnode_s *)
((FAR char *)node + node->size))
{
@ -116,7 +112,7 @@ void mm_checkcorruption(FAR struct mm_heap_s *heap)
prev = node;
}
assert(node == heap_impl->mm_heapend[region]);
assert(node == heap->mm_heapend[region]);
mm_givesemaphore(heap);
}

View File

@ -52,21 +52,17 @@
void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
int region)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_allocnode_s *oldnode;
FAR struct mm_allocnode_s *newnode;
uintptr_t blockstart;
uintptr_t blockend;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Make sure that we were passed valid parameters */
DEBUGASSERT(heap && mem);
#if CONFIG_MM_REGIONS > 1
DEBUGASSERT(size >= MIN_EXTEND &&
(size_t)region < (size_t)heap_impl->mm_nregions);
(size_t)region < (size_t)heap->mm_nregions);
#else
DEBUGASSERT(size >= MIN_EXTEND && region == 0);
#endif
@ -87,7 +83,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
* immediately follow this node.
*/
oldnode = heap_impl->mm_heapend[region];
oldnode = heap->mm_heapend[region];
DEBUGASSERT((uintptr_t)oldnode + SIZEOF_MM_ALLOCNODE == (uintptr_t)mem);
/* The size of the old node now extends to the new terminal node.
@ -109,7 +105,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
newnode->size = SIZEOF_MM_ALLOCNODE;
newnode->preceding = oldnode->size | MM_ALLOC_BIT;
heap_impl->mm_heapend[region] = newnode;
heap->mm_heapend[region] = newnode;
mm_givesemaphore(heap);
/* Finally "free" the new block of memory where the old terminal node was

View File

@ -39,19 +39,15 @@
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_delaynode_s *tmp = mem;
irqstate_t flags;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Delay the deallocation until a more appropriate time. */
flags = enter_critical_section();
tmp->flink = heap_impl->mm_delaylist[up_cpu_index()];
heap_impl->mm_delaylist[up_cpu_index()] = tmp;
tmp->flink = heap->mm_delaylist[up_cpu_index()];
heap->mm_delaylist[up_cpu_index()] = tmp;
leave_critical_section(flags);
}

View File

@ -54,11 +54,6 @@
bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
{
FAR struct mm_heap_impl_s *heap_impl;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
#if CONFIG_MM_REGIONS > 1
int i;
@ -66,10 +61,10 @@ bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
* between the region's two guard nodes.
*/
for (i = 0; i < heap_impl->mm_nregions; i++)
for (i = 0; i < heap->mm_nregions; i++)
{
if (mem > (FAR void *)heap_impl->mm_heapstart[i] &&
mem < (FAR void *)heap_impl->mm_heapend[i])
if (mem > (FAR void *)heap->mm_heapstart[i] &&
mem < (FAR void *)heap->mm_heapend[i])
{
return true;
}
@ -84,8 +79,8 @@ bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
* two guard nodes.
*/
if (mem > (FAR void *)heap_impl->mm_heapstart[0] &&
mem < (FAR void *)heap_impl->mm_heapend[0])
if (mem > (FAR void *)heap->mm_heapstart[0] &&
mem < (FAR void *)heap->mm_heapend[0])
{
return true;
}

View File

@ -57,16 +57,13 @@
void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
size_t heapsize)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_freenode_s *node;
uintptr_t heapbase;
uintptr_t heapend;
#if CONFIG_MM_REGIONS > 1
int IDX;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
IDX = heap_impl->mm_nregions;
IDX = heap->mm_nregions;
/* Writing past CONFIG_MM_REGIONS would have catastrophic consequences */
@ -78,9 +75,6 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
#else
# define IDX 0
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
#endif
#if defined(CONFIG_MM_SMALL) && !defined(CONFIG_SMALL_MEMORY)
@ -106,7 +100,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
/* Add the size of this region to the total size of the heap */
heap_impl->mm_heapsize += heapsize;
heap->mm_heapsize += heapsize;
/* Create two "allocated" guard nodes at the beginning and end of
* the heap. These only serve to keep us from allocating outside
@ -116,23 +110,23 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
* all available memory.
*/
heap_impl->mm_heapstart[IDX] = (FAR struct mm_allocnode_s *)
heapbase;
heap_impl->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE;
heap_impl->mm_heapstart[IDX]->preceding = MM_ALLOC_BIT;
node = (FAR struct mm_freenode_s *)
(heapbase + SIZEOF_MM_ALLOCNODE);
node->size = heapsize - 2*SIZEOF_MM_ALLOCNODE;
node->preceding = SIZEOF_MM_ALLOCNODE;
heap_impl->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
(heapend - SIZEOF_MM_ALLOCNODE);
heap_impl->mm_heapend[IDX]->size = SIZEOF_MM_ALLOCNODE;
heap_impl->mm_heapend[IDX]->preceding = node->size | MM_ALLOC_BIT;
heap->mm_heapstart[IDX] = (FAR struct mm_allocnode_s *)
heapbase;
heap->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE;
heap->mm_heapstart[IDX]->preceding = MM_ALLOC_BIT;
node = (FAR struct mm_freenode_s *)
(heapbase + SIZEOF_MM_ALLOCNODE);
node->size = heapsize - 2*SIZEOF_MM_ALLOCNODE;
node->preceding = SIZEOF_MM_ALLOCNODE;
heap->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
(heapend - SIZEOF_MM_ALLOCNODE);
heap->mm_heapend[IDX]->size = SIZEOF_MM_ALLOCNODE;
heap->mm_heapend[IDX]->preceding = node->size | MM_ALLOC_BIT;
#undef IDX
#if CONFIG_MM_REGIONS > 1
heap_impl->mm_nregions++;
heap->mm_nregions++;
#endif
/* Add the single, large free node to the nodelist */
@ -150,38 +144,38 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
* heap region.
*
* Input Parameters:
* name - The heap procfs name
* heap - The selected heap
* heapstart - Start of the initial heap region
* heapsize - Size of the initial heap region
*
* Returned Value:
* None
* Return the address of a new heap instance.
*
* Assumptions:
*
****************************************************************************/
void mm_initialize(FAR struct mm_heap_s *heap, FAR const char *name,
FAR void *heapstart, size_t heapsize)
FAR struct mm_heap_s *mm_initialize(FAR const char *name,
FAR void *heapstart, size_t heapsize)
{
FAR struct mm_heap_impl_s *heap_impl;
uintptr_t heap_adj;
int i;
FAR struct mm_heap_s *heap;
uintptr_t heap_adj;
int i;
minfo("Heap: name=%s, start=%p size=%zu\n", name, heapstart, heapsize);
/* First ensure the memory to be used is aligned */
heap_adj = MM_ALIGN_UP((uintptr_t) heapstart);
heapsize -= heap_adj - (uintptr_t) heapstart;
heap_adj = MM_ALIGN_UP((uintptr_t)heapstart);
heapsize -= heap_adj - (uintptr_t)heapstart;
/* Reserve a block space for mm_heap_impl_s context */
/* Reserve a block space for mm_heap_s context */
DEBUGASSERT(heapsize > sizeof(struct mm_heap_impl_s));
heap->mm_impl = (FAR struct mm_heap_impl_s *)heap_adj;
heap_impl = heap->mm_impl;
heapsize -= sizeof(struct mm_heap_impl_s);
heapstart = (FAR char *)heap_adj + sizeof(struct mm_heap_impl_s);
DEBUGASSERT(heapsize > sizeof(struct mm_heap_s));
heap = (FAR struct mm_heap_s *)heap_adj;
heapsize -= sizeof(struct mm_heap_s);
heapstart = (FAR char *)heap_adj + sizeof(struct mm_heap_s);
/* The following two lines have cause problems for some older ZiLog
* compilers in the past (but not the more recent). Life is easier if we
@ -197,14 +191,14 @@ void mm_initialize(FAR struct mm_heap_s *heap, FAR const char *name,
/* Set up global variables */
memset(heap_impl, 0, sizeof(struct mm_heap_impl_s));
memset(heap, 0, sizeof(struct mm_heap_s));
/* Initialize the node array */
for (i = 1; i < MM_NNODES; i++)
{
heap_impl->mm_nodelist[i - 1].flink = &heap_impl->mm_nodelist[i];
heap_impl->mm_nodelist[i].blink = &heap_impl->mm_nodelist[i - 1];
heap->mm_nodelist[i - 1].flink = &heap->mm_nodelist[i];
heap->mm_nodelist[i].blink = &heap->mm_nodelist[i - 1];
}
/* Initialize the malloc semaphore to one (to support one-at-
@ -219,10 +213,12 @@ void mm_initialize(FAR struct mm_heap_s *heap, FAR const char *name,
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MEMINFO)
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
heap_impl->mm_procfs.name = name;
heap_impl->mm_procfs.mallinfo = (FAR void *)mm_mallinfo;
heap_impl->mm_procfs.user_data = heap;
procfs_register_meminfo(&heap_impl->mm_procfs);
heap->mm_procfs.name = name;
heap->mm_procfs.mallinfo = (FAR void *)mm_mallinfo;
heap->mm_procfs.user_data = heap;
procfs_register_meminfo(&heap->mm_procfs);
#endif
#endif
return heap;
}

View File

@ -46,7 +46,6 @@
int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_allocnode_s *node;
FAR struct mm_allocnode_s *prev;
size_t mxordblk = 0;
@ -61,13 +60,11 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
#endif
DEBUGASSERT(info);
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Visit each region */
#if CONFIG_MM_REGIONS > 1
for (region = 0; region < heap_impl->mm_nregions; region++)
for (region = 0; region < heap->mm_nregions; region++)
#endif
{
prev = NULL;
@ -78,8 +75,8 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
mm_takesemaphore(heap);
for (node = heap_impl->mm_heapstart[region];
node < heap_impl->mm_heapend[region];
for (node = heap->mm_heapstart[region];
node < heap->mm_heapend[region];
node = (FAR struct mm_allocnode_s *)
((FAR char *)node + node->size))
{
@ -122,8 +119,8 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
}
minfo("region=%d node=%p heapend=%p\n",
region, node, heap_impl->mm_heapend[region]);
DEBUGASSERT(node == heap_impl->mm_heapend[region]);
region, node, heap->mm_heapend[region]);
DEBUGASSERT(node == heap->mm_heapend[region]);
mm_givesemaphore(heap);
@ -131,9 +128,9 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
}
#undef region
DEBUGASSERT(uordblks + fordblks == heap_impl->mm_heapsize);
DEBUGASSERT(uordblks + fordblks == heap->mm_heapsize);
info->arena = heap_impl->mm_heapsize;
info->arena = heap->mm_heapsize;
info->ordblks = ordblks;
info->aordblks = aordblks;
info->mxordblk = mxordblk;

View File

@ -48,19 +48,15 @@
static void mm_free_delaylist(FAR struct mm_heap_s *heap)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_delaynode_s *tmp;
irqstate_t flags;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Move the delay list to local */
flags = enter_critical_section();
tmp = heap_impl->mm_delaylist[up_cpu_index()];
heap_impl->mm_delaylist[up_cpu_index()] = NULL;
tmp = heap->mm_delaylist[up_cpu_index()];
heap->mm_delaylist[up_cpu_index()] = NULL;
leave_critical_section(flags);
@ -101,15 +97,11 @@ static void mm_free_delaylist(FAR struct mm_heap_s *heap)
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
FAR struct mm_heap_impl_s *heap_impl;
FAR struct mm_freenode_s *node;
size_t alignsize;
FAR void *ret = NULL;
int ndx;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Firstly, free mm_delaylist */
mm_free_delaylist(heap);
@ -160,7 +152,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
* other mm_nodelist[] entries.
*/
for (node = heap_impl->mm_nodelist[ndx].flink;
for (node = heap->mm_nodelist[ndx].flink;
node && node->size < alignsize;
node = node->flink)
{

View File

@ -72,19 +72,14 @@
void mm_seminitialize(FAR struct mm_heap_s *heap)
{
FAR struct mm_heap_impl_s *heap_impl;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Initialize the MM semaphore to one (to support one-at-a-time access to
* private data sets).
*/
_SEM_INIT(&heap_impl->mm_semaphore, 0, 1);
_SEM_INIT(&heap->mm_semaphore, 0, 1);
heap_impl->mm_holder = NO_HOLDER;
heap_impl->mm_counts_held = 0;
heap->mm_holder = NO_HOLDER;
heap->mm_counts_held = 0;
}
/****************************************************************************
@ -100,7 +95,6 @@ void mm_seminitialize(FAR struct mm_heap_s *heap)
int mm_trysemaphore(FAR struct mm_heap_s *heap)
{
FAR struct mm_heap_impl_s *heap_impl;
pid_t my_pid = getpid();
int ret;
@ -133,9 +127,6 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)
* 'else', albeit with a nonsensical PID value.
*/
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
if (my_pid < 0)
{
ret = my_pid;
@ -146,20 +137,20 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)
* task actually running?
*/
if (heap_impl->mm_holder == my_pid)
if (heap->mm_holder == my_pid)
{
/* Yes, just increment the number of references held by the current
* task.
*/
heap_impl->mm_counts_held++;
heap->mm_counts_held++;
ret = OK;
}
else
{
/* Try to take the semaphore */
ret = _SEM_TRYWAIT(&heap_impl->mm_semaphore);
ret = _SEM_TRYWAIT(&heap->mm_semaphore);
if (ret < 0)
{
ret = _SEM_ERRVAL(ret);
@ -168,8 +159,8 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)
/* We have it. Claim the heap for the current task and return */
heap_impl->mm_holder = my_pid;
heap_impl->mm_counts_held = 1;
heap->mm_holder = my_pid;
heap->mm_counts_held = 1;
ret = OK;
}
@ -188,21 +179,17 @@ errout:
void mm_takesemaphore(FAR struct mm_heap_s *heap)
{
FAR struct mm_heap_impl_s *heap_impl;
pid_t my_pid = getpid();
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* Does the current task already hold the semaphore? */
if (heap_impl->mm_holder == my_pid)
if (heap->mm_holder == my_pid)
{
/* Yes, just increment the number of references held by the current
* task.
*/
heap_impl->mm_counts_held++;
heap->mm_counts_held++;
}
else
{
@ -213,7 +200,7 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
mseminfo("PID=%d taking\n", my_pid);
do
{
ret = _SEM_WAIT(&heap_impl->mm_semaphore);
ret = _SEM_WAIT(&heap->mm_semaphore);
/* The only case that an error should occur here is if the wait
* was awakened by a signal.
@ -231,12 +218,12 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
* the semaphore for the current task and return.
*/
heap_impl->mm_holder = my_pid;
heap_impl->mm_counts_held = 1;
heap->mm_holder = my_pid;
heap->mm_counts_held = 1;
}
mseminfo("Holder=%d count=%d\n", heap_impl->mm_holder,
heap_impl->mm_counts_held);
mseminfo("Holder=%d count=%d\n", heap->mm_holder,
heap->mm_counts_held);
}
/****************************************************************************
@ -249,26 +236,21 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
void mm_givesemaphore(FAR struct mm_heap_s *heap)
{
FAR struct mm_heap_impl_s *heap_impl;
DEBUGASSERT(MM_IS_VALID(heap));
heap_impl = heap->mm_impl;
/* The current task should be holding at least one reference to the
* semaphore.
*/
DEBUGASSERT(heap_impl->mm_holder == getpid());
DEBUGASSERT(heap->mm_holder == getpid());
/* Does the current task hold multiple references to the semaphore */
if (heap_impl->mm_counts_held > 1)
if (heap->mm_counts_held > 1)
{
/* Yes, just release one count and return */
heap_impl->mm_counts_held--;
mseminfo("Holder=%d count=%d\n", heap_impl->mm_holder,
heap_impl->mm_counts_held);
heap->mm_counts_held--;
mseminfo("Holder=%d count=%d\n", heap->mm_holder,
heap->mm_counts_held);
}
else
{
@ -276,8 +258,8 @@ void mm_givesemaphore(FAR struct mm_heap_s *heap)
mseminfo("PID=%d giving\n", getpid());
heap_impl->mm_holder = NO_HOLDER;
heap_impl->mm_counts_held = 0;
DEBUGVERIFY(_SEM_POST(&heap_impl->mm_semaphore));
heap->mm_holder = NO_HOLDER;
heap->mm_counts_held = 0;
DEBUGVERIFY(_SEM_POST(&heap->mm_semaphore));
}
}

View File

@ -49,7 +49,7 @@
#else
/* Otherwise, the user heap data structures are in common .bss */
struct mm_heap_s g_mmheap;
FAR struct mm_heap_s *g_mmheap;
#endif
/****************************************************************************

View File

@ -43,7 +43,7 @@
* ARCH_DATA_RESERVE_SIZE
*/
# define USR_HEAP (&ARCH_DATA_RESERVE->ar_usrheap)
# define USR_HEAP (ARCH_DATA_RESERVE->ar_usrheap)
#elif defined(CONFIG_BUILD_PROTECTED) && defined(__KERNEL__)
/* In the protected mode, there are two heaps: A kernel heap and a single
@ -51,12 +51,20 @@
* structure from the userspace interface.
*/
# define USR_HEAP (USERSPACE->us_heap)
# define USR_HEAP (*USERSPACE->us_heap)
#else
/* Otherwise, the user heap data structures are in common .bss */
# define USR_HEAP &g_mmheap
# define USR_HEAP g_mmheap
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
#ifdef CONFIG_BUILD_KERNEL
void umm_try_initialize(void);
#endif
#endif /* __MM_UMM_HEAP_UMM_HEAP_H */

View File

@ -81,5 +81,45 @@
void umm_initialize(FAR void *heap_start, size_t heap_size)
{
mm_initialize(USR_HEAP, "Umem", heap_start, heap_size);
USR_HEAP = mm_initialize("Umem", heap_start, heap_size);
}
/****************************************************************************
* Name: umm_try_initialize
*
* Description:
* Allocate and initialize the user heap if not yet.
*
* Input Parameters:
* None
*
* Returned Value:
* None
*
****************************************************************************/
#ifdef CONFIG_BUILD_KERNEL
void umm_try_initialize(void)
{
  uintptr_t allocbase;

  /* Return if the user heap is already initialized (USR_HEAP is non-NULL
   * once umm_initialize() has run).
   */

  if (USR_HEAP != NULL)
    {
      return;
    }

  /* Allocate one page. If we provide a zero brkaddr to pgalloc(),
   * it will create the first block in the correct virtual address
   * space and return the start address of that block.
   */

  allocbase = pgalloc(0, 1);

  /* NOTE(review): pgalloc() failure is treated as fatal here; a zero
   * return would dereference an unmapped page below.
   */

  DEBUGASSERT(allocbase != 0);

  /* Let umm_initialize do the real work of setting up the heap over
   * this single CONFIG_MM_PGSIZE-byte page.
   */

  umm_initialize((FAR void *)allocbase, CONFIG_MM_PGSIZE);
}
#endif

View File

@ -54,6 +54,10 @@ FAR void *memalign(size_t alignment, size_t size)
FAR void *brkaddr;
FAR void *mem;
/* Initialize the user heap if it wasn't yet */
umm_try_initialize();
/* Loop until we successfully allocate the memory or until an error
* occurs. If we fail to allocate memory on the first pass, then call
* sbrk to extend the heap by one page. This may require several

View File

@ -62,6 +62,10 @@ FAR void *realloc(FAR void *oldmem, size_t size)
return NULL;
}
/* Initialize the user heap if it wasn't yet */
umm_try_initialize();
/* Loop until we successfully allocate the memory or until an error
* occurs. If we fail to allocate memory on the first pass, then call
* sbrk to extend the heap by one page. This may require several

View File

@ -85,10 +85,11 @@ FAR void *sbrk(intptr_t incr)
goto errout;
}
/* Get the current break address (NOTE: assumes region 0). If
* the memory manager is uninitialized, mm_brkaddr() will return
* zero.
*/
/* Initialize the user heap if it wasn't yet */
umm_try_initialize();
/* Get the current break address (NOTE: assumes region 0). */
brkaddr = (uintptr_t)mm_brkaddr(USR_HEAP, 0);
if (incr > 0)
@ -98,9 +99,7 @@ FAR void *sbrk(intptr_t incr)
pgincr = MM_NPAGES(incr);
/* Allocate the requested number of pages and map them to the
* break address. If we provide a zero brkaddr to pgalloc(), it
* will create the first block in the correct virtual address
* space and return the start address of that block.
* break address.
*/
allocbase = pgalloc(brkaddr, pgincr);
@ -110,23 +109,10 @@ FAR void *sbrk(intptr_t incr)
goto errout;
}
/* Has the heap been initialized? brkaddr will be zero if the
* memory manager has not yet been initialized.
*/
/* Extend the heap (region 0) */
bytesize = pgincr << MM_PGSHIFT;
if (brkaddr == 0)
{
/* No... then initialize it now */
mm_initialize(USR_HEAP, "Umem", (FAR void *)allocbase, bytesize);
}
else
{
/* Extend the heap (region 0) */
mm_extend(USR_HEAP, (FAR void *)allocbase, bytesize, 0);
}
mm_extend(USR_HEAP, (FAR void *)allocbase, bytesize, 0);
}
return (FAR void *)brkaddr;