mm: Move mm_heap_s related stuff to a private header file

Moving these definitions into mm/mm_heap/mm.h improves modularity and
reduces the amount of allocator-internal information exposed to other
modules.

Change-Id: I085b36adb38993a979625a1f4c252d364a15afa1

parent 2e887a1c22
commit 635cfadc25
@@ -94,171 +94,18 @@
# undef CONFIG_MM_KERNEL_HEAP
#endif

/* Chunk Header Definitions *************************************************/

/* These definitions define the characteristics of the allocator
 *
 * MM_MIN_SHIFT is used to define MM_MIN_CHUNK.
 * MM_MIN_CHUNK - is the smallest physical chunk that can be allocated.  It
 *   must be at least as large as sizeof(struct mm_freenode_s).  Larger values
 *   may improve performance slightly, but will waste memory due to
 *   quantization losses.
 *
 * MM_MAX_SHIFT is used to define MM_MAX_CHUNK
 * MM_MAX_CHUNK is the largest, contiguous chunk of memory that can be
 *   allocated.  It can range from 16 bytes to 4 Gb.  Larger values of
 *   MM_MAX_SHIFT can cause larger data structure sizes and, perhaps,
 *   minor performance losses.
 */

#if defined(CONFIG_MM_SMALL) && UINTPTR_MAX <= UINT32_MAX
/* Two byte offsets; Pointers may be 2 or 4 bytes;
 * sizeof(struct mm_freenode_s) is 8 or 12 bytes.
 * REVISIT: We could do better on machines with 16-bit addressing.
 */

#  define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  define MM_MAX_SHIFT B2C_SHIFT(15)  /* 32 Kb */

#elif defined(CONFIG_HAVE_LONG_LONG)
/* Four byte offsets; Pointers may be 4 or 8 bytes
 * sizeof(struct mm_freenode_s) is 16 or 24 bytes.
 */

#  if UINTPTR_MAX <= UINT32_MAX
#    define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  elif UINTPTR_MAX <= UINT64_MAX
#    define MM_MIN_SHIFT B2C_SHIFT( 5)  /* 32 bytes */
#  endif
#  define MM_MAX_SHIFT B2C_SHIFT(22)  /*  4 Mb */

#else
/* Four byte offsets; Pointers must be 4 bytes.
 * sizeof(struct mm_freenode_s) is 16 bytes.
 */

#  define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  define MM_MAX_SHIFT B2C_SHIFT(22)  /*  4 Mb */
#endif

/* All other definitions derive from these two */

#define MM_MIN_CHUNK     (1 << MM_MIN_SHIFT)
#define MM_MAX_CHUNK     (1 << MM_MAX_SHIFT)
#define MM_NNODES        (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)

#define MM_GRAN_MASK     (MM_MIN_CHUNK-1)
#define MM_ALIGN_UP(a)   (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
#define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)

/* An allocated chunk is distinguished from a free chunk by bit 31 (or 15)
 * of the 'preceding' chunk size.  If set, then this is an allocated chunk.
 */

#ifdef CONFIG_MM_SMALL
# define MM_ALLOC_BIT    0x8000
#else
# define MM_ALLOC_BIT    0x80000000
#endif

#define MM_IS_ALLOCATED(n) \
  ((int)((struct mm_allocnode_s*)(n)->preceding) < 0)

/****************************************************************************
 * Public Types
 ****************************************************************************/

/* Determines the size of the chunk size/offset type */

#ifdef CONFIG_MM_SMALL
typedef uint16_t mmsize_t;
#  define MMSIZE_MAX UINT16_MAX
#else
typedef uint32_t mmsize_t;
#  define MMSIZE_MAX UINT32_MAX
#endif

/* This describes an allocated chunk.  An allocated chunk is
 * distinguished from a free chunk by bit 15/31 of the 'preceding' chunk
 * size.  If set, then this is an allocated chunk.
 */

struct mm_allocnode_s
{
  mmsize_t size;       /* Size of this chunk */
  mmsize_t preceding;  /* Size of the preceding chunk */
};

/* What is the size of the allocnode? */

#ifdef CONFIG_MM_SMALL
# define SIZEOF_MM_ALLOCNODE B2C(4)
#else
# define SIZEOF_MM_ALLOCNODE B2C(8)
#endif

#define CHECK_ALLOCNODE_SIZE \
  DEBUGASSERT(sizeof(struct mm_allocnode_s) == SIZEOF_MM_ALLOCNODE)

/* This describes a free chunk */

struct mm_freenode_s
{
  mmsize_t size;                   /* Size of this chunk */
  mmsize_t preceding;              /* Size of the preceding chunk */
  FAR struct mm_freenode_s *flink; /* Supports a doubly linked list */
  FAR struct mm_freenode_s *blink;
};

struct mm_delaynode_s
{
  struct mm_delaynode_s *flink;
};

/* What is the size of the freenode? */

#define MM_PTR_SIZE sizeof(FAR struct mm_freenode_s *)
#define SIZEOF_MM_FREENODE (SIZEOF_MM_ALLOCNODE + 2*MM_PTR_SIZE)

#define CHECK_FREENODE_SIZE \
  DEBUGASSERT(sizeof(struct mm_freenode_s) == SIZEOF_MM_FREENODE)

/* This describes one heap (possibly with multiple regions) */

struct mm_heap_impl_s; /* Forward reference */
struct mm_heap_s
{
  /* Mutually exclusive access to this data set is enforced with
   * the following un-named semaphore.
   */

  sem_t mm_semaphore;
  pid_t mm_holder;
  int   mm_counts_held;

  /* This is the size of the heap provided to mm */

  size_t mm_heapsize;

  /* These are the first and last nodes of the heap */

  FAR struct mm_allocnode_s *mm_heapstart[CONFIG_MM_REGIONS];
  FAR struct mm_allocnode_s *mm_heapend[CONFIG_MM_REGIONS];

#if CONFIG_MM_REGIONS > 1
  int mm_nregions;
#endif

  /* All free nodes are maintained in a doubly linked list.  This
   * array provides some hooks into the list at various points to
   * speed searches for free nodes.
   */

  struct mm_freenode_s mm_nodelist[MM_NNODES];

  /* Free delay list, for situations where free can't be performed
   * immediately
   */

  struct mm_delaynode_s *mm_delaylist;
  struct mm_heap_impl_s *mm_impl;
};

#define MM_IS_VALID(heap) ((heap) != NULL && (heap)->mm_impl != NULL)

/****************************************************************************
 * Public Data
 ****************************************************************************/
@@ -512,20 +359,6 @@ FAR void kmm_checkcorruption(void);

#endif /* CONFIG_DEBUG_MM */

/* Functions contained in mm_shrinkchunk.c **********************************/

void mm_shrinkchunk(FAR struct mm_heap_s *heap,
                    FAR struct mm_allocnode_s *node, size_t size);

/* Functions contained in mm_addfreechunk.c *********************************/

void mm_addfreechunk(FAR struct mm_heap_s *heap,
                     FAR struct mm_freenode_s *node);

/* Functions contained in mm_size2ndx.c *************************************/

int mm_size2ndx(size_t size);

#undef EXTERN
#ifdef __cplusplus
}
mm/mm_heap/mm.h (new file, 222 lines)
@@ -0,0 +1,222 @@
/****************************************************************************
 * mm/mm_heap/mm.h
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

#ifndef __MM_MM_HEAP_MM_H
#define __MM_MM_HEAP_MM_H

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <stdbool.h>
#include <string.h>
#include <semaphore.h>

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

/* Configuration ************************************************************/

/* Chunk Header Definitions *************************************************/

/* These definitions define the characteristics of the allocator
 *
 * MM_MIN_SHIFT is used to define MM_MIN_CHUNK.
 * MM_MIN_CHUNK - is the smallest physical chunk that can be allocated.  It
 *   must be at least as large as sizeof(struct mm_freenode_s).  Larger values
 *   may improve performance slightly, but will waste memory due to
 *   quantization losses.
 *
 * MM_MAX_SHIFT is used to define MM_MAX_CHUNK
 * MM_MAX_CHUNK is the largest, contiguous chunk of memory that can be
 *   allocated.  It can range from 16 bytes to 4 Gb.  Larger values of
 *   MM_MAX_SHIFT can cause larger data structure sizes and, perhaps,
 *   minor performance losses.
 */

#if defined(CONFIG_MM_SMALL) && UINTPTR_MAX <= UINT32_MAX
/* Two byte offsets; Pointers may be 2 or 4 bytes;
 * sizeof(struct mm_freenode_s) is 8 or 12 bytes.
 * REVISIT: We could do better on machines with 16-bit addressing.
 */

#  define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  define MM_MAX_SHIFT B2C_SHIFT(15)  /* 32 Kb */

#elif defined(CONFIG_HAVE_LONG_LONG)
/* Four byte offsets; Pointers may be 4 or 8 bytes
 * sizeof(struct mm_freenode_s) is 16 or 24 bytes.
 */

#  if UINTPTR_MAX <= UINT32_MAX
#    define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  elif UINTPTR_MAX <= UINT64_MAX
#    define MM_MIN_SHIFT B2C_SHIFT( 5)  /* 32 bytes */
#  endif
#  define MM_MAX_SHIFT B2C_SHIFT(22)  /*  4 Mb */

#else
/* Four byte offsets; Pointers must be 4 bytes.
 * sizeof(struct mm_freenode_s) is 16 bytes.
 */

#  define MM_MIN_SHIFT B2C_SHIFT( 4)  /* 16 bytes */
#  define MM_MAX_SHIFT B2C_SHIFT(22)  /*  4 Mb */
#endif

/* All other definitions derive from these two */

#define MM_MIN_CHUNK     (1 << MM_MIN_SHIFT)
#define MM_MAX_CHUNK     (1 << MM_MAX_SHIFT)
#define MM_NNODES        (MM_MAX_SHIFT - MM_MIN_SHIFT + 1)

#define MM_GRAN_MASK     (MM_MIN_CHUNK-1)
#define MM_ALIGN_UP(a)   (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
#define MM_ALIGN_DOWN(a) ((a) & ~MM_GRAN_MASK)

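As an aside, the granularity macros above are easiest to follow with a small worked example. The following standalone sketch (hypothetical MY_* names, plain C, assuming a byte-addressable target where B2C_SHIFT(4) is simply 4) shows how a request is rounded to the 16-byte chunk granularity:

#include <stddef.h>
#include <stdio.h>

/* Simplified mirrors of the macros above, assuming MM_MIN_SHIFT == 4
 * (16-byte granularity) on a byte-addressable machine.
 */

#define MY_MIN_SHIFT     4
#define MY_MIN_CHUNK     (1 << MY_MIN_SHIFT)           /* 16 bytes */
#define MY_GRAN_MASK     (MY_MIN_CHUNK - 1)
#define MY_ALIGN_UP(a)   (((a) + MY_GRAN_MASK) & ~MY_GRAN_MASK)
#define MY_ALIGN_DOWN(a) ((a) & ~MY_GRAN_MASK)

int main(void)
{
  size_t request = 23;

  /* A 23-byte request is padded up to the next 16-byte boundary: 32 */

  printf("align_up(%zu)   = %zu\n", request, (size_t)MY_ALIGN_UP(request));

  /* Rounding down snaps to the previous boundary instead: 16 */

  printf("align_down(%zu) = %zu\n", request, (size_t)MY_ALIGN_DOWN(request));
  return 0;
}

A 23-byte request therefore occupies a 32-byte chunk; the difference is the quantization loss mentioned in the comment above.
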
/* An allocated chunk is distinguished from a free chunk by bit 31 (or 15)
 * of the 'preceding' chunk size.  If set, then this is an allocated chunk.
 */

#ifdef CONFIG_MM_SMALL
# define MM_ALLOC_BIT    0x8000
#else
# define MM_ALLOC_BIT    0x80000000
#endif

#define MM_IS_ALLOCATED(n) \
  ((int)((struct mm_allocnode_s*)(n)->preceding) < 0)

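The allocated bit can likewise be illustrated with a minimal standalone sketch (hypothetical my_* names, 32-bit sizes assumed, i.e. CONFIG_MM_SMALL off):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the allocated-bit convention: the MSB of the
 * 'preceding' field marks the chunk as allocated.
 */

typedef uint32_t my_mmsize_t;

#define MY_ALLOC_BIT 0x80000000u

struct my_node
{
  my_mmsize_t size;       /* Size of this chunk */
  my_mmsize_t preceding;  /* Size of the preceding chunk (MSB = allocated) */
};

int main(void)
{
  struct my_node n = { .size = 64, .preceding = 32 };

  /* Mark the node allocated by setting the MSB of 'preceding' */

  n.preceding |= MY_ALLOC_BIT;

  /* The test mirrors MM_IS_ALLOCATED(): a set MSB makes the signed view
   * negative.
   */

  printf("allocated: %d\n", (int32_t)n.preceding < 0);

  /* The real preceding size is recovered by masking the bit off */

  printf("preceding size: %u\n", n.preceding & ~MY_ALLOC_BIT);
  return 0;
}
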
/****************************************************************************
 * Public Types
 ****************************************************************************/

/* Determines the size of the chunk size/offset type */

#ifdef CONFIG_MM_SMALL
typedef uint16_t mmsize_t;
#  define MMSIZE_MAX UINT16_MAX
#else
typedef uint32_t mmsize_t;
#  define MMSIZE_MAX UINT32_MAX
#endif

/* This describes an allocated chunk.  An allocated chunk is
 * distinguished from a free chunk by bit 15/31 of the 'preceding' chunk
 * size.  If set, then this is an allocated chunk.
 */

struct mm_allocnode_s
{
  mmsize_t size;       /* Size of this chunk */
  mmsize_t preceding;  /* Size of the preceding chunk */
};

/* What is the size of the allocnode? */

#ifdef CONFIG_MM_SMALL
# define SIZEOF_MM_ALLOCNODE B2C(4)
#else
# define SIZEOF_MM_ALLOCNODE B2C(8)
#endif

#define CHECK_ALLOCNODE_SIZE \
  DEBUGASSERT(sizeof(struct mm_allocnode_s) == SIZEOF_MM_ALLOCNODE)

/* This describes a free chunk */

struct mm_freenode_s
{
  mmsize_t size;                   /* Size of this chunk */
  mmsize_t preceding;              /* Size of the preceding chunk */
  FAR struct mm_freenode_s *flink; /* Supports a doubly linked list */
  FAR struct mm_freenode_s *blink;
};

struct mm_delaynode_s
{
  struct mm_delaynode_s *flink;
};

/* What is the size of the freenode? */

#define MM_PTR_SIZE sizeof(FAR struct mm_freenode_s *)
#define SIZEOF_MM_FREENODE (SIZEOF_MM_ALLOCNODE + 2*MM_PTR_SIZE)

#define CHECK_FREENODE_SIZE \
  DEBUGASSERT(sizeof(struct mm_freenode_s) == SIZEOF_MM_FREENODE)

/* This describes one heap (possibly with multiple regions) */

struct mm_heap_impl_s
{
  /* Mutually exclusive access to this data set is enforced with
   * the following un-named semaphore.
   */

  sem_t mm_semaphore;
  pid_t mm_holder;
  int   mm_counts_held;

  /* This is the size of the heap provided to mm */

  size_t mm_heapsize;

  /* These are the first and last nodes of the heap */

  FAR struct mm_allocnode_s *mm_heapstart[CONFIG_MM_REGIONS];
  FAR struct mm_allocnode_s *mm_heapend[CONFIG_MM_REGIONS];

#if CONFIG_MM_REGIONS > 1
  int mm_nregions;
#endif

  /* All free nodes are maintained in a doubly linked list.  This
   * array provides some hooks into the list at various points to
   * speed searches for free nodes.
   */

  struct mm_freenode_s mm_nodelist[MM_NNODES];

  /* Free delay list, for situations where free can't be performed
   * immediately
   */

  struct mm_delaynode_s *mm_delaylist;
};

/* Functions contained in mm_shrinkchunk.c **********************************/

struct mm_heap_s;

void mm_shrinkchunk(FAR struct mm_heap_s *heap,
                    FAR struct mm_allocnode_s *node, size_t size);

/* Functions contained in mm_addfreechunk.c *********************************/

void mm_addfreechunk(FAR struct mm_heap_s *heap,
                     FAR struct mm_freenode_s *node);

/* Functions contained in mm_size2ndx.c *************************************/

int mm_size2ndx(size_t size);

#endif /* __MM_MM_HEAP_MM_H */
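The overall effect of the new private header is a public/private split: other modules keep using the slim struct mm_heap_s with an opaque mm_impl pointer, while the real heap state is visible only inside mm/mm_heap. A simplified, hypothetical sketch of the pattern (plain C, no NuttX types):

#include <stdio.h>
#include <stdlib.h>

/* Public view (what the public mm.h would now expose): only a
 * forward-declared implementation pointer.
 */

struct my_heap_impl;            /* Opaque to users of the heap */

struct my_heap
{
  struct my_heap_impl *impl;    /* Set up by the initialization code */
};

/* Private view (what mm/mm_heap/mm.h would contain): the real state */

struct my_heap_impl
{
  size_t heapsize;
  int    nregions;
};

static void my_heap_init(struct my_heap *heap)
{
  /* The allocator owns and populates the private state */

  heap->impl = calloc(1, sizeof(*heap->impl));
}

int main(void)
{
  struct my_heap heap;

  my_heap_init(&heap);

  /* Callers can hold a my_heap, but cannot poke at heapsize/nregions
   * without including the private header.
   */

  printf("impl is %s\n", heap.impl != NULL ? "valid" : "missing");
  free(heap.impl);
  return 0;
}

This is the modularity gain named in the commit message: the compiler now enforces that outside code cannot reach into the allocator's internals.
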
@@ -28,6 +28,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -44,20 +46,25 @@
void mm_addfreechunk(FAR struct mm_heap_s *heap,
                     FAR struct mm_freenode_s *node)
{
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_freenode_s *next;
  FAR struct mm_freenode_s *prev;
  int ndx;

  DEBUGASSERT(MM_IS_VALID(heap));
  DEBUGASSERT(node->size >= SIZEOF_MM_FREENODE);
  DEBUGASSERT((node->preceding & MM_ALLOC_BIT) == 0);

  heap_impl = heap->mm_impl;

  /* Convert the size to a nodelist index */

  ndx = mm_size2ndx(node->size);

  /* Now put the new node into the next */

  for (prev = &heap->mm_nodelist[ndx], next = heap->mm_nodelist[ndx].flink;
  for (prev = &heap_impl->mm_nodelist[ndx],
       next = heap_impl->mm_nodelist[ndx].flink;
       next && next->size && next->size < node->size;
       prev = next, next = next->flink);

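The loop above inserts the freed chunk into a size-ordered doubly linked list. A minimal standalone sketch of the same insertion (hypothetical fnode type, no NuttX dependencies) may help when reading the change:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical free-list node; only the fields needed for the sketch */

struct fnode
{
  uint32_t size;
  struct fnode *flink;
  struct fnode *blink;
};

/* Insert 'node' into a size-ordered doubly linked list whose head is a
 * dummy node with size 0, mirroring the loop in mm_addfreechunk().
 */

static void add_sorted(struct fnode *head, struct fnode *node)
{
  struct fnode *prev;
  struct fnode *next;

  for (prev = head, next = head->flink;
       next && next->size && next->size < node->size;
       prev = next, next = next->flink);

  /* Splice the node in between prev and next */

  node->flink = next;
  node->blink = prev;
  prev->flink = node;
  if (next)
    {
      next->blink = node;
    }
}

int main(void)
{
  struct fnode head = { 0, NULL, NULL };
  struct fnode a = { 64, NULL, NULL };
  struct fnode b = { 32, NULL, NULL };

  add_sorted(&head, &a);
  add_sorted(&head, &b);

  for (struct fnode *p = head.flink; p; p = p->flink)
    {
      printf("%u ", p->size);    /* Prints: 32 64 */
    }

  printf("\n");
  return 0;
}
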
@@ -28,6 +28,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -43,13 +45,18 @@

FAR void *mm_brkaddr(FAR struct mm_heap_s *heap, int region)
{
  FAR struct mm_heap_impl_s *heap_impl;
  uintptr_t brkaddr;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

#if CONFIG_MM_REGIONS > 1
  DEBUGASSERT(heap && region < heap->mm_nregions);
  DEBUGASSERT(heap && region < heap_impl->mm_nregions);
#else
  DEBUGASSERT(heap && region == 0);
#endif

  brkaddr = (uintptr_t)heap->mm_heapend[region];
  brkaddr = (uintptr_t)heap_impl->mm_heapend[region];
  return brkaddr ? (FAR void *)(brkaddr + SIZEOF_MM_ALLOCNODE) : 0;
}

@@ -28,6 +28,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/
@@ -50,17 +52,21 @@
void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
               int region)
{
  struct mm_allocnode_s *oldnode;
  struct mm_allocnode_s *newnode;
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_allocnode_s *oldnode;
  FAR struct mm_allocnode_s *newnode;
  uintptr_t blockstart;
  uintptr_t blockend;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Make sure that we were passed valid parameters */

  DEBUGASSERT(heap && mem);
#if CONFIG_MM_REGIONS > 1
  DEBUGASSERT(size >= MIN_EXTEND &&
              (size_t)region < (size_t)heap->mm_nregions);
              (size_t)region < (size_t)heap_impl->mm_nregions);
#else
  DEBUGASSERT(size >= MIN_EXTEND && region == 0);
#endif
@@ -81,7 +87,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
   * immediately follow this node.
   */

  oldnode = heap->mm_heapend[region];
  oldnode = heap_impl->mm_heapend[region];
  DEBUGASSERT((uintptr_t)oldnode + SIZEOF_MM_ALLOCNODE == (uintptr_t)mem);

  /* The size of the old node now extends to the new terminal node.
@@ -103,7 +109,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
  newnode->size = SIZEOF_MM_ALLOCNODE;
  newnode->preceding = oldnode->size | MM_ALLOC_BIT;

  heap->mm_heapend[region] = newnode;
  heap_impl->mm_heapend[region] = newnode;
  mm_givesemaphore(heap);

  /* Finally "free" the new block of memory where the old terminal node was

@@ -30,6 +30,8 @@
#include <nuttx/arch.h>
#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Private Functions
 ****************************************************************************/
@@ -37,15 +39,19 @@
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
{
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_delaynode_s *tmp = mem;
  irqstate_t flags;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Delay the deallocation until a more appropriate time. */

  flags = enter_critical_section();

  tmp->flink = heap->mm_delaylist;
  heap->mm_delaylist = tmp;
  tmp->flink = heap_impl->mm_delaylist;
  heap_impl->mm_delaylist = tmp;

  leave_critical_section(flags);
}

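mm_add_delaylist() defers a free that cannot be performed in the current context by pushing the block onto a singly linked delay list; mm_malloc() drains it later. A minimal sketch of that push/drain pattern (hypothetical globals standing in for the heap state, no critical sections):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical delay node: the freed memory itself is reused as the
 * list node, just like struct mm_delaynode_s.
 */

struct delaynode
{
  struct delaynode *flink;
};

static struct delaynode *g_delaylist;   /* Stand-in for mm_delaylist */

/* Defer a free: push the block onto the delay list */

static void add_delaylist(void *mem)
{
  struct delaynode *tmp = mem;

  tmp->flink  = g_delaylist;
  g_delaylist = tmp;
}

/* Drain the list later, when freeing is safe (e.g. from the allocator) */

static void free_delaylist(void)
{
  struct delaynode *tmp = g_delaylist;

  g_delaylist = NULL;

  while (tmp)
    {
      struct delaynode *next = tmp->flink;

      free(tmp);                /* The real code calls mm_free() here */
      tmp = next;
    }
}

int main(void)
{
  add_delaylist(malloc(32));
  add_delaylist(malloc(64));

  free_delaylist();             /* Both blocks are released here */

  printf("delay list drained: %s\n", g_delaylist == NULL ? "yes" : "no");
  return 0;
}
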
@@ -24,8 +24,13 @@

#include <nuttx/config.h>

#include <assert.h>
#include <debug.h>

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -49,6 +54,11 @@

bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
{
  FAR struct mm_heap_impl_s *heap_impl;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

#if CONFIG_MM_REGIONS > 1
  int i;

@@ -56,10 +66,10 @@ bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
   * between the region's two guard nodes.
   */

  for (i = 0; i < heap->mm_nregions; i++)
  for (i = 0; i < heap_impl->mm_nregions; i++)
    {
      if (mem > (FAR void *)heap->mm_heapstart[i] &&
          mem < (FAR void *)heap->mm_heapend[i])
      if (mem > (FAR void *)heap_impl->mm_heapstart[i] &&
          mem < (FAR void *)heap_impl->mm_heapend[i])
        {
          return true;
        }
@@ -74,8 +84,8 @@ bool mm_heapmember(FAR struct mm_heap_s *heap, FAR void *mem)
   * two guard nodes.
   */

  if (mem > (FAR void *)heap->mm_heapstart[0] &&
      mem < (FAR void *)heap->mm_heapend[0])
  if (mem > (FAR void *)heap_impl->mm_heapstart[0] &&
      mem < (FAR void *)heap_impl->mm_heapend[0])
    {
      return true;
    }

@@ -30,6 +30,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -55,11 +57,16 @@
void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
                  size_t heapsize)
{
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_freenode_s *node;
  uintptr_t heapbase;
  uintptr_t heapend;
#if CONFIG_MM_REGIONS > 1
  int IDX = heap->mm_nregions;
  int IDX;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;
  IDX = heap_impl->mm_nregions;

  /* Writing past CONFIG_MM_REGIONS would have catastrophic consequences */

@@ -71,6 +78,9 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,

#else
# define IDX 0

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;
#endif

#if defined(CONFIG_MM_SMALL) && !defined(CONFIG_SMALL_MEMORY)
@@ -96,7 +106,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,

  /* Add the size of this region to the total size of the heap */

  heap->mm_heapsize += heapsize;
  heap_impl->mm_heapsize += heapsize;

  /* Create two "allocated" guard nodes at the beginning and end of
   * the heap.  These only serve to keep us from allocating outside
@@ -106,24 +116,23 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
   * all available memory.
   */

  heap->mm_heapstart[IDX] = (FAR struct mm_allocnode_s *)heapbase;
  heap->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE;
  heap->mm_heapstart[IDX]->preceding = MM_ALLOC_BIT;

  node = (FAR struct mm_freenode_s *)
    (heapbase + SIZEOF_MM_ALLOCNODE);
  node->size = heapsize - 2*SIZEOF_MM_ALLOCNODE;
  node->preceding = SIZEOF_MM_ALLOCNODE;

  heap->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
    (heapend - SIZEOF_MM_ALLOCNODE);
  heap->mm_heapend[IDX]->size = SIZEOF_MM_ALLOCNODE;
  heap->mm_heapend[IDX]->preceding = node->size | MM_ALLOC_BIT;
  heap_impl->mm_heapstart[IDX] = (FAR struct mm_allocnode_s *)
    heapbase;
  heap_impl->mm_heapstart[IDX]->size = SIZEOF_MM_ALLOCNODE;
  heap_impl->mm_heapstart[IDX]->preceding = MM_ALLOC_BIT;
  node = (FAR struct mm_freenode_s *)
    (heapbase + SIZEOF_MM_ALLOCNODE);
  node->size = heapsize - 2*SIZEOF_MM_ALLOCNODE;
  node->preceding = SIZEOF_MM_ALLOCNODE;
  heap_impl->mm_heapend[IDX] = (FAR struct mm_allocnode_s *)
    (heapend - SIZEOF_MM_ALLOCNODE);
  heap_impl->mm_heapend[IDX]->size = SIZEOF_MM_ALLOCNODE;
  heap_impl->mm_heapend[IDX]->preceding = node->size | MM_ALLOC_BIT;

#undef IDX

#if CONFIG_MM_REGIONS > 1
  heap->mm_nregions++;
  heap_impl->mm_nregions++;
#endif

  /* Add the single, large free node to the nodelist */
@@ -155,10 +164,19 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
void mm_initialize(FAR struct mm_heap_s *heap, FAR void *heapstart,
                   size_t heapsize)
{
  FAR struct mm_heap_impl_s *heap_impl;
  int i;

  minfo("Heap: start=%p size=%zu\n", heapstart, heapsize);

  /* Reserve a block of space for the mm_heap_impl_s context */

  DEBUGASSERT(heapsize > sizeof(struct mm_heap_impl_s));
  heap->mm_impl = (FAR struct mm_heap_impl_s *)heapstart;
  heap_impl = heap->mm_impl;
  heapsize -= sizeof(struct mm_heap_impl_s);
  heapstart = (FAR char *)heapstart + sizeof(struct mm_heap_impl_s);

  /* The following two lines have caused problems for some older ZiLog
   * compilers in the past (but not the more recent).  Life is easier if we
   * just suppress them altogether for those tools.
@@ -173,23 +191,24 @@ void mm_initialize(FAR struct mm_heap_s *heap, FAR void *heapstart,

  /* Set up global variables */

  heap->mm_heapsize = 0;
  heap_impl->mm_heapsize = 0;

#if CONFIG_MM_REGIONS > 1
  heap->mm_nregions = 0;
  heap_impl->mm_nregions = 0;
#endif

  /* Initialize mm_delaylist */

  heap->mm_delaylist = NULL;
  heap_impl->mm_delaylist = NULL;

  /* Initialize the node array */

  memset(heap->mm_nodelist, 0, sizeof(struct mm_freenode_s) * MM_NNODES);
  memset(heap_impl->mm_nodelist, 0,
         sizeof(struct mm_freenode_s) * MM_NNODES);
  for (i = 1; i < MM_NNODES; i++)
    {
      heap->mm_nodelist[i - 1].flink = &heap->mm_nodelist[i];
      heap->mm_nodelist[i].blink     = &heap->mm_nodelist[i - 1];
      heap_impl->mm_nodelist[i - 1].flink = &heap_impl->mm_nodelist[i];
      heap_impl->mm_nodelist[i].blink     = &heap_impl->mm_nodelist[i - 1];
    }

  /* Initialize the malloc semaphore to one (to support one-at-

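The key change in mm_initialize() is that the private mm_heap_impl_s context is now carved out of the start of the managed memory itself, and only the remainder is handed to the allocator. A simplified, hypothetical sketch of that reservation (plain C types standing in for the NuttX ones):

#include <stdio.h>

/* Hypothetical private state, standing in for struct mm_heap_impl_s */

struct heap_impl
{
  size_t heapsize;
  int    nregions;
};

struct heap
{
  struct heap_impl *impl;       /* Public handle only keeps a pointer */
};

static void heap_initialize(struct heap *heap, void *heapstart,
                            size_t heapsize)
{
  /* Reserve space for the private state at the very start of the
   * provided memory, then shrink the region handed to the allocator,
   * mirroring the new mm_initialize() logic.  (Alignment of the raw
   * buffer is assumed adequate for this sketch.)
   */

  heap->impl = (struct heap_impl *)heapstart;
  heapsize  -= sizeof(struct heap_impl);
  heapstart  = (char *)heapstart + sizeof(struct heap_impl);

  heap->impl->heapsize = heapsize;   /* Manage only the remainder */
  heap->impl->nregions = 1;

  printf("usable heap starts at %p, size %zu\n", heapstart, heapsize);
}

int main(void)
{
  static char memory[4096];     /* The raw region given to the heap */
  struct heap heap;

  heap_initialize(&heap, memory, sizeof(memory));
  return 0;
}
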
@@ -30,6 +30,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/
@@ -44,6 +46,7 @@

int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
{
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_allocnode_s *node;
#ifdef CONFIG_DEBUG_ASSERTIONS
  FAR struct mm_allocnode_s *prev;
@@ -59,11 +62,13 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
#endif

  DEBUGASSERT(info);
  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Visit each region */

#if CONFIG_MM_REGIONS > 1
  for (region = 0; region < heap->mm_nregions; region++)
  for (region = 0; region < heap_impl->mm_nregions; region++)
#endif
    {
#ifdef CONFIG_DEBUG_ASSERTIONS
@@ -75,8 +80,8 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)

      mm_takesemaphore(heap);

      for (node = heap->mm_heapstart[region];
           node < heap->mm_heapend[region];
      for (node = heap_impl->mm_heapstart[region];
           node < heap_impl->mm_heapend[region];
           node = (FAR struct mm_allocnode_s *)
                  ((FAR char *)node + node->size))
        {
@@ -121,8 +126,8 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
        }

      minfo("region=%d node=%p heapend=%p\n",
            region, node, heap->mm_heapend[region]);
      DEBUGASSERT(node == heap->mm_heapend[region]);
            region, node, heap_impl->mm_heapend[region]);
      DEBUGASSERT(node == heap_impl->mm_heapend[region]);

      mm_givesemaphore(heap);

@@ -130,9 +135,9 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
    }
#undef region

  DEBUGASSERT(uordblks + fordblks == heap->mm_heapsize);
  DEBUGASSERT(uordblks + fordblks == heap_impl->mm_heapsize);

  info->arena    = heap->mm_heapsize;
  info->arena    = heap_impl->mm_heapsize;
  info->ordblks  = ordblks;
  info->mxordblk = mxordblk;
  info->uordblks = uordblks;

@@ -31,6 +31,8 @@
#include <nuttx/arch.h>
#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/
@@ -46,15 +48,19 @@
static void mm_free_delaylist(FAR struct mm_heap_s *heap)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_delaynode_s *tmp;
  irqstate_t flags;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Move the delay list to local */

  flags = enter_critical_section();

  tmp = heap->mm_delaylist;
  heap->mm_delaylist = NULL;
  tmp = heap_impl->mm_delaylist;
  heap_impl->mm_delaylist = NULL;

  leave_critical_section(flags);

@@ -95,11 +101,15 @@ static void mm_free_delaylist(FAR struct mm_heap_s *heap)

FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
  FAR struct mm_heap_impl_s *heap_impl;
  FAR struct mm_freenode_s *node;
  size_t alignsize;
  void *ret = NULL;
  int ndx;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Firstly, free mm_delaylist */

  mm_free_delaylist(heap);
@@ -144,7 +154,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
   * other mm_nodelist[] entries.
   */

  for (node = heap->mm_nodelist[ndx].flink;
  for (node = heap_impl->mm_nodelist[ndx].flink;
       node && node->size < alignsize;
       node = node->flink)
    {

@@ -30,6 +30,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

@@ -28,6 +28,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

@@ -31,6 +31,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

@@ -36,6 +36,8 @@
#  include <nuttx/irq.h>
#endif

#include "mm_heap/mm.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/
@@ -74,14 +76,19 @@

void mm_seminitialize(FAR struct mm_heap_s *heap)
{
  FAR struct mm_heap_impl_s *heap_impl;

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Initialize the MM semaphore to one (to support one-at-a-time access to
   * private data sets).
   */

  nxsem_init(&heap->mm_semaphore, 0, 1);
  nxsem_init(&heap_impl->mm_semaphore, 0, 1);

  heap->mm_holder      = NO_HOLDER;
  heap->mm_counts_held = 0;
  heap_impl->mm_holder      = NO_HOLDER;
  heap_impl->mm_counts_held = 0;
}

/****************************************************************************
@@ -97,6 +104,7 @@ void mm_seminitialize(FAR struct mm_heap_s *heap)

int mm_trysemaphore(FAR struct mm_heap_s *heap)
{
  FAR struct mm_heap_impl_s *heap_impl;
#ifdef CONFIG_SMP
  irqstate_t flags = enter_critical_section();
#endif
@@ -132,6 +140,9 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)
   * 'else', albeit with a nonsensical PID value.
   */

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  if (my_pid < 0)
    {
      ret = my_pid;
@@ -142,20 +153,20 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)
   * task actually running?
   */

  if (heap->mm_holder == my_pid)
  if (heap_impl->mm_holder == my_pid)
    {
      /* Yes, just increment the number of references held by the current
       * task.
       */

      heap->mm_counts_held++;
      heap_impl->mm_counts_held++;
      ret = OK;
    }
  else
    {
      /* Try to take the semaphore */

      ret = _SEM_TRYWAIT(&heap->mm_semaphore);
      ret = _SEM_TRYWAIT(&heap_impl->mm_semaphore);
      if (ret < 0)
        {
          ret = _SEM_ERRVAL(ret);
@@ -164,8 +175,8 @@ int mm_trysemaphore(FAR struct mm_heap_s *heap)

      /* We have it.  Claim the heap for the current task and return */

      heap->mm_holder      = my_pid;
      heap->mm_counts_held = 1;
      heap_impl->mm_holder      = my_pid;
      heap_impl->mm_counts_held = 1;
      ret = OK;
    }

@@ -187,20 +198,24 @@ errout:

void mm_takesemaphore(FAR struct mm_heap_s *heap)
{
  FAR struct mm_heap_impl_s *heap_impl;
#ifdef CONFIG_SMP
  irqstate_t flags = enter_critical_section();
#endif
  pid_t my_pid = getpid();

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* Does the current task already hold the semaphore? */

  if (heap->mm_holder == my_pid)
  if (heap_impl->mm_holder == my_pid)
    {
      /* Yes, just increment the number of references held by the current
       * task.
       */

      heap->mm_counts_held++;
      heap_impl->mm_counts_held++;
    }
  else
    {
@@ -211,7 +226,7 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
      mseminfo("PID=%d taking\n", my_pid);
      do
        {
          ret = _SEM_WAIT(&heap->mm_semaphore);
          ret = _SEM_WAIT(&heap_impl->mm_semaphore);

          /* The only case that an error should occur here is if the wait
           * was awakened by a signal.
@@ -229,14 +244,15 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
       * the semaphore for the current task and return.
       */

      heap->mm_holder      = my_pid;
      heap->mm_counts_held = 1;
      heap_impl->mm_holder      = my_pid;
      heap_impl->mm_counts_held = 1;
    }

#ifdef CONFIG_SMP
  leave_critical_section(flags);
#endif
  mseminfo("Holder=%d count=%d\n", heap->mm_holder, heap->mm_counts_held);
  mseminfo("Holder=%d count=%d\n", heap_impl->mm_holder,
           heap_impl->mm_counts_held);
}

/****************************************************************************
@@ -249,25 +265,29 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)

void mm_givesemaphore(FAR struct mm_heap_s *heap)
{
  FAR struct mm_heap_impl_s *heap_impl;
#ifdef CONFIG_SMP
  irqstate_t flags = enter_critical_section();
#endif

  DEBUGASSERT(MM_IS_VALID(heap));
  heap_impl = heap->mm_impl;

  /* The current task should be holding at least one reference to the
   * semaphore.
   */

  DEBUGASSERT(heap->mm_holder == getpid());
  DEBUGASSERT(heap_impl->mm_holder == getpid());

  /* Does the current task hold multiple references to the semaphore */

  if (heap->mm_counts_held > 1)
  if (heap_impl->mm_counts_held > 1)
    {
      /* Yes, just release one count and return */

      heap->mm_counts_held--;
      mseminfo("Holder=%d count=%d\n", heap->mm_holder,
               heap->mm_counts_held);
      heap_impl->mm_counts_held--;
      mseminfo("Holder=%d count=%d\n", heap_impl->mm_holder,
               heap_impl->mm_counts_held);
    }
  else
    {
@@ -275,9 +295,9 @@ void mm_givesemaphore(FAR struct mm_heap_s *heap)

      mseminfo("PID=%d giving\n", getpid());

      heap->mm_holder      = NO_HOLDER;
      heap->mm_counts_held = 0;
      DEBUGVERIFY(_SEM_POST(&heap->mm_semaphore));
      heap_impl->mm_holder      = NO_HOLDER;
      heap_impl->mm_counts_held = 0;
      DEBUGVERIFY(_SEM_POST(&heap_impl->mm_semaphore));
    }

#ifdef CONFIG_SMP

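The holder/count bookkeeping above implements a recursive lock: the task that already holds the heap semaphore may take it again without blocking and must release it the same number of times. A minimal sketch of the pattern (POSIX semaphores and getpid() standing in for the NuttX nxsem/_SEM_* wrappers):

#include <stdio.h>
#include <semaphore.h>
#include <sys/types.h>
#include <unistd.h>

#define NO_HOLDER ((pid_t)-1)

/* Hypothetical stand-in for the heap's semaphore bookkeeping */

static sem_t g_sem;
static pid_t g_holder      = NO_HOLDER;
static int   g_counts_held = 0;

static void take(void)
{
  pid_t me = getpid();

  if (g_holder == me)
    {
      g_counts_held++;          /* Already held: just bump the count */
    }
  else
    {
      sem_wait(&g_sem);         /* First acquisition: block if needed */
      g_holder      = me;
      g_counts_held = 1;
    }
}

static void give(void)
{
  if (g_counts_held > 1)
    {
      g_counts_held--;          /* Inner release: drop one reference */
    }
  else
    {
      g_holder      = NO_HOLDER;
      g_counts_held = 0;
      sem_post(&g_sem);         /* Last release: wake any waiter */
    }
}

int main(void)
{
  sem_init(&g_sem, 0, 1);

  take();
  take();                       /* Nested acquisition does not deadlock */
  printf("count while nested: %d\n", g_counts_held);
  give();
  give();

  sem_destroy(&g_sem);
  return 0;
}
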
@@ -28,6 +28,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/

@@ -26,6 +26,8 @@

#include <nuttx/mm/mm.h>

#include "mm_heap/mm.h"

/****************************************************************************
 * Public Functions
 ****************************************************************************/