mm: Simplify the semaphore handling

1. Move all special-case processing into mm_takesemaphore
2. Remove support for the recursive lock
3. Remove the mm_trysemaphore function

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
Change-Id: Ie216a6294ab67c5d427f31b089beb15c532f08fe
Xiang Xiao 2021-07-07 21:21:42 +08:00 committed by Gustavo Henrique Nihei
parent f5279f8583
commit 5fe51b923a
11 changed files with 75 additions and 219 deletions
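
In effect, all of the special-case handling now funnels through a single bool-returning mm_takesemaphore(), and callers that must not block defer their work instead. Below is a minimal sketch of the consolidated decision flow, condensed from the hunks that follow; it is not the verbatim committed code (the CONFIG_BUILD_FLAT/__KERNEL__ guards, the removal of the holder bookkeeping, and the error assertions are elided):

bool mm_takesemaphore(FAR struct mm_heap_s *heap)
{
  if (up_interrupt_context())
    {
      /* Never block in an interrupt handler */

      return false;
    }

  if (getpid() < 0)
    {
      /* getpid() returns -ESRCH while a context switch is in progress */

      return false;
    }

  if (sched_idletask())
    {
      /* The idle task must never wait: try once and report the result */

      return _SEM_TRYWAIT(&heap->mm_semaphore) >= 0;
    }

  /* Normal task context: wait for the semaphore, retrying on EINTR or
   * ECANCELED.
   */

  while (_SEM_WAIT(&heap->mm_semaphore) < 0)
    {
    }

  return true;
}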

View File

@@ -65,9 +65,9 @@ struct mm_heap_s
* Private Functions
****************************************************************************/
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
FAR struct mm_delaynode_s *tmp = mem;
irqstate_t flags;
@@ -79,9 +79,8 @@ static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
heap->mm_delaylist[up_cpu_index()] = tmp;
leave_critical_section(flags);
}
#endif
}
static void mm_free_delaylist(FAR struct mm_heap_s *heap)
{
@@ -212,17 +211,27 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
FAR void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
if (getpid() == -ESRCH)
/* Check current environment */
if (up_interrupt_context())
{
/* getpid() return -ESRCH, means we are in situations
* during context switching(See mm_trysemaphore() & getpid()).
* Then add to mm_delaylist.
*/
/* We are in ISR, add to the delay list */
mm_add_delaylist(heap, mem);
}
else
#endif
if (getpid() < 0)
{
/* getpid() returns -ESRCH, which means we are in a situation
* during context switching (see getpid's comment).
* Then add to the delay list.
*/
mm_add_delaylist(heap, mem);
}
else
{
host_free(mem);
}

View File

@@ -177,8 +177,6 @@ struct mm_heap_s
*/
sem_t mm_semaphore;
pid_t mm_holder;
int mm_counts_held;
/* This is the size of the heap provided to mm */
@@ -216,8 +214,7 @@ struct mm_heap_s
/* Functions contained in mm_sem.c ******************************************/
void mm_seminitialize(FAR struct mm_heap_s *heap);
void mm_takesemaphore(FAR struct mm_heap_s *heap);
int mm_trysemaphore(FAR struct mm_heap_s *heap);
bool mm_takesemaphore(FAR struct mm_heap_s *heap);
void mm_givesemaphore(FAR struct mm_heap_s *heap);
/* Functions contained in mm_shrinkchunk.c **********************************/

View File

@@ -68,21 +68,10 @@ void mm_checkcorruption(FAR struct mm_heap_s *heap)
* Retake the semaphore for each region to reduce latencies
*/
if (up_interrupt_context())
if (mm_takesemaphore(heap) == false)
{
return;
}
else if (sched_idletask())
{
if (mm_trysemaphore(heap))
{
return;
}
}
else
{
mm_takesemaphore(heap);
}
for (node = heap->mm_heapstart[region];
node < heap->mm_heapend[region];

View File

@@ -77,7 +77,7 @@ void mm_extend(FAR struct mm_heap_s *heap, FAR void *mem, size_t size,
/* Take the memory manager semaphore */
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
/* Get the terminal node in the old heap. The block to extend must
* immediately follow this node.

View File

@@ -36,9 +36,9 @@
* Private Functions
****************************************************************************/
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
FAR struct mm_delaynode_s *tmp = mem;
irqstate_t flags;
@@ -50,8 +50,8 @@ static void mm_add_delaylist(FAR struct mm_heap_s *heap, FAR void *mem)
heap->mm_delaylist[up_cpu_index()] = tmp;
leave_critical_section(flags);
}
#endif
}
/****************************************************************************
* Public Functions
@@ -83,39 +83,16 @@ void mm_free(FAR struct mm_heap_s *heap, FAR void *mem)
return;
}
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
/* Check current environment */
if (up_interrupt_context())
{
/* We are in ISR, add to mm_delaylist */
mm_add_delaylist(heap, mem);
return;
}
else if ((ret = mm_trysemaphore(heap)) == 0)
{
/* Got the sem, do free immediately */
}
else if (ret == -ESRCH || sched_idletask())
if (mm_takesemaphore(heap) == false)
{
/* We are in IDLE task & can't get sem, or meet -ESRCH return,
* which means we are in situations during context switching(See
* mm_trysemaphore() & getpid()). Then add to mm_delaylist.
* mm_takesemaphore() & getpid()). Then add to the delay list.
*/
mm_add_delaylist(heap, mem);
return;
}
else
#endif
{
/* We need to hold the MM semaphore while we muck with the
* nodelist.
*/
mm_takesemaphore(heap);
}
DEBUGASSERT(mm_heapmember(heap, mem));
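
The drain side of this mechanism, mm_free_delaylist(), is not shown in this diff; only its call site in mm_malloc() appears further below. As a rough sketch of how the deferred chunks are presumably reclaimed, mirroring the mm_add_delaylist() shown above and assuming the mm_delaynode_s list is linked through flink:

static void mm_free_delaylist(FAR struct mm_heap_s *heap)
{
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
  FAR struct mm_delaynode_s *tmp;
  irqstate_t flags;

  /* Atomically detach the whole per-CPU delay list */

  flags = enter_critical_section();
  tmp = heap->mm_delaylist[up_cpu_index()];
  heap->mm_delaylist[up_cpu_index()] = NULL;
  leave_critical_section(flags);

  /* Free each deferred chunk now that blocking on the semaphore is safe */

  while (tmp)
    {
      FAR void *address = tmp;

      tmp = tmp->flink;
      mm_free(heap, address);
    }
#endif
}

The sim heap in the first hunk presumably keeps its own copy of this helper that ends in host_free() rather than mm_free().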

View File

@@ -86,7 +86,7 @@ void mm_addregion(FAR struct mm_heap_s *heap, FAR void *heapstart,
DEBUGASSERT(heapsize <= MMSIZE_MAX + 1);
#endif
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
/* Adjust the provide heap start and size so that they are both aligned
* with the MM_MIN_CHUNK size.

View File

@@ -73,7 +73,7 @@ int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
* Retake the semaphore for each region to reduce latencies
*/
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
for (node = heap->mm_heapstart[region];
node < heap->mm_heapend[region];

View File

@@ -102,7 +102,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
FAR void *ret = NULL;
int ndx;
/* Firstly, free mm_delaylist */
/* Free the delay list first */
mm_free_delaylist(heap);
@@ -130,7 +130,7 @@ FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
/* We need to hold the MM semaphore while we muck with the nodelist. */
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
/* Get the location in the node list to start the search. Special case
* really big allocations

View File

@@ -115,7 +115,7 @@ FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
* nodelist.
*/
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
/* Get the node associated with the allocation and the next node after
* the allocation.

View File

@@ -107,7 +107,7 @@ FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
/* We need to hold the MM semaphore while we muck with the nodelist. */
mm_takesemaphore(heap);
DEBUGVERIFY(mm_takesemaphore(heap));
DEBUGASSERT(oldnode->preceding & MM_ALLOC_BIT);
DEBUGASSERT(mm_heapmember(heap, oldmem));

View File

@@ -29,35 +29,12 @@
#include <assert.h>
#include <debug.h>
#include <nuttx/arch.h>
#include <nuttx/semaphore.h>
#include <nuttx/mm/mm.h>
#include "mm_heap/mm.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* This is a special value that indicates that there is no holder of the
* semaphore. The valid range of PIDs is 0-32767 and any value outside of
* that range could be used (except -ESRCH which is a special return value
* from getpid())
*/
#define NO_HOLDER ((pid_t)-1)
/* Define MONITOR_MM_SEMAPHORE to enable semaphore state monitoring */
#ifdef MONITOR_MM_SEMAPHORE
# define msemerr _err
# define msemwarn _warn
# define mseminfo _info
#else
# define msemerr _none
# define msemwarn _none
# define mseminfo _none
#endif
/****************************************************************************
* Public Functions
****************************************************************************/
@@ -77,127 +54,67 @@ void mm_seminitialize(FAR struct mm_heap_s *heap)
*/
_SEM_INIT(&heap->mm_semaphore, 0, 1);
heap->mm_holder = NO_HOLDER;
heap->mm_counts_held = 0;
}
/****************************************************************************
* Name: mm_trysemaphore
*
* Description:
* Try to take the MM mutex. This is called only from the OS in certain
* conditions when it is necessary to have exclusive access to the memory
* manager but it is impossible to wait on a semaphore (e.g., the idle
* process when it performs its background memory cleanup).
*
****************************************************************************/
int mm_trysemaphore(FAR struct mm_heap_s *heap)
{
pid_t my_pid = getpid();
int ret;
/* getpid() returns the task ID of the task at the head of the ready-to-
* run task list. mm_trysemaphore() may be called during context
* switches. There are certain situations during context switching when
* the OS data structures are in flux and where the current task (i.e.,
* the task at the head of the ready-to-run task list) is not actually
* running. Granting the semaphore access in that case is known to result
* in heap corruption as in the following failure scenario.
*
* ---------------------------- -------------------------------
* TASK A TASK B
* ---------------------------- -------------------------------
* Begins memory allocation.
* - Holder is set to TASK B
* <---- Task B preempted, Task A runs
* Task A exits
* - Current task set to Task B
* Free tcb and stack memory
* - Since holder is Task B,
* memory manager is re-
* entered, and
* - Heap is corrupted.
* ---------------------------- -------------------------------
*
* This is handled by getpid(): If the case where Task B is not actually
* running, then getpid() will return the special value -ESRCH. That will
* avoid taking the fatal 'if' logic and will fall through to use the
* 'else', albeit with a nonsensical PID value.
*/
if (my_pid < 0)
{
ret = my_pid;
goto errout;
}
/* Does the current task already hold the semaphore? Is the current
* task actually running?
*/
if (heap->mm_holder == my_pid)
{
/* Yes, just increment the number of references held by the current
* task.
*/
heap->mm_counts_held++;
ret = OK;
}
else
{
/* Try to take the semaphore */
ret = _SEM_TRYWAIT(&heap->mm_semaphore);
if (ret < 0)
{
ret = _SEM_ERRVAL(ret);
goto errout;
}
/* We have it. Claim the heap for the current task and return */
heap->mm_holder = my_pid;
heap->mm_counts_held = 1;
ret = OK;
}
errout:
return ret;
}
/****************************************************************************
* Name: mm_takesemaphore
*
* Description:
* Take the MM mutex. This is the normal action before all memory
* management actions.
* Take the MM mutex. This may be called from the OS in certain conditions
* when it is impossible to wait on a semaphore:
* 1. The idle process performs the memory corruption check.
* 2. A task/thread frees memory while a task is exiting.
*
* Input Parameters:
* heap - The heap instance whose semaphore will be taken
*
* Returned Value:
* true if the semaphore was taken, otherwise false.
*
****************************************************************************/
void mm_takesemaphore(FAR struct mm_heap_s *heap)
bool mm_takesemaphore(FAR struct mm_heap_s *heap)
{
pid_t my_pid = getpid();
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
/* Check current environment */
/* Does the current task already hold the semaphore? */
if (heap->mm_holder == my_pid)
if (up_interrupt_context())
{
/* Yes, just increment the number of references held by the current
* task.
*/
/* Can't take semaphore in the interrupt handler */
heap->mm_counts_held++;
return false;
}
else
#endif
/* getpid() returns the task ID of the task at the head of the ready-to-
* run task list. mm_takesemaphore() may be called during context
* switches. There are certain situations during context switching when
* the OS data structures are in flux and memory can't be freed immediately
* (e.g. the running thread's stack).
*
* getpid() handles this by returning the special value -ESRCH to
* indicate this situation.
*/
if (getpid() < 0)
{
return false;
}
#if defined(CONFIG_BUILD_FLAT) || defined(__KERNEL__)
else if (sched_idletask())
{
/* Try to take the semaphore */
return _SEM_TRYWAIT(&heap->mm_semaphore) >= 0;
}
#endif
else
{
int ret;
/* Take the semaphore (perhaps waiting) */
mseminfo("PID=%d taking\n", my_pid);
do
{
ret = _SEM_WAIT(&heap->mm_semaphore);
@@ -212,18 +129,10 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
DEBUGASSERT(ret == -EINTR || ret == -ECANCELED);
}
}
while (ret == -EINTR);
while (ret < 0);
/* We have it (or some awful, unexpected error occurred). Claim
* the semaphore for the current task and return.
*/
heap->mm_holder = my_pid;
heap->mm_counts_held = 1;
return true;
}
mseminfo("Holder=%d count=%d\n", heap->mm_holder,
heap->mm_counts_held);
}
/****************************************************************************
@@ -236,30 +145,5 @@ void mm_takesemaphore(FAR struct mm_heap_s *heap)
void mm_givesemaphore(FAR struct mm_heap_s *heap)
{
/* The current task should be holding at least one reference to the
* semaphore.
*/
DEBUGASSERT(heap->mm_holder == getpid());
/* Does the current task hold multiple references to the semaphore */
if (heap->mm_counts_held > 1)
{
/* Yes, just release one count and return */
heap->mm_counts_held--;
mseminfo("Holder=%d count=%d\n", heap->mm_holder,
heap->mm_counts_held);
}
else
{
/* Nope, this is the last reference held by the current task. */
mseminfo("PID=%d giving\n", getpid());
heap->mm_holder = NO_HOLDER;
heap->mm_counts_held = 0;
DEBUGVERIFY(_SEM_POST(&heap->mm_semaphore));
}
DEBUGVERIFY(_SEM_POST(&heap->mm_semaphore));
}
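
Taken together, the callers above fall into two patterns, both already visible in the hunks; they are condensed here only to make the new contract explicit:

/* Paths that can legitimately run from an interrupt, during a context
 * switch, or in the idle task (mm_free, mm_checkcorruption) check the
 * result and defer or skip the work instead of blocking:
 */

if (mm_takesemaphore(heap) == false)
  {
    mm_add_delaylist(heap, mem);  /* or simply return, in the checker */
    return;
  }

/* Paths that presumably only run from normal task context (mm_malloc,
 * mm_realloc, mm_memalign, mm_extend, mm_addregion, mm_mallinfo) are
 * expected to always acquire the semaphore, so the result is consumed
 * only by an assertion in debug builds:
 */

DEBUGVERIFY(mm_takesemaphore(heap));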