pm: use rmutex_xx API for recursive lock

Signed-off-by: ligd <liguiding1@xiaomi.com>

parent f9849c7f60
commit d8ebe98c6c
@@ -79,13 +79,9 @@ struct pm_domain_s
   FAR const struct pm_governor_s *governor;
 
-  /* This semaphore manages mutually exclusive access to the domain state.
-   * It must be initialized to the value 1.
-   */
+  /* Recursive lock for race condition */
 
-  sem_t sem;
-  pid_t holder;
-  unsigned int count;
+  rmutex_t lock;
 };
 
 /* This structure encapsulates all of the global data used by the PM system */
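The sem/holder/count triple collapses into a single rmutex_t that carries the owner and nesting depth internally; roughly the following, though the field names here are illustrative and the real definition lives in nuttx/mutex.h:

/* Illustrative sketch only; see nuttx/mutex.h for the actual layout */

struct rmutex_sketch_s
{
  mutex_t      mutex;   /* Underlying lock; tracks its holder thread */
  unsigned int count;   /* Nesting depth for recursive acquisitions */
};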
@@ -92,10 +92,7 @@ void pm_initialize(void)
 #endif
       pm_set_governor(i, gov);
 
-      nxsem_init(&g_pmglobals.domain[i].sem, 0, 1);
-      g_pmglobals.domain[i].holder = INVALID_PROCESS_ID;
-      g_pmglobals.domain[i].count = 0;
+      nxrmutex_init(&g_pmglobals.domain[i].lock);
     }
 }
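The single nxrmutex_init() call above subsumes all three removed lines: a freshly initialized rmutex is unlocked, has no holder, and has a zero nesting count. A before/after sketch of the equivalence, where domain is a hypothetical pointer to one struct pm_domain_s entry:

/* Before: three steps to set up the manual recursive lock */

nxsem_init(&domain->sem, 0, 1);       /* Semaphore initialized to 1 */
domain->holder = INVALID_PROCESS_ID;  /* No thread owns the lock yet */
domain->count  = 0;                   /* Nesting depth starts at zero */

/* After: one call establishes the same initial state */

nxrmutex_init(&domain->lock);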
@@ -33,62 +33,6 @@
 
 #if defined(CONFIG_PM)
 
-/****************************************************************************
- * Private Functions
- ****************************************************************************/
-
-static int pm_recursive_lock(struct pm_domain_s *pm)
-{
-  pid_t me = gettid();
-  int ret = OK;
-
-  /* Does this thread already hold the semaphore? */
-
-  if (pm->holder == me)
-    {
-      /* Yes.. just increment the reference count */
-
-      pm->count++;
-    }
-  else
-    {
-      /* No.. take the semaphore (perhaps waiting) */
-
-      ret = nxsem_wait_uninterruptible(&pm->sem);
-      if (ret >= 0)
-        {
-          /* Now this thread holds the semaphore */
-
-          pm->holder = me;
-          pm->count = 1;
-        }
-    }
-
-  return ret;
-}
-
-static void pm_recursive_unlock(struct pm_domain_s *pm)
-{
-  DEBUGASSERT(pm->holder == getpid() && pm->count > 0);
-
-  /* If the count would go to zero, then release the semaphore */
-
-  if (pm->count == 1)
-    {
-      /* We no longer hold the semaphore */
-
-      pm->holder = INVALID_PROCESS_ID;
-      pm->count = 0;
-      nxsem_post(&pm->sem);
-    }
-  else
-    {
-      /* We still hold the semaphore. Just decrement the count */
-
-      pm->count--;
-    }
-}
-
 /****************************************************************************
  * Public Functions
  ****************************************************************************/
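Everything the removed helpers did by hand maps onto the shared rmutex implementation: the holder == me fast path and count++ become internal nesting, and the nxsem_wait_uninterruptible() call matches nxrmutex_lock()'s uninterruptible wait. Note that the removed DEBUGASSERT compared pm->holder against getpid() even though the lock path recorded gettid(); retiring the duplicated logic removes that mismatch as well. Should a caller still need the old ownership test, a sketch of how it might look, assuming nxrmutex_is_held() from nuttx/mutex.h reports whether the calling thread owns the lock:

/* Sketch of the old "pm->holder == me" test via the rmutex API;
 * nxrmutex_is_held() is assumed to check against the calling thread.
 */

if (nxrmutex_is_held(&domain->lock))
  {
    /* Already ours: a further nxrmutex_lock() only bumps the count */
  }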
@@ -108,7 +52,7 @@ irqstate_t pm_lock(int domain)
 {
   if (!up_interrupt_context() && !sched_idletask())
     {
-      pm_recursive_lock(&g_pmglobals.domain[domain]);
+      nxrmutex_lock(&g_pmglobals.domain[domain].lock);
     }
 
   return enter_critical_section();
@@ -131,7 +75,7 @@ void pm_unlock(int domain, irqstate_t flags)
 
   if (!up_interrupt_context() && !sched_idletask())
     {
-      pm_recursive_unlock(&g_pmglobals.domain[domain]);
+      nxrmutex_unlock(&g_pmglobals.domain[domain].lock);
     }
 }
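Caller-facing behavior is unchanged: pm_lock() still takes the recursive lock (outside interrupt context and the idle task) and then enters a critical section, while pm_unlock() undoes both. A typical call pattern, assuming PM_IDLE_DOMAIN as the domain identifier:

irqstate_t flags;

flags = pm_lock(PM_IDLE_DOMAIN);   /* Recursive lock + critical section */

/* ... inspect or update the domain's power-management state ... */

pm_unlock(PM_IDLE_DOMAIN, flags);  /* Restore interrupts, drop the lock */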