SMP: Introduce spin_lock_wo_note() and spin_unlock_wo_note()

These APIs are used in sched_note.c to protect instrumentation data.
The difference between these APIs and the existing spin_lock() and
spin_unlock() is that they do not perform instrumentation, which avoids
a recursive call when SCHED_INSTRUMENTATION_SPINLOCKS=y.

Signed-off-by: Masayuki Ishikawa <Masayuki.Ishikawa@jp.sony.com>
This commit is contained in:
Masayuki Ishikawa 2018-01-17 13:08:03 +09:00
parent e276d4d16d
commit ab3fa89023
3 changed files with 118 additions and 15 deletions

View File

@ -197,6 +197,30 @@ void spin_initializer(FAR struct spinlock_s *lock);
void spin_lock(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_lock_wo_note
*
* Description:
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is the same as the above spin_lock() except that
* it does not perform instrumentation logic.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* None. When the function returns, the spinlock was successfully locked
* by this CPU.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_lock_wo_note(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_trylock
*
@ -268,6 +292,28 @@ void spin_unlock(FAR volatile spinlock_t *lock);
# define spin_unlock(l) do { *(l) = SP_UNLOCKED; } while (0)
#endif
/****************************************************************************
* Name: spin_unlock_wo_note
*
* Description:
* Release one count on a non-reentrant spinlock.
*
* This implementation is the same as the above spin_unlock() except that
* it does not perform instrumentation logic.
*
* Input Parameters:
* lock - A reference to the spinlock object to unlock.
*
* Returned Value:
* None.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_unlock_wo_note(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_unlockr
*

View File

@ -94,6 +94,10 @@ static void note_add(FAR const uint8_t *note, uint8_t notelen);
static struct note_info_s g_note_info;
#ifdef CONFIG_SMP
static volatile spinlock_t g_note_lock;
#endif
/****************************************************************************
* Private Functions
****************************************************************************/
@ -299,21 +303,10 @@ static void note_add(FAR const uint8_t *note, uint8_t notelen)
}
#endif
/* REVISIT: In the single CPU case, the following should be safe because
* the logic is always called within a critical section, but in the SMP
* case we have protection. One option would be to precalculate and
* advance the new head entry before writing the data into the buffer.
* That will eliminate fatal race conditions (although could result in
* single notes being corrupted harmlessly).
*
* But there is a complexity: Advancing the head pointer where the note
* buffer is almost full could advance the head to wrap beyond the tail
* leaving the buffer in a bad state. A solution to this would be to pre-
* remove entries at the tail of the buffer as necessary to make certain
* that there will be space for the new note at the beginning of the
* buffer. I am less certain that this can be done safely in the SMP
* case.
*/
#ifdef CONFIG_SMP
irqstate_t flags = up_irq_save();
spin_lock_wo_note(&g_note_lock);
#endif
/* Get the index to the head of the circular buffer */
@ -345,6 +338,11 @@ static void note_add(FAR const uint8_t *note, uint8_t notelen)
}
g_note_info.ni_head = head;
#ifdef CONFIG_SMP
spin_unlock_wo_note(&g_note_lock);
up_irq_restore(flags);
#endif
}
/****************************************************************************

View File

@ -139,6 +139,38 @@ void spin_lock(FAR volatile spinlock_t *lock)
SP_DMB();
}
/****************************************************************************
* Name: spin_lock_wo_note
*
* Description:
* If this CPU does not already hold the spinlock, then loop until the
* spinlock is successfully locked.
*
* This implementation is the same as the above spin_lock() except that
* it does not perform instrumentation logic.
*
* Input Parameters:
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* None. When the function returns, the spinlock was successfully locked
* by this CPU.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_lock_wo_note(FAR volatile spinlock_t *lock)
{
  /* Busy-wait until the atomic test-and-set reports that the lock was
   * previously unlocked.  Unlike spin_lock(), no instrumentation logic
   * is performed here, so this function is safe to call from the
   * sched_note instrumentation code itself.
   */

  for (; ; )
    {
      if (up_testset(lock) != SP_LOCKED)
        {
          break;
        }

      /* Data sync barrier between lock attempts */

      SP_DSB();
    }

  /* Data memory barrier after acquiring the lock */

  SP_DMB();
}
/****************************************************************************
* Name: spin_unlock
*
@ -171,6 +203,33 @@ void spin_unlock(FAR volatile spinlock_t *lock)
}
#endif
/****************************************************************************
* Name: spin_unlock_wo_note
*
* Description:
* Release one count on a non-reentrant spinlock.
*
* This implementation is the same as the above spin_unlock() except that
* it does not perform instrumentation logic.
*
* Input Parameters:
* lock - A reference to the spinlock object to unlock.
*
* Returned Value:
* None.
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
{
  /* Memory barrier before the release: accesses made while holding the
   * lock must not be reordered past the store that releases it.
   */

  SP_DMB();

  /* Release the lock.  Same as spin_unlock() but with no
   * instrumentation, so it may be called from the sched_note logic
   * without recursing.
   */

  *lock = SP_UNLOCKED;

  /* Sync barrier after the release — presumably ensures the releasing
   * store completes before subsequent accesses (arch-defined; see the
   * SP_DSB definition).
   */

  SP_DSB();
}
/****************************************************************************
* Name: spin_lockr
*