sched/semaphore/spinlock.c: Remove support for re-entrant spinlocks. They seem like a good idea, but they are never used.
This commit is contained in:
parent e805bc0660
commit e1a89d29ee
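For orientation, here is a minimal sketch (not part of this diff) contrasting the simple spinlock API that remains with the re-entrant API this commit removes. The type and function names are taken from the prototypes shown in the hunks below; the calling code and the <nuttx/spinlock.h> include path are assumptions made only for illustration.

/* Illustrative only -- a hypothetical caller, not code from this commit */

#include <nuttx/spinlock.h>               /* Assumed include path */

static spinlock_t g_lock = SP_UNLOCKED;   /* Simple spinlock (kept) */
static struct spinlock_s g_rlock;         /* Re-entrant spinlock (removed) */

static void simple_usage(void)
{
  spin_lock(&g_lock);                     /* Not re-entrant: must not be re-taken */
  /* ... critical section ... */
  spin_unlock(&g_lock);
}

static void reentrant_usage(void)
{
  spin_initializer(&g_rlock);             /* Removed by this commit */
  spin_lockr(&g_rlock);                   /* The same CPU may take the lock again... */
  spin_lockr(&g_rlock);
  spin_unlockr(&g_rlock);                 /* ...but must release it once per take */
  spin_unlockr(&g_rlock);
}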
@@ -66,7 +66,7 @@
/* Memory barriers may be provided in arch/spinlock.h
 *
 * DMB - Data memory barrier.  Assures writes are completed to memory.
 * DSB - Data syncrhonization barrier.
 * DSB - Data synchronization barrier.
 */

#undef __SP_UNLOCK_FUNCTION
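The SP_DMB()/SP_DSB() macros described by this comment are supplied by the architecture and are used by the spin-wait loops later in this diff. As a hedged sketch only, an ARMv7 port's arch/spinlock.h might provide them roughly as follows; these definitions are an assumption for illustration and are not taken from this commit.

/* Hypothetical ARM definitions -- an assumption, not code from this diff */

#define SP_DSB() __asm__ __volatile__ ("dsb" : : : "memory")
#define SP_DMB() __asm__ __volatile__ ("dmb" : : : "memory")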
@@ -96,22 +96,6 @@
# define SP_SECTION
#endif

/****************************************************************************
 * Public Types
 ****************************************************************************/

struct spinlock_s
{
  volatile spinlock_t sp_lock;  /* Indicates if the spinlock is locked or
                                 * not.  See the values SP_LOCKED and
                                 * SP_UNLOCKED. */
#ifdef CONFIG_SMP
  uint8_t  sp_cpu;              /* CPU holding the lock */
  uint16_t sp_count;            /* The count of references by this CPU on
                                 * the lock */
#endif
};

/****************************************************************************
 * Public Function Prototypes
 ****************************************************************************/
@@ -156,22 +140,6 @@ spinlock_t up_testset(volatile FAR spinlock_t *lock);
/* void spin_initialize(FAR spinlock_t *lock, spinlock_t state); */
#define spin_initialize(l,s) do { *(l) = (s); } while (0)

/****************************************************************************
 * Name: spin_initializer
 *
 * Description:
 *   Initialize a re-entrant spinlock object to its initial, unlocked state.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to be initialized.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

void spin_initializer(FAR struct spinlock_s *lock);

/****************************************************************************
 * Name: spin_lock
 *
@@ -242,33 +210,6 @@ void spin_lock_wo_note(FAR volatile spinlock_t *lock);

#define spin_trylock(l) up_testset(l)

/****************************************************************************
 * Name: spin_lockr
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is re-entrant in the sense that it can be called
 *   numerous times from the same CPU without blocking.  Of course,
 *   spin_unlockr() must be called the same number of times.  NOTE: the
 *   thread that originally took the lock may be executing on a different
 *   CPU when it unlocks the spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void spin_lockr(FAR struct spinlock_s *lock);

/****************************************************************************
 * Name: spin_unlock
 *
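As an aside on the non-blocking path that this commit keeps: spin_trylock() expands to up_testset(), which returns the previous lock state, so a return of SP_UNLOCKED means the caller now owns the lock (the same convention the spin-wait loops later in this diff rely on). A minimal, hypothetical caller, assuming <nuttx/spinlock.h> and <stdbool.h> are included:

/* Illustrative only -- a hypothetical caller, not code from this commit */

static spinlock_t g_trylock = SP_UNLOCKED;

static bool try_critical_section(void)
{
  if (spin_trylock(&g_trylock) == SP_UNLOCKED)
    {
      /* The previous state was unlocked, so this caller now holds the lock */

      /* ... critical section ... */

      spin_unlock(&g_trylock);
      return true;
    }

  return false;                           /* Someone else holds the lock */
}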
@@ -314,25 +255,6 @@ void spin_unlock(FAR volatile spinlock_t *lock);

void spin_unlock_wo_note(FAR volatile spinlock_t *lock);

/****************************************************************************
 * Name: spin_unlockr
 *
 * Description:
 *   Release one count on a re-entrant spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void spin_unlockr(FAR struct spinlock_s *lock);

/****************************************************************************
 * Name: spin_islocked
 *
@@ -350,23 +272,6 @@ void spin_unlockr(FAR struct spinlock_s *lock);
/* bool spin_islocked(FAR spinlock_t lock); */
#define spin_islocked(l) (*(l) == SP_LOCKED)

/****************************************************************************
 * Name: spin_islockedr
 *
 * Description:
 *   Test whether a re-entrant spinlock is currently locked.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to test.
 *
 * Returned Value:
 *   A boolean value: true if the spinlock is locked; false if it is unlocked.
 *
 ****************************************************************************/

/* bool spin_islockedr(FAR struct spinlock_s *lock); */
#define spin_islockedr(l) ((l)->sp_lock == SP_LOCKED)

/****************************************************************************
 * Name: spin_setbit
 *
@@ -51,50 +51,10 @@

#ifdef CONFIG_SPINLOCK

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#define IMPOSSIBLE_CPU 0xff

/* REVISIT: What happens if a thread takes a spinlock while running on one
 * CPU, but is suspended, then reassigned to another CPU where it runs and
 * eventually calls spin_unlock().  One solution might be to lock a thread to
 * a CPU if it holds a spinlock.  That would assure that it never runs on
 * any other CPU and avoids such complexities.
 */

#undef CONFIG_SPINLOCK_LOCKDOWN /* Feature not yet available */

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: spin_initializer
 *
 * Description:
 *   Initialize a re-entrant spinlock object to its initial, unlocked state.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to be initialized.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

void spin_initializer(FAR struct spinlock_s *lock)
{
  DEBUGASSERT(lock != NULL);

  lock->sp_lock  = SP_UNLOCKED;
#ifdef CONFIG_SMP
  lock->sp_cpu   = IMPOSSIBLE_CPU;
  lock->sp_count = 0;
#endif
}

/****************************************************************************
 * Name: spin_lock
 *
@@ -230,217 +190,6 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
  SP_DSB();
}

/****************************************************************************
 * Name: spin_lockr
 *
 * Description:
 *   If this CPU does not already hold the spinlock, then loop until the
 *   spinlock is successfully locked.
 *
 *   This implementation is re-entrant in the sense that it can be called
 *   numerous times from the same CPU without blocking.  Of course,
 *   spin_unlockr() must be called the same number of times.  NOTE: the
 *   thread that originally took the lock may be executing on a different
 *   CPU when it unlocks the spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to lock.
 *
 * Returned Value:
 *   None.  When the function returns, the spinlock was successfully locked
 *   by this CPU.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void spin_lockr(FAR struct spinlock_s *lock)
{
#ifdef CONFIG_SMP
  irqstate_t flags;
  uint8_t cpu = this_cpu();

  /* Disable interrupts (all CPUs) */

  flags = up_irq_save();

  /* Do we already hold the lock on this CPU? */

  if (lock->sp_cpu == cpu)
    {
      /* Yes... just increment the number of references we have on the lock */

      lock->sp_count++;
      DEBUGASSERT(lock->sp_lock == SP_LOCKED && lock->sp_count > 0);
    }
  else
    {
#ifdef CONFIG_SPINLOCK_LOCKDOWN
      /* REVISIT: What happens if this thread is suspended, then reassigned
       * to another CPU where it runs and eventually calls spin_unlock().
       * One solution might be to lock a thread to a CPU if it holds a
       * spinlock.  That would assure that it never runs on any other CPU
       * and avoids such complexities.
       */

# warning Missing logic
#endif

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
      /* Notify that we are waiting for a spinlock */

      sched_note_spinlock(this_task(), &lock->sp_lock);
#endif

      /* Take the lock.  REVISIT: We should set an indication in the TCB
       * that the thread is spinning.  This might be useful in determining
       * some scheduling actions?
       */

      while (up_testset(&lock->sp_lock) == SP_LOCKED)
        {
          up_irq_restore(flags);
          sched_yield();
          flags = up_irq_save();
          SP_DSB();
        }

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
      /* Notify that we have the spinlock */

      sched_note_spinlocked(this_task(), &lock->sp_lock);
#endif

      SP_DMB();

      /* Take one count on the lock */

      lock->sp_cpu   = cpu;
      lock->sp_count = 1;
    }

  up_irq_restore(flags);

#else /* CONFIG_SMP */

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
  /* Notify that we are waiting for a spinlock */

  sched_note_spinlock(this_task(), &lock->sp_lock);
#endif

  /* Take the lock.  REVISIT: We should set an indication in the TCB that
   * the thread is spinning.  This might be useful in determining some
   * scheduling actions?
   */

  while (up_testset(&lock->sp_lock) == SP_LOCKED)
    {
      sched_yield();
      SP_DSB();
    }

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
  /* Notify that we have the spinlock */

  sched_note_spinlocked(this_task(), &lock->sp_lock);
#endif

  SP_DMB();
#endif /* CONFIG_SMP */
}

/****************************************************************************
 * Name: spin_unlockr
 *
 * Description:
 *   Release one count on a spinlock.
 *
 * Input Parameters:
 *   lock - A reference to the spinlock object to unlock.
 *
 * Returned Value:
 *   None.
 *
 * Assumptions:
 *   Not running at the interrupt level.
 *
 ****************************************************************************/

void spin_unlockr(FAR struct spinlock_s *lock)
{
#ifdef CONFIG_SMP
  irqstate_t flags;
#ifdef CONFIG_SPINLOCK_LOCKDOWN
  uint8_t cpu = this_cpu();
#endif

  /* Disable interrupts (all CPUs) */

  flags = up_irq_save();

#ifdef CONFIG_SPINLOCK_LOCKDOWN
  /* REVISIT: What happens if this thread took the lock on a different CPU,
   * was suspended, then reassigned to this CPU where it runs and eventually
   * calls spin_unlock().  One solution might be to lock a thread to a CPU if
   * it holds a spinlock.  That would assure that it never runs on any other
   * CPU and avoids such complexities.
   */

  DEBUGASSERT(lock != NULL && lock->sp_lock == SP_LOCKED &&
              lock->sp_cpu == this_cpu() && lock->sp_count > 0);

  /* Do we already hold the lock? */

  if (lock->sp_cpu == cpu)
#else
  /* The alternative is to allow the lock to be released from any CPU */

  DEBUGASSERT(lock != NULL && lock->sp_lock == SP_LOCKED &&
              lock->sp_count > 0);
#endif

    {
      /* Yes... just decrement the number of references we have on the lock */

      if (lock->sp_count <= 1)
        {
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
          /* Notify that we are unlocking the spinlock */

          sched_note_spinunlock(this_task(), &lock->sp_lock);
#endif
          /* The count must be decremented to zero */

          lock->sp_count = 0;
          lock->sp_cpu   = IMPOSSIBLE_CPU;
          lock->sp_lock  = SP_UNLOCKED;
        }
      else
        {
          lock->sp_count--;
        }
    }

  up_irq_restore(flags);

#else /* CONFIG_SMP */

#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
  /* Notify that we are unlocking the spinlock */

  sched_note_spinunlock(this_task(), &lock->sp_lock);
#endif

  /* Just mark the spinlock unlocked */

  DEBUGASSERT(lock != NULL && lock->sp_lock == SP_LOCKED);
  lock->sp_lock = SP_UNLOCKED;

#endif /* CONFIG_SMP */
}

/****************************************************************************
 * Name: spin_setbit
 *