Feature: implement ticket spinlock

test config: ./tools/configure.sh -l qemu-armv8a:nsh_smp

Pass ostest

Regardless of endianness, the ticket spinlock only checks whether the
next and owner counters are equal.

If they are equal, the lock is free; if they differ, a task holds the
lock.

Signed-off-by: TaiJu Wu <tjwu1217@gmail.com>

Co-authored-by: Xiang Xiao <xiaoxiang781216@gmail.com>
TaiJu Wu 2023-10-05 19:19:08 +00:00 committed by Xiang Xiao
parent 643b9c1abf
commit ffba0d15a5
7 changed files with 119 additions and 17 deletions
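The algorithm described in the commit message works like a take-a-number queue: acquiring the lock atomically takes the next ticket and spins until the owner counter reaches that ticket, and releasing the lock advances the owner counter, so contending CPUs acquire the lock in FIFO order. Below is a minimal, self-contained sketch of that idea using C11 <stdatomic.h>; the ticket_lock struct and the acquire/release function names are illustrative only, not the NuttX API added by this commit.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative ticket lock, not the NuttX implementation. */

struct ticket_lock
{
  atomic_ushort next;    /* Next ticket to hand out */
  atomic_ushort owner;   /* Ticket that currently owns the lock */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
  /* Atomically take a ticket number. */

  uint16_t ticket = atomic_fetch_add(&l->next, 1);

  /* Spin until the owner counter reaches our ticket. */

  while (atomic_load(&l->owner) != ticket)
    {
    }
}

static void ticket_lock_release(struct ticket_lock *l)
{
  /* Advance owner, handing the lock to the next waiter in FIFO order. */

  atomic_fetch_add(&l->owner, 1);
}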

View File

@@ -33,7 +33,7 @@
#include <nuttx/sched_note.h>
#include <sched/sched.h>
#include <nuttx/cache.h>
#include <arch/spinlock.h>
#include <nuttx/spinlock.h>
#include <nuttx/init.h>
#include "init/init.h"

View File

@@ -62,6 +62,7 @@ CONFIG_TESTING_GETPRIME=y
CONFIG_TESTING_OSTEST=y
CONFIG_TESTING_OSTEST_STACKSIZE=16384
CONFIG_TESTING_SMP=y
CONFIG_TICKET_SPINLOCK=y
CONFIG_UART1_BASE=0x9000000
CONFIG_UART1_IRQ=33
CONFIG_UART1_PL011=y

View File

@@ -39,6 +39,24 @@
typedef uint8_t spinlock_t;
#else
#ifdef CONFIG_TICKET_SPINLOCK
union spinlock_u
{
struct
{
uint16_t owner;
uint16_t next;
} tickets;
uint32_t value;
};
typedef union spinlock_u spinlock_t;
# define SP_UNLOCKED (union spinlock_u){{0, 0}}
# define SP_LOCKED (union spinlock_u){{0, 1}}
#else
/* The architecture specific spinlock.h header file must also provide the
* following:
*
@@ -51,6 +69,8 @@ typedef uint8_t spinlock_t;
#include <arch/spinlock.h>
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
@@ -80,7 +100,8 @@ typedef uint8_t spinlock_t;
# define SP_SEV()
#endif
#if defined(CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS) && !defined(__SP_UNLOCK_FUNCTION)
#if !defined(__SP_UNLOCK_FUNCTION) && (defined(CONFIG_TICKET_SPINLOCK) || \
defined(CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS))
# define __SP_UNLOCK_FUNCTION 1
#endif
@@ -199,7 +220,7 @@ void spin_lock_wo_note(FAR volatile spinlock_t *lock);
*
****************************************************************************/
spinlock_t spin_trylock(FAR volatile spinlock_t *lock);
bool spin_trylock(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_trylock_wo_note
@@ -223,7 +244,7 @@ spinlock_t spin_trylock(FAR volatile spinlock_t *lock);
*
****************************************************************************/
spinlock_t spin_trylock_wo_note(FAR volatile spinlock_t *lock);
bool spin_trylock_wo_note(FAR volatile spinlock_t *lock);
/****************************************************************************
* Name: spin_unlock
@@ -285,7 +306,11 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock);
****************************************************************************/
/* bool spin_islocked(FAR spinlock_t lock); */
#define spin_islocked(l) (*(l) == SP_LOCKED)
#ifdef CONFIG_TICKET_SPINLOCK
# define spin_islocked(l) ((*l).tickets.owner != (*l).tickets.next)
#else
# define spin_islocked(l) (*(l) == SP_LOCKED)
#endif
/****************************************************************************
* Name: spin_setbit
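One way to read the macros above: with CONFIG_TICKET_SPINLOCK, a lock is free exactly when its two counters match, so SP_UNLOCKED ({{0, 0}}) reads as unlocked and SP_LOCKED ({{0, 1}}) reads as locked. A small illustrative check, assuming CONFIG_TICKET_SPINLOCK=y (the function name is made up for this sketch):

#include <assert.h>
#include <nuttx/spinlock.h>

static void islocked_example(void)
{
  spinlock_t lock = SP_UNLOCKED;   /* owner == next == 0 */

  assert(!spin_islocked(&lock));   /* counters equal: lock is free */

  lock = SP_LOCKED;                /* owner = 0, next = 1 */
  assert(spin_islocked(&lock));    /* counters differ: lock is held */
}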

View File

@@ -308,6 +308,16 @@ config SPINLOCK
CONFIG_ARCH_HAVE_MULTICPU. This permits the use of spinlocks in
other novel architectures.
if SPINLOCK
config TICKET_SPINLOCK
bool "Use ticket Spinlocks"
default n
---help---
Use ticket spinlock algorithm.
endif # SPINLOCK
config IRQCHAIN
bool "Enable multi handler sharing a IRQ"
default n

View File

@@ -120,7 +120,7 @@ static bool irq_waitlock(int cpu)
* for the deadlock condition.
*/
while (spin_trylock_wo_note(&g_cpu_irqlock) == SP_LOCKED)
while (!spin_trylock_wo_note(&g_cpu_irqlock))
{
/* Is a pause request pending? */

View File

@@ -40,4 +40,8 @@ if(CONFIG_SPINLOCK)
list(APPEND CSRCS spinlock.c)
endif()
if(CONFIG_TICKET_SPINLOCK)
list(APPEND CSRCS spinlock.c)
endif()
target_sources(sched PRIVATE ${CSRCS})

View File

@@ -32,9 +32,13 @@
#include <nuttx/sched_note.h>
#include <arch/irq.h>
#ifdef CONFIG_TICKET_SPINLOCK
# include <stdatomic.h>
#endif
#include "sched/sched.h"
#ifdef CONFIG_SPINLOCK
#if defined(CONFIG_SPINLOCK) || defined(CONFIG_TICKET_SPINLOCK)
/****************************************************************************
* Public Functions
@@ -71,7 +75,12 @@ void spin_lock(FAR volatile spinlock_t *lock)
sched_note_spinlock(this_task(), lock, NOTE_SPINLOCK_LOCK);
#endif
#ifdef CONFIG_TICKET_SPINLOCK
uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
while (atomic_load(&lock->tickets.owner) != ticket)
#else /* CONFIG_SPINLOCK */
while (up_testset(lock) == SP_LOCKED)
#endif
{
SP_DSB();
SP_WFE();
@@ -109,7 +118,12 @@ void spin_lock(FAR volatile spinlock_t *lock)
void spin_lock_wo_note(FAR volatile spinlock_t *lock)
{
#ifdef CONFIG_TICKET_SPINLOCK
uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
while (atomic_load(&lock->tickets.owner) != ticket)
#else /* CONFIG_TICKET_SPINLOCK */
while (up_testset(lock) == SP_LOCKED)
#endif
{
SP_DSB();
SP_WFE();
@@ -129,15 +143,15 @@ void spin_lock_wo_note(FAR volatile spinlock_t *lock)
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* SP_LOCKED - Failure, the spinlock was already locked
* SP_UNLOCKED - Success, the spinlock was successfully locked
* false - Failure, the spinlock was already locked
* true - Success, the spinlock was successfully locked
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
spinlock_t spin_trylock(FAR volatile spinlock_t *lock)
bool spin_trylock(FAR volatile spinlock_t *lock)
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
/* Notify that we are waiting for a spinlock */
@@ -145,7 +159,27 @@ spinlock_t spin_trylock(FAR volatile spinlock_t *lock)
sched_note_spinlock(this_task(), lock, NOTE_SPINLOCK_LOCK);
#endif
#ifdef CONFIG_TICKET_SPINLOCK
uint16_t ticket = atomic_load(&lock->tickets.next);
spinlock_t old =
{
{
ticket, ticket
}
};
spinlock_t new =
{
{
ticket, ticket + 1
}
};
if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
#else /* CONFIG_TICKET_SPINLOCK */
if (up_testset(lock) == SP_LOCKED)
#endif /* CONFIG_TICKET_SPINLOCK */
{
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
/* Notify that we abort for a spinlock */
@@ -153,7 +187,7 @@ spinlock_t spin_trylock(FAR volatile spinlock_t *lock)
sched_note_spinlock(this_task(), lock, NOTE_SPINLOCK_ABORT);
#endif
SP_DSB();
return SP_LOCKED;
return false;
}
#ifdef CONFIG_SCHED_INSTRUMENTATION_SPINLOCKS
@@ -162,7 +196,7 @@ spinlock_t spin_trylock(FAR volatile spinlock_t *lock)
sched_note_spinlock(this_task(), lock, NOTE_SPINLOCK_LOCKED);
#endif
SP_DMB();
return SP_UNLOCKED;
return true;
}
/****************************************************************************
@@ -179,24 +213,44 @@ spinlock_t spin_trylock(FAR volatile spinlock_t *lock)
* lock - A reference to the spinlock object to lock.
*
* Returned Value:
* SP_LOCKED - Failure, the spinlock was already locked
* SP_UNLOCKED - Success, the spinlock was successfully locked
* false - Failure, the spinlock was already locked
* true - Success, the spinlock was successfully locked
*
* Assumptions:
* Not running at the interrupt level.
*
****************************************************************************/
spinlock_t spin_trylock_wo_note(FAR volatile spinlock_t *lock)
bool spin_trylock_wo_note(FAR volatile spinlock_t *lock)
{
#ifdef CONFIG_TICKET_SPINLOCK
uint16_t ticket = atomic_load(&lock->tickets.next);
spinlock_t old =
{
{
ticket, ticket
}
};
spinlock_t new =
{
{
ticket, ticket + 1
}
};
if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
#else /* CONFIG_TICKET_SPINLOCK */
if (up_testset(lock) == SP_LOCKED)
#endif /* CONFIG_TICKET_SPINLOCK */
{
SP_DSB();
return SP_LOCKED;
return false;
}
SP_DMB();
return SP_UNLOCKED;
return true;
}
/****************************************************************************
@@ -226,7 +280,11 @@ void spin_unlock(FAR volatile spinlock_t *lock)
#endif
SP_DMB();
#ifdef CONFIG_TICKET_SPINLOCK
atomic_fetch_add(&lock->tickets.owner, 1);
#else
*lock = SP_UNLOCKED;
#endif
SP_DSB();
SP_SEV();
}
@@ -255,7 +313,11 @@ void spin_unlock(FAR volatile spinlock_t *lock)
void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
{
SP_DMB();
#ifdef CONFIG_TICKET_SPINLOCK
atomic_fetch_add(&lock->tickets.owner, 1);
#else
*lock = SP_UNLOCKED;
#endif
SP_DSB();
SP_SEV();
}
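Taken together, the public API stays the same for blocking callers, while spin_trylock() and spin_trylock_wo_note() now return a plain bool (true on success), which is what the irq_waitlock() change above relies on. A hedged usage sketch of the API after this commit; the lock, counter, and function names below are illustrative, and the critical-section body is just an example.

#include <stdbool.h>
#include <nuttx/spinlock.h>

static spinlock_t g_example_lock = SP_UNLOCKED;
static int g_example_count;

static void blocking_increment(void)
{
  spin_lock(&g_example_lock);     /* FIFO wait with CONFIG_TICKET_SPINLOCK */
  g_example_count++;              /* Critical section */
  spin_unlock(&g_example_lock);   /* Hands the lock to the next ticket */
}

static bool nonblocking_increment(void)
{
  if (!spin_trylock(&g_example_lock))  /* Returns bool after this commit */
    {
      return false;                    /* Lock busy: caller may retry */
    }

  g_example_count++;
  spin_unlock(&g_example_lock);
  return true;
}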