spinlock: Move the inclusion of stdatomic.h to source file

to make rwlock work with C++ source code

Signed-off-by: Xiang Xiao <xiaoxiang@xiaomi.com>
Authored by Xiang Xiao on 2023-11-18 20:13:27 +08:00, committed by Petro Karashchenko
parent f35ec7727d
commit b2f75c2f3d
2 changed files with 38 additions and 30 deletions
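For context on the change below: the fix keeps C11 atomics out of the public header by declaring the lock as a plain integer there, while only the C source file includes <stdatomic.h> and casts to the matching atomic type. A minimal sketch of that header/source split, with illustrative names (mylock.h, mylock.c, and my_read_unlock are not the NuttX API):

/* mylock.h -- included by both C and C++ translation units,
 * so it must not depend on <stdatomic.h>.
 */

typedef int rwlock_t;
void my_read_unlock(volatile rwlock_t *lock);

/* mylock.c -- compiled as C11 only, so it can use <stdatomic.h>
 * and cast the plain int to its atomic counterpart.
 */

#include <stdatomic.h>
#include "mylock.h"

void my_read_unlock(volatile rwlock_t *lock)
{
  /* Assumes int and atomic_int share size and representation,
   * which is what lets a plain int declared in the header be
   * operated on atomically inside the implementation.
   */

  atomic_fetch_sub((atomic_int *)lock, 1);
}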

@@ -41,9 +41,8 @@ extern "C"
 #define EXTERN extern
 #endif
 
-#if defined(CONFIG_RW_SPINLOCK) && !defined(__cplusplus)
-#  include <stdatomic.h>
-typedef atomic_int rwlock_t;
+#if defined(CONFIG_RW_SPINLOCK)
+typedef int rwlock_t;
 # define RW_SP_UNLOCKED 0
 # define RW_SP_READ_LOCKED 1
 # define RW_SP_WRITE_LOCKED -1
@@ -60,10 +59,10 @@ union spinlock_u
 {
   struct
   {
-    uint16_t owner;
-    uint16_t next;
+    unsigned short owner;
+    unsigned short next;
   } tickets;
-  uint32_t value;
+  unsigned int value;
 };
 
 typedef union spinlock_u spinlock_t;
@@ -505,7 +504,7 @@ void spin_unlock_irqrestore_wo_note(FAR spinlock_t *lock, irqstate_t flags);
 # define spin_unlock_irqrestore_wo_note(l, f) up_irq_restore(f)
 #endif
 
-#if defined(CONFIG_RW_SPINLOCK) && !defined(__cplusplus)
+#if defined(CONFIG_RW_SPINLOCK)
 
 /****************************************************************************
  * Name: rwlock_init
@@ -813,7 +812,7 @@ void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 # define write_unlock_irqrestore(l, f) up_irq_restore(f)
 #endif
 
-#endif /* CONFIG_RW_SPINLOCK && !__cplusplus */
+#endif /* CONFIG_RW_SPINLOCK */
 
 #undef EXTERN
 #if defined(__cplusplus)

@@ -25,6 +25,7 @@
 #include <nuttx/config.h>
 
 #include <sys/types.h>
+#include <stdatomic.h>
 #include <sched.h>
 #include <assert.h>
@@ -76,8 +77,9 @@ void spin_lock(FAR volatile spinlock_t *lock)
 #endif
 
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
-  while (atomic_load(&lock->tickets.owner) != ticket)
+  unsigned short ticket =
+    atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.next, 1);
+  while (atomic_load((FAR atomic_ushort *)&lock->tickets.owner) != ticket)
 #else /* CONFIG_SPINLOCK */
   while (up_testset(lock) == SP_LOCKED)
 #endif
@@ -119,8 +121,9 @@ void spin_lock(FAR volatile spinlock_t *lock)
 void spin_lock_wo_note(FAR volatile spinlock_t *lock)
 {
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
-  while (atomic_load(&lock->tickets.owner) != ticket)
+  unsigned short ticket =
+    atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.next, 1);
+  while (atomic_load((FAR atomic_ushort *)&lock->tickets.owner) != ticket)
 #else /* CONFIG_TICKET_SPINLOCK */
   while (up_testset(lock) == SP_LOCKED)
 #endif
@@ -160,7 +163,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
 #endif
 
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_load(&lock->tickets.next);
+  unsigned short ticket =
+    atomic_load((FAR atomic_ushort *)&lock->tickets.next);
 
   spinlock_t old =
     {
@@ -176,7 +180,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
       }
     };
 
-  if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
+  if (!atomic_compare_exchange_strong((FAR atomic_uint *)&lock->value,
+                                      &old.value, new.value))
 #else /* CONFIG_TICKET_SPINLOCK */
   if (up_testset(lock) == SP_LOCKED)
 #endif /* CONFIG_TICKET_SPINLOCK */
@@ -224,7 +229,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
 bool spin_trylock_wo_note(FAR volatile spinlock_t *lock)
 {
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_load(&lock->tickets.next);
+  unsigned short ticket =
+    atomic_load((FAR atomic_ushort *)&lock->tickets.next);
 
   spinlock_t old =
     {
@@ -240,7 +246,8 @@ bool spin_trylock_wo_note(FAR volatile spinlock_t *lock)
       }
     };
 
-  if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
+  if (!atomic_compare_exchange_strong((FAR atomic_uint *)&lock->value,
+                                      &old.value, new.value))
 #else /* CONFIG_TICKET_SPINLOCK */
   if (up_testset(lock) == SP_LOCKED)
 #endif /* CONFIG_TICKET_SPINLOCK */
@@ -281,7 +288,7 @@ void spin_unlock(FAR volatile spinlock_t *lock)
   SP_DMB();
 #ifdef CONFIG_TICKET_SPINLOCK
-  atomic_fetch_add(&lock->tickets.owner, 1);
+  atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.owner, 1);
 #else
   *lock = SP_UNLOCKED;
 #endif
@@ -314,7 +321,7 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
 {
   SP_DMB();
 #ifdef CONFIG_TICKET_SPINLOCK
-  atomic_fetch_add(&lock->tickets.owner, 1);
+  atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.owner, 1);
 #else
   *lock = SP_UNLOCKED;
 #endif
@@ -476,15 +483,15 @@ void read_lock(FAR volatile rwlock_t *lock)
 {
   while (true)
     {
-      int old = atomic_load(lock);
+      int old = atomic_load((FAR atomic_int *)lock);
       if (old <= RW_SP_WRITE_LOCKED)
         {
           DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
           SP_DSB();
           SP_WFE();
         }
-      else if(atomic_compare_exchange_strong(lock, &old, old + 1))
+      else if(atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                             &old, old + 1))
         {
           break;
         }
@@ -521,14 +528,14 @@ bool read_trylock(FAR volatile rwlock_t *lock)
 {
   while (true)
     {
-      int old = atomic_load(lock);
+      int old = atomic_load((FAR atomic_int *)lock);
       if (old <= RW_SP_WRITE_LOCKED)
        {
           DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
           return false;
         }
-      else if (atomic_compare_exchange_strong(lock, &old, old + 1))
+      else if (atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                              &old, old + 1))
         {
           break;
         }
@@ -557,10 +564,10 @@ bool read_trylock(FAR volatile rwlock_t *lock)
 void read_unlock(FAR volatile rwlock_t *lock)
 {
-  DEBUGASSERT(atomic_load(lock) >= RW_SP_READ_LOCKED);
+  DEBUGASSERT(atomic_load((FAR atomic_int *)lock) >= RW_SP_READ_LOCKED);
 
   SP_DMB();
-  atomic_fetch_add(lock, -1);
+  atomic_fetch_sub((FAR atomic_int *)lock, 1);
   SP_DSB();
   SP_SEV();
 }
@@ -594,7 +601,8 @@ void write_lock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
-  while (!atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
+  while (!atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                         &zero, RW_SP_WRITE_LOCKED))
     {
       SP_DSB();
       SP_WFE();
@@ -632,7 +640,8 @@ bool write_trylock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
-  if (atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
+  if (atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                     &zero, RW_SP_WRITE_LOCKED))
     {
       SP_DMB();
       return true;
@@ -663,10 +672,10 @@ void write_unlock(FAR volatile rwlock_t *lock)
 {
   /* Ensure this cpu already get write lock */
 
-  DEBUGASSERT(atomic_load(lock) == RW_SP_WRITE_LOCKED);
+  DEBUGASSERT(atomic_load((FAR atomic_int *)lock) == RW_SP_WRITE_LOCKED);
 
   SP_DMB();
-  atomic_store(lock, RW_SP_UNLOCKED);
+  atomic_store((FAR atomic_int *)lock, RW_SP_UNLOCKED);
   SP_DSB();
   SP_SEV();
 }