From 8e1a042eef1a931e91836a3fb8fad73af6999bfe Mon Sep 17 00:00:00 2001
From: chenrun1
Date: Mon, 29 Jul 2024 21:20:05 +0800
Subject: [PATCH] nuttx/atomic.h: Fix missing type declarations at compile
 time

Summary:
  1. Modify the conditions for choosing among the include paths
     (<atomic>, <stdatomic.h>, or the NuttX fallback)
  2. Add pre-definitions for _Atomic and _Bool when they are missing
  3. Add a NuttX stdatomic implementation: when the toolchain does not
     support atomics, fall back to lib/stdatomic to implement them

Signed-off-by: chenrun1
---
 arch/arm/Kconfig                 |   3 -
 arch/risc-v/Kconfig              |   1 -
 arch/xtensa/Kconfig              |   2 -
 include/nuttx/atomic.h           |  53 +++-
 include/nuttx/lib/stdatomic.h    | 240 ++++++++++++++++
 include/stdbool.h                |   2 +
 libs/libc/machine/CMakeLists.txt |   4 +-
 libs/libc/machine/Kconfig        |   4 -
 libs/libc/machine/Make.defs      |   4 +-
 libs/libc/machine/arch_atomic.c  | 478 +++++++++++++++----------------
 10 files changed, 519 insertions(+), 272 deletions(-)
 create mode 100644 include/nuttx/lib/stdatomic.h
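With this change a consumer simply includes <nuttx/atomic.h> and gets the
same C11-style atomic_* API whichever backend the preprocessor selects
below.  A minimal usage sketch (illustrative only; the reference-counting
names are not part of the patch):

    #include <stdbool.h>
    #include <nuttx/atomic.h>

    static atomic_int g_refs = ATOMIC_VAR_INIT(0);

    static void obj_ref(void)
    {
      atomic_fetch_add(&g_refs, 1);
    }

    static bool obj_unref(void)
    {
      /* The fetch operations return the previous value, so the last
       * reference is gone when the fetched value was 1.
       */

      return atomic_fetch_sub(&g_refs, 1) == 1;
    }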
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2d174dccfe..25646d13fa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -325,7 +325,6 @@ config ARCH_CHIP_RP2040
 	select ARCH_HAVE_TESTSET
 	select ARCH_HAVE_I2CRESET
 	select ARM_HAVE_WFE_SEV
-	select LIBC_ARCH_ATOMIC
 	select ARCH_HAVE_PWM_MULTICHAN
 	select ARCH_BOARD_COMMON
 	---help---
@@ -638,7 +637,6 @@ config ARCH_CHIP_CXD56XX
 config ARCH_CHIP_PHY62XX
 	bool "Phyplus PHY62XX BLE"
 	select ARCH_CORTEXM0
-	select LIBC_ARCH_ATOMIC
 	---help---
 		Phyplus PHY62XX architectures (ARM Cortex-M0).
 
@@ -646,7 +644,6 @@ config ARCH_CHIP_TLSR82
 	bool "Telink TLSR82XX"
 	select ARCH_ARMV6M
 	select ARCH_HAVE_RESET
-	select LIBC_ARCH_ATOMIC
 	---help---
 		Telink tlsr82xx architectures (Customed armv6m)
 
diff --git a/arch/risc-v/Kconfig b/arch/risc-v/Kconfig
index 2da3665d0c..4982e57cd8 100644
--- a/arch/risc-v/Kconfig
+++ b/arch/risc-v/Kconfig
@@ -73,7 +73,6 @@ config ARCH_CHIP_ESP32C3
 	select ARCH_VECNOTIRQ
 	select ARCH_HAVE_MPU
 	select ARCH_HAVE_RESET
-	select LIBC_ARCH_ATOMIC
 	select LIBC_ARCH_MEMCPY
 	select LIBC_ARCH_MEMCHR
 	select LIBC_ARCH_MEMCMP
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 6f68d99b90..304dc8c97c 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -22,7 +22,6 @@ config ARCH_CHIP_ESP32
 	select ARCH_HAVE_TEXT_HEAP
 	select ARCH_VECNOTIRQ
 	select LIBC_PREVENT_STRING_KERNEL
-	select LIBC_ARCH_ATOMIC
 	select LIBC_ARCH_MEMCPY if BUILD_FLAT
 	select LIBC_ARCH_MEMCHR if BUILD_FLAT
 	select LIBC_ARCH_MEMCMP if BUILD_FLAT
@@ -55,7 +54,6 @@ config ARCH_CHIP_ESP32S2
 	select ARCH_HAVE_RESET
 	select ARCH_HAVE_TEXT_HEAP
 	select ARCH_VECNOTIRQ
-	select LIBC_ARCH_ATOMIC
 	select LIBC_ARCH_MEMCPY
 	select LIBC_ARCH_MEMCHR
 	select LIBC_ARCH_MEMCMP
diff --git a/include/nuttx/atomic.h b/include/nuttx/atomic.h
index 1738e6441a..487366264a 100644
--- a/include/nuttx/atomic.h
+++ b/include/nuttx/atomic.h
@@ -25,22 +25,59 @@
  * Included Files
  ****************************************************************************/
 
-#if !defined(__cplusplus) || defined(__clang__)
-#  include <stdatomic.h>
-#elif defined(__has_include) && __has_include(<atomic>)
+#ifdef __has_include
+#  if defined(__cplusplus) && __has_include(<atomic>)
 extern "C++"
 {
-#  include <atomic>
+#    include <atomic>
+
+#    define ATOMIC_VAR_INIT(value) (value)
+
+  using std::atomic_bool;
+  using std::atomic_char;
+  using std::atomic_schar;
+  using std::atomic_uchar;
+  using std::atomic_short;
+  using std::atomic_ushort;
   using std::atomic_int;
   using std::atomic_uint;
-  using std::atomic_ushort;
+  using std::atomic_long;
+  using std::atomic_ulong;
+  using std::atomic_llong;
+  using std::atomic_ullong;
+  using std::atomic_load;
+  using std::atomic_load_explicit;
   using std::atomic_store;
-  using std::atomic_fetch_add;
-  using std::atomic_fetch_sub;
+  using std::atomic_store_explicit;
+  using std::atomic_exchange;
+  using std::atomic_exchange_explicit;
   using std::atomic_compare_exchange_strong;
-
-#  define ATOMIC_VAR_INIT(value) (value)
+  using std::atomic_compare_exchange_strong_explicit;
+  using std::atomic_compare_exchange_weak;
+  using std::atomic_compare_exchange_weak_explicit;
+  using std::atomic_fetch_add;
+  using std::atomic_fetch_add_explicit;
+  using std::atomic_fetch_sub;
+  using std::atomic_fetch_sub_explicit;
+  using std::atomic_fetch_and;
+  using std::atomic_fetch_and_explicit;
+  using std::atomic_fetch_or;
+  using std::atomic_fetch_or_explicit;
+  using std::atomic_fetch_xor;
+  using std::atomic_fetch_xor_explicit;
 }
+#  elif __has_include(<stdatomic.h>)
+#    if !defined(__clang__) && defined(__cplusplus)
+#      define _Atomic
+#    endif
+#    include <stdatomic.h>
+#    include <stdbool.h>
+#  else
+#    include <nuttx/lib/stdatomic.h>
+#  endif
+#else
+#  include <nuttx/lib/stdatomic.h>
 #endif
 
 /****************************************************************************
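The selection above resolves in order: C++ with <atomic> available uses the
C++ header and lifts the std::atomic_* names shown; failing that, a
toolchain <stdatomic.h> is used; failing that (or when the preprocessor
lacks __has_include entirely), the new <nuttx/lib/stdatomic.h> fallback is
used.  The "#define _Atomic" deserves a note (a sketch, assuming a
GCC-style <stdatomic.h>):

    /* _Atomic is a C11 keyword that C++ does not have, so GCC's
     * <stdatomic.h> will not normally parse under a C++ compile.
     * With "#define _Atomic" in effect, a declaration such as
     *
     *   typedef _Atomic int atomic_int;
     *
     * preprocesses to
     *
     *   typedef int atomic_int;
     *
     * The type loses the qualifier, but the atomic_* generic macros
     * still expand to the compiler's __atomic_* builtins (or to the
     * lib/stdatomic fallback), which provide the actual atomicity.
     */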
diff --git a/include/nuttx/lib/stdatomic.h b/include/nuttx/lib/stdatomic.h
new file mode 100644
index 0000000000..65278027a6
--- /dev/null
+++ b/include/nuttx/lib/stdatomic.h
@@ -0,0 +1,240 @@
+/****************************************************************************
+ * include/nuttx/lib/stdatomic.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __INCLUDE_NUTTX_LIB_STDATOMIC_H
+#define __INCLUDE_NUTTX_LIB_STDATOMIC_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#ifndef __ATOMIC_RELAXED
+#  define __ATOMIC_RELAXED 0
+#endif
+
+#ifndef __ATOMIC_CONSUME
+#  define __ATOMIC_CONSUME 1
+#endif
+
+#ifndef __ATOMIC_ACQUIRE
+#  define __ATOMIC_ACQUIRE 2
+#endif
+
+#ifndef __ATOMIC_RELEASE
+#  define __ATOMIC_RELEASE 3
+#endif
+
+#ifndef __ATOMIC_ACQ_REL
+#  define __ATOMIC_ACQ_REL 4
+#endif
+
+#ifndef __ATOMIC_SEQ_CST
+#  define __ATOMIC_SEQ_CST 5
+#endif
+
+#define ATOMIC_VAR_INIT(value) (value)
+
+#define atomic_store_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_store_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_store_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_store_4(obj, val, type) : \
+   __atomic_store_8(obj, val, type))
+
+#define atomic_store(obj, val) atomic_store_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_store_explicit(obj, val, type) atomic_store_n(obj, val, type)
+#define atomic_init(obj, val) atomic_store(obj, val)
+
+#define atomic_load_n(obj, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_load_1(obj, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_load_2(obj, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_load_4(obj, type) : \
+   __atomic_load_8(obj, type))
+
+#define atomic_load(obj) atomic_load_n(obj, __ATOMIC_RELAXED)
+#define atomic_load_explicit(obj, type) atomic_load_n(obj, type)
+
+#define atomic_exchange_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_exchange_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_exchange_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_exchange_4(obj, val, type) : \
+   __atomic_exchange_8(obj, val, type))
+
+#define atomic_exchange(obj, val) atomic_exchange_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_exchange_explicit(obj, val, type) atomic_exchange_n(obj, val, type)
+
+#define atomic_compare_exchange_n(obj, expected, desired, weak, success, failure) \
+  (sizeof(*(obj)) == 1 ? __atomic_compare_exchange_1(obj, expected, desired, weak, success, failure) : \
+   sizeof(*(obj)) == 2 ? __atomic_compare_exchange_2(obj, expected, desired, weak, success, failure) : \
+   sizeof(*(obj)) == 4 ? __atomic_compare_exchange_4(obj, expected, desired, weak, success, failure) : \
+   __atomic_compare_exchange_8(obj, expected, desired, weak, success, failure))
+
+#define atomic_compare_exchange_strong(obj, expected, desired) \
+  atomic_compare_exchange_n(obj, expected, desired, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+#define atomic_compare_exchange_strong_explicit(obj, expected, desired, success, failure) \
+  atomic_compare_exchange_n(obj, expected, desired, false, success, failure)
+#define atomic_compare_exchange_weak(obj, expected, desired) \
+  atomic_compare_exchange_n(obj, expected, desired, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED)
+#define atomic_compare_exchange_weak_explicit(obj, expected, desired, success, failure) \
+  atomic_compare_exchange_n(obj, expected, desired, true, success, failure)
+
+#define atomic_fetch_or_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_or_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_or_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_or_4(obj, val, type) : \
+   __atomic_fetch_or_8(obj, val, type))
+
+#define atomic_fetch_or(obj, val) atomic_fetch_or_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_fetch_or_explicit(obj, val, type) atomic_fetch_or_n(obj, val, type)
+
+#define atomic_fetch_and_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_and_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_and_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_and_4(obj, val, type) : \
+   __atomic_fetch_and_8(obj, val, type))
+
+#define atomic_fetch_and(obj, val) atomic_fetch_and_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_fetch_and_explicit(obj, val, type) atomic_fetch_and_n(obj, val, type)
+
+#define atomic_fetch_xor_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_xor_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_xor_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_xor_4(obj, val, type) : \
+   __atomic_fetch_xor_8(obj, val, type))
+
+#define atomic_fetch_xor(obj, val) atomic_fetch_xor_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_fetch_xor_explicit(obj, val, type) atomic_fetch_xor_n(obj, val, type)
+
+#define atomic_fetch_add_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_add_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_add_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_add_4(obj, val, type) : \
+   __atomic_fetch_add_8(obj, val, type))
+
+#define atomic_fetch_add(obj, val) atomic_fetch_add_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_fetch_add_explicit(obj, val, type) atomic_fetch_add_n(obj, val, type)
+
+#define atomic_fetch_sub_n(obj, val, type) \
+  (sizeof(*(obj)) == 1 ? __atomic_fetch_sub_1(obj, val, type) : \
+   sizeof(*(obj)) == 2 ? __atomic_fetch_sub_2(obj, val, type) : \
+   sizeof(*(obj)) == 4 ? __atomic_fetch_sub_4(obj, val, type) : \
+   __atomic_fetch_sub_8(obj, val, type))
+
+#define atomic_fetch_sub(obj, val) atomic_fetch_sub_n(obj, val, __ATOMIC_RELAXED)
+#define atomic_fetch_sub_explicit(obj, val, type) atomic_fetch_sub_n(obj, val, type)
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+typedef volatile bool atomic_bool;
+typedef volatile char atomic_char;
+typedef volatile signed char atomic_schar;
+typedef volatile unsigned char atomic_uchar;
+typedef volatile short atomic_short;
+typedef volatile unsigned short atomic_ushort;
+typedef volatile int atomic_int;
+typedef volatile unsigned int atomic_uint;
+typedef volatile long atomic_long;
+typedef volatile unsigned long atomic_ulong;
+typedef volatile long long atomic_llong;
+typedef volatile unsigned long long atomic_ullong;
+typedef volatile wchar_t atomic_wchar_t;
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+void __atomic_store_1(FAR volatile void *ptr, uint8_t value, int memorder);
+void __atomic_store_2(FAR volatile void *ptr, uint16_t value, int memorder);
+void __atomic_store_4(FAR volatile void *ptr, uint32_t value, int memorder);
+void __atomic_store_8(FAR volatile void *ptr, uint64_t value, int memorder);
+uint8_t __atomic_load_1(FAR const volatile void *ptr, int memorder);
+uint16_t __atomic_load_2(FAR const volatile void *ptr, int memorder);
+uint32_t __atomic_load_4(FAR const volatile void *ptr, int memorder);
+uint64_t __atomic_load_8(FAR const volatile void *ptr, int memorder);
+uint8_t __atomic_exchange_1(FAR volatile void *ptr, uint8_t value,
+                            int memorder);
+uint16_t __atomic_exchange_2(FAR volatile void *ptr, uint16_t value,
+                             int memorder);
+uint32_t __atomic_exchange_4(FAR volatile void *ptr, uint32_t value,
+                             int memorder);
+uint64_t __atomic_exchange_8(FAR volatile void *ptr, uint64_t value,
+                             int memorder);
+bool __atomic_compare_exchange_1(FAR volatile void *mem, FAR void *expect,
+                                 uint8_t desired, bool weak, int success,
+                                 int failure);
+bool __atomic_compare_exchange_2(FAR volatile void *mem, FAR void *expect,
+                                 uint16_t desired, bool weak, int success,
+                                 int failure);
+bool __atomic_compare_exchange_4(FAR volatile void *mem, FAR void *expect,
+                                 uint32_t desired, bool weak, int success,
+                                 int failure);
+bool __atomic_compare_exchange_8(FAR volatile void *mem, FAR void *expect,
+                                 uint64_t desired, bool weak, int success,
+                                 int failure);
+uint8_t __atomic_fetch_add_1(FAR volatile void *ptr, uint8_t value,
+                             int memorder);
+uint16_t __atomic_fetch_add_2(FAR volatile void *ptr, uint16_t value,
+                              int memorder);
+uint32_t __atomic_fetch_add_4(FAR volatile void *ptr, uint32_t value,
+                              int memorder);
+uint64_t __atomic_fetch_add_8(FAR volatile void *ptr, uint64_t value,
+                              int memorder);
+uint8_t __atomic_fetch_sub_1(FAR volatile void *ptr, uint8_t value,
+                             int memorder);
+uint16_t __atomic_fetch_sub_2(FAR volatile void *ptr, uint16_t value,
+                              int memorder);
+uint32_t __atomic_fetch_sub_4(FAR volatile void *ptr, uint32_t value,
+                              int memorder);
+uint64_t __atomic_fetch_sub_8(FAR volatile void *ptr, uint64_t value,
+                              int memorder);
+uint8_t __atomic_fetch_and_1(FAR volatile void *ptr, uint8_t value,
+                             int memorder);
+uint16_t __atomic_fetch_and_2(FAR volatile void *ptr, uint16_t value,
+                              int memorder);
+uint32_t __atomic_fetch_and_4(FAR volatile void *ptr, uint32_t value,
+                              int memorder);
+uint64_t __atomic_fetch_and_8(FAR volatile void *ptr, uint64_t value,
+                              int memorder);
+uint8_t __atomic_fetch_or_1(FAR volatile void *ptr, uint8_t value,
+                            int memorder);
+uint16_t __atomic_fetch_or_2(FAR volatile void *ptr, uint16_t value,
+                             int memorder);
+uint32_t __atomic_fetch_or_4(FAR volatile void *ptr, uint32_t value,
+                             int memorder);
+uint64_t __atomic_fetch_or_8(FAR volatile void *ptr, uint64_t value,
+                             int memorder);
+uint8_t __atomic_fetch_xor_1(FAR volatile void *ptr, uint8_t value,
+                             int memorder);
+uint16_t __atomic_fetch_xor_2(FAR volatile void *ptr, uint16_t value,
+                              int memorder);
+uint32_t __atomic_fetch_xor_4(FAR volatile void *ptr, uint32_t value,
+                              int memorder);
+uint64_t __atomic_fetch_xor_8(FAR volatile void *ptr, uint64_t value,
+                              int memorder);
+
+#endif /* __INCLUDE_NUTTX_LIB_STDATOMIC_H */
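The fallback header dispatches each generic macro on the operand size to
one of the __atomic_*_{1,2,4,8} functions that arch_atomic.c defines
below.  A sketch of how an expansion lands (illustrative names; not part
of the patch):

    #include <nuttx/lib/stdatomic.h>

    static atomic_int g_state = ATOMIC_VAR_INIT(0);

    /* Claim a one-shot transition of g_state from 0 to 1.  On a
     * 32-bit int, sizeof(*(&g_state)) == 4, so the macro chain
     * expands to
     *
     *   __atomic_compare_exchange_4(&g_state, &expected, 1, false,
     *                               __ATOMIC_RELAXED,
     *                               __ATOMIC_RELAXED);
     *
     * which the library implements as a spinlock-protected
     * compare-and-swap.
     */

    static bool claim(void)
    {
      int expected = 0;
      return atomic_compare_exchange_strong(&g_state, &expected, 1);
    }

Because arch_atomic.c declares every one of these with weak_function, an
architecture can still provide lock-free replacements without changing
this header.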
diff --git a/include/stdbool.h b/include/stdbool.h
index 3a71183139..8c973cccd4 100644
--- a/include/stdbool.h
+++ b/include/stdbool.h
@@ -78,6 +78,8 @@
 #  define false (bool)0
 
 #  define __bool_true_false_are_defined 1
+# else
+#  define _Bool uint8_t
 #  endif /* __cplusplus */
 
 #  endif /* CONFIG_ARCH_STDBOOL_H */
diff --git a/libs/libc/machine/CMakeLists.txt b/libs/libc/machine/CMakeLists.txt
index 21230144d0..bb003e3112 100644
--- a/libs/libc/machine/CMakeLists.txt
+++ b/libs/libc/machine/CMakeLists.txt
@@ -20,6 +20,4 @@
 
 add_subdirectory(${CONFIG_ARCH})
 
-if(CONFIG_LIBC_ARCH_ATOMIC)
-  target_sources(c PRIVATE arch_atomic.c)
-endif()
+target_sources(c PRIVATE arch_atomic.c)
diff --git a/libs/libc/machine/Kconfig b/libs/libc/machine/Kconfig
index 1c1c601186..5b0caacccd 100644
--- a/libs/libc/machine/Kconfig
+++ b/libs/libc/machine/Kconfig
@@ -44,10 +44,6 @@ config ARCH_ROMGETC
 # Default settings for C library functions that may be replaced with
 # architecture-specific versions.
 
-config LIBC_ARCH_ATOMIC - bool - default n - config LIBC_ARCH_MEMCHR bool default n diff --git a/libs/libc/machine/Make.defs b/libs/libc/machine/Make.defs index 2acd957e52..8c82ae76c2 100644 --- a/libs/libc/machine/Make.defs +++ b/libs/libc/machine/Make.defs @@ -18,9 +18,7 @@ # ############################################################################ -ifeq ($(CONFIG_LIBC_ARCH_ATOMIC),y) - CSRCS += arch_atomic.c -endif +CSRCS += arch_atomic.c ifeq ($(CONFIG_ARCH_ARM),y) include $(TOPDIR)/libs/libc/machine/arm/Make.defs diff --git a/libs/libc/machine/arch_atomic.c b/libs/libc/machine/arch_atomic.c index 146b998fba..a04e9ecacf 100644 --- a/libs/libc/machine/arch_atomic.c +++ b/libs/libc/machine/arch_atomic.c @@ -32,286 +32,268 @@ * Pre-processor Definitions ****************************************************************************/ -#define STORE(n, type) \ - \ - void __atomic_store_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - \ - *(FAR type *)ptr = value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ +#define STORE(n, type) \ + \ + void weak_function __atomic_store_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + \ + *(FAR type *)ptr = value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ } -#define LOAD(n, type) \ - \ - type __atomic_load_ ## n (FAR const volatile void *ptr, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - \ - type ret = *(FAR type *)ptr; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define LOAD(n, type) \ + \ + type weak_function __atomic_load_##n (FAR const volatile void *ptr, \ + int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + \ + type ret = *(FAR type *)ptr; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define EXCHANGE(n, type) \ - \ - type __atomic_exchange_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - type ret = *tmp; \ - *tmp = value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define EXCHANGE(n, type) \ + \ + type weak_function __atomic_exchange_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + type ret = *tmp; \ + *tmp = value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define CMP_EXCHANGE(n, type) \ - \ - bool __atomic_compare_exchange_ ## n ( \ - FAR volatile void *mem, \ - FAR void *expect, \ - type desired, \ - bool weak, \ - int success, \ - int failure) \ - { \ - bool ret = false; \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmpmem = (FAR type *)mem; \ - FAR type *tmpexp = (FAR type *)expect; \ - \ - if (*tmpmem == *tmpexp) \ - { \ - ret = true; \ - *tmpmem = desired; \ - } \ - else \ - { \ - *tmpexp = *tmpmem; \ - } \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define CMP_EXCHANGE(n, type) \ + \ + bool weak_function __atomic_compare_exchange_##n (FAR volatile void *mem, \ + FAR void *expect, \ + type desired, bool weak, \ + int success, int failure) \ + { \ + bool ret = false; \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmpmem = (FAR type *)mem; \ + FAR type *tmpexp = (FAR type *)expect; \ + \ + if (*tmpmem == *tmpexp) \ + { \ + ret = true; \ + *tmpmem = 
desired; \ + } \ + else \ + { \ + *tmpexp = *tmpmem; \ + } \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define FETCH_ADD(n, type) \ - \ - type __atomic_fetch_add_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - *tmp = *tmp + value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define FETCH_ADD(n, type) \ + \ + type weak_function __atomic_fetch_add_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + *tmp = *tmp + value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define FETCH_SUB(n, type) \ - \ - type __atomic_fetch_sub_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - *tmp = *tmp - value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define FETCH_SUB(n, type) \ + \ + type weak_function __atomic_fetch_sub_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + *tmp = *tmp - value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define FETCH_AND(n, type) \ - \ - type __atomic_fetch_and_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - *tmp = *tmp & value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define FETCH_AND(n, type) \ + \ + type weak_function __atomic_fetch_and_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + *tmp = *tmp & value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define FETCH_OR(n, type) \ - \ - type __atomic_fetch_or_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - *tmp = *tmp | value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define FETCH_OR(n, type) \ + \ + type weak_function __atomic_fetch_or_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + *tmp = *tmp | value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define FETCH_XOR(n, type) \ - \ - type __atomic_fetch_xor_ ## n (FAR volatile void *ptr, \ - type value, \ - int memorder) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - *tmp = *tmp ^ value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define FETCH_XOR(n, type) \ + \ + type weak_function __atomic_fetch_xor_##n (FAR volatile void *ptr, \ + type value, int memorder) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + *tmp = *tmp ^ value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define SYNC_ADD_FETCH(n, 
type) \ - \ - type __sync_add_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = *tmp + value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_ADD_FETCH(n, type) \ + \ + type weak_function __sync_add_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = *tmp + value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_SUB_FETCH(n, type) \ - \ - type __sync_sub_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = *tmp - value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_SUB_FETCH(n, type) \ + \ + type weak_function __sync_sub_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = *tmp - value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_OR_FETCH(n, type) \ - \ - type __sync_or_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = *tmp | value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_OR_FETCH(n, type) \ + \ + type weak_function __sync_or_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = *tmp | value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_AND_FETCH(n, type) \ - \ - type __sync_and_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = *tmp & value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_AND_FETCH(n, type) \ + \ + type weak_function __sync_and_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = *tmp & value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_XOR_FETCH(n, type) \ - \ - type __sync_xor_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = *tmp ^ value; \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_XOR_FETCH(n, type) \ + \ + type weak_function __sync_xor_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = *tmp ^ value; \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_NAND_FETCH(n, type) \ - \ - type __sync_nand_and_fetch_ ## n ( \ - FAR volatile void *ptr, \ - type value) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - *tmp = ~(*tmp & value); \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return *tmp; \ +#define SYNC_NAND_FETCH(n, type) \ + \ + type weak_function __sync_nand_and_fetch_##n (FAR volatile void *ptr, \ + type value) \ + { \ 
+ irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + *tmp = ~(*tmp & value); \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return *tmp; \ } -#define SYNC_BOOL_CMP_SWAP(n, type) \ - \ - bool __sync_bool_compare_and_swap_ ## n ( \ - FAR volatile void *ptr, \ - type oldvalue, \ - type newvalue) \ - { \ - bool ret = false; \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - \ - if (*tmp == oldvalue) \ - { \ - ret = true; \ - *tmp = newvalue; \ - } \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define SYNC_BOOL_CMP_SWAP(n, type) \ + \ + bool weak_function __sync_bool_compare_and_swap_##n (FAR volatile void *ptr, \ + type oldvalue, \ + type newvalue) \ + { \ + bool ret = false; \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + \ + if (*tmp == oldvalue) \ + { \ + ret = true; \ + *tmp = newvalue; \ + } \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } -#define SYNC_VAL_CMP_SWAP(n, type) \ - \ - type __sync_val_compare_and_swap_ ## n ( \ - FAR volatile void *ptr, \ - type oldvalue, \ - type newvalue) \ - { \ - irqstate_t irqstate = spin_lock_irqsave(NULL); \ - FAR type *tmp = (FAR type *)ptr; \ - type ret = *tmp; \ - \ - if (*tmp == oldvalue) \ - { \ - *tmp = newvalue; \ - } \ - \ - spin_unlock_irqrestore(NULL, irqstate); \ - return ret; \ +#define SYNC_VAL_CMP_SWAP(n, type) \ + \ + type weak_function __sync_val_compare_and_swap_##n (FAR volatile void *ptr, \ + type oldvalue, \ + type newvalue) \ + { \ + irqstate_t irqstate = spin_lock_irqsave(NULL); \ + FAR type *tmp = (FAR type *)ptr; \ + type ret = *tmp; \ + \ + if (*tmp == oldvalue) \ + { \ + *tmp = newvalue; \ + } \ + \ + spin_unlock_irqrestore(NULL, irqstate); \ + return ret; \ } /****************************************************************************