@@ -606,30 +606,30 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
 
 /* Lock-free loads and stores don't need assembler - just aligned accesses */
 /* Silly ordering of `T volatile` is because T can be `void *` */
-#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \
-MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \
+#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
+MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
 { \
     T value = *valuePtr; \
     MBED_BARRIER(); \
     return value; \
 } \
 \
-MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \
+MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
 { \
     MBED_CHECK_LOAD_ORDER(order); \
     T value = *valuePtr; \
     MBED_ACQUIRE_BARRIER(order); \
     return value; \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \
+MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
 { \
     MBED_BARRIER(); \
     *valuePtr = value; \
     MBED_BARRIER(); \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \
+MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
 { \
     MBED_CHECK_STORE_ORDER(order); \
     MBED_RELEASE_BARRIER(order); \
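Note: with the new `V` parameter, the single macro body now generates both qualified and unqualified accessors. As a rough sketch (not part of the patch), the later instantiation DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32) with `V` left empty would expand to something like the following, explicit-order variants omitted:

    /* Sketch of the expansion with V empty; the parallel
     * DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32) instantiation
     * substitutes `volatile` for V to produce the qualified overloads. */
    MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(uint32_t const *valuePtr)
    {
        uint32_t value = *valuePtr; /* plain aligned load is naturally atomic here */
        MBED_BARRIER();             /* compiler barrier enforces the ordering */
        return value;
    }

    MBED_FORCEINLINE void core_util_atomic_store_u32(uint32_t *valuePtr, uint32_t value)
    {
        MBED_BARRIER();
        *valuePtr = value;          /* plain aligned store */
        MBED_BARRIER();
    }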
@@ -651,15 +651,51 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_at
     flagPtr->_flag = false;
     MBED_SEQ_CST_BARRIER(order);
 }
-DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8)
-DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16)
-DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32)
-DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8)
-DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16)
-DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32)
-DO_MBED_LOCKFREE_LOADSTORE(bool, bool)
-DO_MBED_LOCKFREE_LOADSTORE(void *, ptr)
+
+#ifdef __cplusplus
+// Temporarily turn off extern "C", so we can provide non-volatile load/store
+// overloads for efficiency. All these functions are static inline, so this has
+// no linkage effect exactly, it just permits the overloads.
+} // extern "C"
+
+// For efficiency it's worth having non-volatile overloads
+MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+    MBED_BARRIER();
+}
+
+MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
+{
+    MBED_RELEASE_BARRIER(order);
+    flagPtr->_flag = false;
+    MBED_SEQ_CST_BARRIER(order);
+}
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
+
+#endif
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 
 /******************** GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
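The extern "C" dance matters because C has no function overloading: C callers continue to see only the volatile-qualified functions, while C++ callers additionally get unqualified overloads, so an access to a non-volatile object need not be performed with volatile semantics. A hypothetical C++ caller, purely for illustration:

    // Illustrative only; `counter` and `dma_flag` are made-up names.
    uint32_t counter;            // ordinary object
    volatile uint32_t dma_flag;  // genuinely volatile object

    uint32_t a = core_util_atomic_load_u32(&counter);  // binds to the new non-volatile overload
    uint32_t b = core_util_atomic_load_u32(&dma_flag); // binds to the volatile overload, as before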
@@ -901,6 +937,12 @@ DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
 template<> \
 inline T core_util_atomic_load(const volatile T *valuePtr) \
+{ \
+    return core_util_atomic_load_##fn_suffix(valuePtr); \
+} \
+\
+template<> \
+inline T core_util_atomic_load(const T *valuePtr) \
 { \
     return core_util_atomic_load_##fn_suffix(valuePtr); \
 }
@@ -911,6 +953,12 @@ inline T *core_util_atomic_load(T *const volatile *valuePtr)
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }
 
+template<typename T>
+inline T *core_util_atomic_load(T *const *valuePtr)
+{
+    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
+}
+
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
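With both the extra specialisation and the new pointer overload, the generic C++ core_util_atomic_load now also accepts pointers to non-volatile objects. A hypothetical example of which overloads get picked:

    // Illustrative only; Node, head and count are made-up names.
    struct Node { Node *next; };
    Node *head;
    uint16_t count;

    Node *n = core_util_atomic_load(&head);     // T *const * overload -> core_util_atomic_load_ptr
    uint16_t c = core_util_atomic_load(&count); // const uint16_t * specialisation -> ..._u16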
@@ -924,6 +972,12 @@ DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
 template<> \
 inline void core_util_atomic_store(volatile T *valuePtr, T val) \
+{ \
+    core_util_atomic_store_##fn_suffix(valuePtr, val); \
+} \
+\
+template<> \
+inline void core_util_atomic_store(T *valuePtr, T val) \
 { \
     core_util_atomic_store_##fn_suffix(valuePtr, val); \
 }
@@ -934,6 +988,12 @@ inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }
 
+template<typename T>
+inline void core_util_atomic_store(T **valuePtr, T *val)
+{
+    core_util_atomic_store_ptr((void **) valuePtr, val);
+}
+
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
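The store templates mirror the loads. Continuing the hypothetical example from above (this assumes the matching non-volatile primary templates are declared alongside these specialisations):

    core_util_atomic_store(&count, uint16_t(42)); // uint16_t * specialisation -> ..._u16
    core_util_atomic_store(&head, n);             // T ** overload -> core_util_atomic_store_ptr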