@@ -606,30 +606,30 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
 /* Lock-free loads and stores don't need assembler - just aligned accesses */
 /* Silly ordering of `T volatile` is because T can be `void *` */
-#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \
-MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \
+#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
+MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
 { \
     T value = *valuePtr; \
     MBED_BARRIER(); \
     return value; \
 } \
 \
-MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \
+MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
 { \
     MBED_CHECK_LOAD_ORDER(order); \
     T value = *valuePtr; \
     MBED_ACQUIRE_BARRIER(order); \
     return value; \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \
+MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
 { \
     MBED_BARRIER(); \
     *valuePtr = value; \
     MBED_BARRIER(); \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \
+MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
 { \
     MBED_CHECK_STORE_ORDER(order); \
     MBED_RELEASE_BARRIER(order); \
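To make the new V parameter concrete: below is an illustrative expansion, written by hand for this review rather than taken from the patch, of roughly what the load half of DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8) and DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8) produce. The two instantiations differ only in whether the pointer parameter is volatile-qualified, which is what lets them coexist as C++ overloads further down the diff.

/* Illustrative expansion only - approximate, not emitted verbatim by the macro. */

/* DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8): V is empty, so the pointer is non-volatile */
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(uint8_t const *valuePtr)
{
    uint8_t value = *valuePtr;  /* plain aligned access - lock-free, no assembler needed */
    MBED_BARRIER();             /* ordering barrier defined earlier in this header */
    return value;
}

/* DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8): V is `volatile`, the original signature */
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(uint8_t const volatile *valuePtr)
{
    uint8_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}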
@@ -651,15 +651,51 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_at
     flagPtr->_flag = false;
     MBED_SEQ_CST_BARRIER(order);
 }
-DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8)
-DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16)
-DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32)
-DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8)
-DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16)
-DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32)
-DO_MBED_LOCKFREE_LOADSTORE(bool, bool)
-DO_MBED_LOCKFREE_LOADSTORE(void *, ptr)
 
+#ifdef __cplusplus
+// Temporarily turn off extern "C", so we can provide non-volatile load/store
+// overloads for efficiency. All these functions are static inline, so this has
+// no linkage effect - it just permits the overloads.
+} // extern "C"
+
+// For efficiency it's worth having non-volatile overloads
+MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+    MBED_BARRIER();
+}
+
+MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
+{
+    MBED_RELEASE_BARRIER(order);
+    flagPtr->_flag = false;
+    MBED_SEQ_CST_BARRIER(order);
+}
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
+
+#endif
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS *****************/
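The extern "C" bracket is suspended because C has no function overloading: C callers only ever see the volatile instantiations, while C++ callers additionally get the non-volatile overloads. A hypothetical caller sketch follows (not part of the patch; the include path and the CORE_UTIL_ATOMIC_FLAG_INIT initializer are assumed from the existing mbed_atomic API), showing which overload each call binds to.

#include "platform/mbed_atomic.h"   // assumed include path for these declarations

static core_util_atomic_flag busy = CORE_UTIL_ATOMIC_FLAG_INIT;

void demo_flag_clear()
{
    // &busy is core_util_atomic_flag *, so C++ overload resolution selects the
    // non-volatile core_util_atomic_flag_clear added in this hunk.
    core_util_atomic_flag_clear(&busy);

    // A volatile-qualified pointer still binds to the original volatile overload.
    volatile core_util_atomic_flag *vp = &busy;
    core_util_atomic_flag_clear(vp);
}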
@@ -906,7 +942,19 @@ inline T core_util_atomic_load(const volatile T *valuePtr)
 } \
 \
 template<> \
+inline T core_util_atomic_load(const T *valuePtr) \
+{ \
+    return core_util_atomic_load_##fn_suffix(valuePtr); \
+} \
+\
+template<> \
 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
+{ \
+    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
+} \
+\
+template<> \
+inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 }
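With these additions each DO_MBED_ATOMIC_LOAD_TEMPLATE invocation stamps out both volatile and non-volatile specializations, in plain and explicit-order forms. A hypothetical resolution sketch (not from the patch; the variables are illustrative and mbed_memory_order_relaxed is assumed from the existing memory-order enum):

#include <stdint.h>
#include "platform/mbed_atomic.h"    // assumed include path

static uint32_t local_counter;           // ordinary, non-volatile object
static volatile uint32_t isr_counter;    // object shared with an interrupt handler

void demo_load()
{
    // Resolves to the new template<> core_util_atomic_load(const uint32_t *) specialization,
    // which forwards to core_util_atomic_load_u32 without imposing volatile semantics.
    uint32_t a = core_util_atomic_load(&local_counter);

    // Still resolves to the pre-existing const volatile specialization.
    uint32_t b = core_util_atomic_load(&isr_counter);

    // Explicit-order form, forwarding to core_util_atomic_load_explicit_u32.
    uint32_t c = core_util_atomic_load_explicit(&local_counter, mbed_memory_order_relaxed);
    (void) a; (void) b; (void) c;
}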
@@ -917,12 +965,24 @@ inline T *core_util_atomic_load(T *const volatile *valuePtr)
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }
 
+template<typename T>
+inline T *core_util_atomic_load(T *const *valuePtr)
+{
+    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
+}
+
 template<typename T>
 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
 }
 
+template<typename T>
+inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
+{
+    return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
+}
+
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
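The generic pointer-load templates get the same non-volatile treatment. A hypothetical usage sketch (not from the patch; Node and the variables are illustrative):

#include "platform/mbed_atomic.h"    // assumed include path

struct Node { int value; };

static Node *plain_head;              // plain pointer slot
static Node *volatile isr_head;       // pointer slot shared with an interrupt handler

void demo_load_ptr()
{
    // &plain_head is Node **, which binds to the new T *const * overload.
    Node *a = core_util_atomic_load(&plain_head);

    // &isr_head is Node *volatile *, which binds to the original T *const volatile * overload.
    Node *b = core_util_atomic_load(&isr_head);

    // Acquire-ordered load through the new non-volatile explicit overload.
    Node *c = core_util_atomic_load_explicit(&plain_head, mbed_memory_order_acquire);
    (void) a; (void) b; (void) c;
}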
@@ -941,7 +1001,19 @@ inline void core_util_atomic_store(volatile T *valuePtr, T val)
 } \
 \
 template<> \
+inline void core_util_atomic_store(T *valuePtr, T val) \
+{ \
+    core_util_atomic_store_##fn_suffix(valuePtr, val); \
+} \
+\
+template<> \
 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
+{ \
+    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
+} \
+\
+template<> \
+inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 }
@@ -952,12 +1024,24 @@ inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }
 
+template<typename T>
+inline void core_util_atomic_store(T **valuePtr, T *val)
+{
+    core_util_atomic_store_ptr((void **) valuePtr, val);
+}
+
 template<typename T>
 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
 {
     core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
 }
 
+template<typename T>
+inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
+{
+    core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
+}
+
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
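The store side mirrors the loads. A hypothetical usage sketch (not from the patch; the variables are illustrative, and the casts are needed because T is deduced from both arguments of the store templates):

#include <stdint.h>
#include "platform/mbed_atomic.h"    // assumed include path

static uint8_t mode;                  // ordinary non-volatile byte
static volatile uint8_t isr_mode;     // byte shared with an interrupt handler
static int *slot;                     // plain pointer slot
static int payload;

void demo_store()
{
    // New non-volatile specialization, forwarding to core_util_atomic_store_u8.
    core_util_atomic_store(&mode, (uint8_t) 1);

    // The original volatile specialization still applies to volatile objects.
    core_util_atomic_store(&isr_mode, (uint8_t) 2);

    // Release-ordered store through the new non-volatile explicit specialization.
    core_util_atomic_store_explicit(&mode, (uint8_t) 3, mbed_memory_order_release);

    // Pointer store through the new T ** template overload.
    core_util_atomic_store(&slot, &payload);
}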