@@ -613,30 +613,30 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
 /* Lock-free loads and stores don't need assembler - just aligned accesses */
 /* Silly ordering of `T volatile` is because T can be `void *` */
-#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \
-MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \
+#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
+MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
 { \
     T value = *valuePtr; \
     MBED_BARRIER(); \
     return value; \
 } \
 \
-MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \
+MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
 { \
     MBED_CHECK_LOAD_ORDER(order); \
     T value = *valuePtr; \
     MBED_ACQUIRE_BARRIER(order); \
     return value; \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \
+MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
 { \
     MBED_BARRIER(); \
     *valuePtr = value; \
     MBED_BARRIER(); \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \
+MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
 { \
     MBED_CHECK_STORE_ORDER(order); \
     MBED_RELEASE_BARRIER(order); \
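
With the new `V` qualifier parameter, an instantiation such as DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32) expands to roughly the following load/store pair (an illustrative sketch of the expansion, not literal code from the header):

/* Rough expansion of DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32).
 * As the comment above says, a barrier-wrapped aligned access is all a
 * lock-free load or store needs. */
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(uint32_t const volatile *valuePtr)
{
    uint32_t value = *valuePtr; /* plain aligned access */
    MBED_BARRIER();
    return value;
}

MBED_FORCEINLINE void core_util_atomic_store_u32(uint32_t volatile *valuePtr, uint32_t value)
{
    MBED_BARRIER();
    *valuePtr = value;
    MBED_BARRIER();
}

Passing an empty second argument, as in DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32), produces the same bodies with non-volatile parameters, which is what the C++-only block in the next hunk relies on.
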
@@ -658,15 +658,51 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_at
     flagPtr->_flag = false;
     MBED_SEQ_CST_BARRIER(order);
 }
-DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8)
-DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16)
-DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32)
-DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8)
-DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16)
-DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32)
-DO_MBED_LOCKFREE_LOADSTORE(bool, bool)
-DO_MBED_LOCKFREE_LOADSTORE(void *, ptr)

+#ifdef __cplusplus
+// Temporarily turn off extern "C", so we can provide non-volatile load/store
+// overloads for efficiency. All these functions are static inline, so this has
+// no linkage effect exactly, it just permits the overloads.
+} // extern "C"
+
+// For efficiency it's worth having non-volatile overloads
+MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+    MBED_BARRIER();
+}
+
+MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
+{
+    MBED_RELEASE_BARRIER(order);
+    flagPtr->_flag = false;
+    MBED_SEQ_CST_BARRIER(order);
+}
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
+
+#endif
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
+
+#ifdef __cplusplus
+extern "C" {
+#endif


 /********************* GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
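
To show what the non-volatile overloads buy, here is a hypothetical caller compiled as C++ (not part of this patch; the variable names and include path are assumptions). With a plain object, overload resolution now selects the non-volatile functions, so the compiler is no longer forced to treat the access as volatile:

#include <stdint.h>
#include "mbed_atomic.h"   /* assumed public header exposing these functions */

static uint32_t local_count;           /* plain, non-volatile object */
static volatile uint32_t shared_count; /* volatile object */

void example(void)
{
    /* Binds to the new core_util_atomic_load_u32(uint32_t const *) overload */
    uint32_t a = core_util_atomic_load_u32(&local_count);

    /* Still binds to core_util_atomic_load_u32(uint32_t const volatile *) */
    uint32_t b = core_util_atomic_load_u32(&shared_count);

    core_util_atomic_store_u32(&local_count, a + b);
}
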
@@ -975,7 +1011,19 @@ inline T core_util_atomic_load(const volatile T *valuePtr)
 } \
 \
 template<> \
+inline T core_util_atomic_load(const T *valuePtr) \
+{ \
+    return core_util_atomic_load_##fn_suffix(valuePtr); \
+} \
+\
+template<> \
 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
+{ \
+    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
+} \
+\
+template<> \
+inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 }
@@ -986,12 +1034,24 @@ inline T *core_util_atomic_load(T *const volatile *valuePtr)
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }

+template<typename T>
+inline T *core_util_atomic_load(T *const *valuePtr)
+{
+    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
+}
+
 template<typename T>
 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
 }

+template<typename T>
+inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
+{
+    return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
+}
+
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
@@ -1010,7 +1070,19 @@ inline void core_util_atomic_store(volatile T *valuePtr, T val)
 } \
 \
 template<> \
+inline void core_util_atomic_store(T *valuePtr, T val) \
+{ \
+    core_util_atomic_store_##fn_suffix(valuePtr, val); \
+} \
+\
+template<> \
 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
+{ \
+    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
+} \
+\
+template<> \
+inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 }
@@ -1021,12 +1093,24 @@ inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }

+template<typename T>
+inline void core_util_atomic_store(T **valuePtr, T *val)
+{
+    core_util_atomic_store_ptr((void **) valuePtr, val);
+}
+
 template<typename T>
 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
 {
     core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
 }

+template<typename T>
+inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
+{
+    core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
+}
+
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
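
Similarly, a hypothetical C++ caller for the template layer extended above (illustrative only; the struct, variables, and include path are made up). The generic core_util_atomic_load / core_util_atomic_store templates can now be applied to non-volatile objects and non-volatile pointer variables, assuming matching non-volatile primary template declarations exist alongside the volatile ones:

#include <stdint.h>
#include "mbed_atomic.h"   /* assumed public header */

struct Node;
static Node *list_head;    /* non-volatile pointer variable */
static uint8_t flags;      /* non-volatile integer */

void example()
{
    /* Uses the non-volatile template specialisations for uint8_t */
    core_util_atomic_store(&flags, (uint8_t) 1);
    uint8_t f = core_util_atomic_load(&flags);

    /* Uses the non-volatile pointer templates, with T deduced as Node */
    Node *n = core_util_atomic_load(&list_head);
    core_util_atomic_store(&list_head, n);

    (void) f;
}
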