@@ -1089,49 +1089,49 @@ DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
  */
 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
 template<> \
-inline T core_util_atomic_load(const volatile T *valuePtr) \
+inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
 { \
     return core_util_atomic_load_##fn_suffix(valuePtr); \
 } \
 \
 template<> \
-inline T core_util_atomic_load(const T *valuePtr) \
+inline T core_util_atomic_load(const T *valuePtr) noexcept \
 { \
     return core_util_atomic_load_##fn_suffix(valuePtr); \
 } \
 \
 template<> \
-inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
+inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 } \
 \
 template<> \
-inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
+inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 }
 
 template<typename T>
-inline T *core_util_atomic_load(T *const volatile *valuePtr)
+inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
 {
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }
 
 template<typename T>
-inline T *core_util_atomic_load(T *const *valuePtr)
+inline T *core_util_atomic_load(T *const *valuePtr) noexcept
 {
     return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
 }
 
 template<typename T>
-inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
+inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
+inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
 }
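// --- Reviewer sketch (not part of the patch) ---
// Minimal caller-side use of the load templates above, assuming this header
// is included and the uint32_t specialization has been instantiated via
// DO_MBED_ATOMIC_LOAD_TEMPLATE, as elsewhere in this file. The variable and
// function names below are hypothetical.
#include "platform/mbed_atomic.h"

static volatile uint32_t event_count;

uint32_t read_event_count()
{
    // Overload resolution picks the template<> specialization, which forwards
    // to core_util_atomic_load_u32(); after this patch the call is noexcept.
    return core_util_atomic_load(&event_count);
}
// --- end sketch ---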
@@ -1148,49 +1148,49 @@ DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
 template<> \
-inline void core_util_atomic_store(volatile T *valuePtr, T val) \
+inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
 { \
     core_util_atomic_store_##fn_suffix(valuePtr, val); \
 } \
 \
 template<> \
-inline void core_util_atomic_store(T *valuePtr, T val) \
+inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
 { \
     core_util_atomic_store_##fn_suffix(valuePtr, val); \
 } \
 \
 template<> \
-inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
+inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 } \
 \
 template<> \
-inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
+inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 }
 
 template<typename T>
-inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
+inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
 {
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }
 
 template<typename T>
-inline void core_util_atomic_store(T **valuePtr, T *val)
+inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
 {
     core_util_atomic_store_ptr((void **) valuePtr, val);
 }
 
 template<typename T>
-inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
+inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
 {
     core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
 }
 
 template<typename T>
-inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
+inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
 {
     core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
 }
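// --- Reviewer sketch (not part of the patch) ---
// The store templates mirror the loads: a plain form with sequentially
// consistent semantics and an _explicit form taking one of the
// mbed_memory_order values defined earlier in this header. Names below
// are hypothetical.
#include "platform/mbed_atomic.h"

static volatile uint32_t data_ready;

void publish()
{
    // Default, sequentially consistent store...
    core_util_atomic_store(&data_ready, 1u);
    // ...or, where release semantics suffice:
    core_util_atomic_store_explicit(&data_ready, 1u, mbed_memory_order_release);
}
// --- end sketch ---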
@@ -1207,19 +1207,19 @@ DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
 template<> inline \
-bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
+bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
 { \
     return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
 }
 
 template<typename T>
-inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
+inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
 {
     return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
 }
 
 template<typename T>
-inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
+inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
 {
     return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
 }
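// --- Reviewer sketch (not part of the patch) ---
// A typical retry loop over the compare-exchange specializations above,
// assuming this header is included; names are hypothetical.
#include "platform/mbed_atomic.h"

static volatile uint32_t flags;

void set_flag_bits(uint32_t bits)
{
    uint32_t expected = core_util_atomic_load(&flags);
    // compare_exchange_weak may fail spuriously; on failure it reloads
    // `expected` with the current value, so the loop simply retries.
    while (!core_util_atomic_compare_exchange_weak(&flags, &expected, expected | bits)) {
    }
}
// --- end sketch ---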
@@ -1240,63 +1240,63 @@ DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
 template<> \
-inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
+inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
 { \
     return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
 } \
 \
 template<> \
 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
-                                            mbed_memory_order order) \
+                                            mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
 }
 
 
 template<>
-inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
+inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
 {
     return core_util_atomic_exchange_bool(valuePtr, arg);
 }
 
 template<>
-inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
+inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
 {
     return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
+inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
 {
     return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
 }
 
 template<typename T>
-inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
+inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
+inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
 {
     return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
+inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
 {
     return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
 }
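// --- Reviewer sketch (not part of the patch) ---
// The pointer fetch_add/fetch_sub templates above scale `arg` by sizeof(T),
// so the offset counts elements, matching ordinary pointer arithmetic.
// Assumes this header is included; names are hypothetical.
#include "platform/mbed_atomic.h"

static uint16_t buffer[64];
static uint16_t *volatile write_ptr = buffer;

uint16_t *claim_slots(ptrdiff_t n)
{
    // Atomically advances write_ptr by n elements (n * sizeof(uint16_t)
    // bytes) and returns the pre-increment position.
    return core_util_atomic_fetch_add(&write_ptr, n);
}
// --- end sketch ---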
@@ -1316,14 +1316,14 @@ inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t a
 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
 template<> \
-inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
+inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
 { \
     return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
 } \
 \
 template<> \
 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
-                                            mbed_memory_order order) \
+                                            mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
 }
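// --- Reviewer sketch (not part of the patch) ---
// What DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE generates: a pre-op built from
// the corresponding post-op. For an invocation along the lines of
// DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(incr, uint8_t, u8, fetch_add, + arg)
// (the actual instantiations live elsewhere in this file), the first
// specialization would expand, after this patch, to roughly:
//
//     template<>
//     inline uint8_t core_util_atomic_incr(volatile uint8_t *valuePtr, uint8_t arg) noexcept
//     {
//         // fetch_add returns the old value; applying OP (`+ arg`) turns it
//         // into the new value, which is what a pre-op must return.
//         return core_util_atomic_fetch_add_u8(valuePtr, arg) + arg;
//     }
// --- end sketch ---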
@@ -1344,16 +1344,16 @@ namespace impl {
 // Use custom assembler forms for pre-ops where available, else construct from post-ops
 #if MBED_EXCLUSIVE_ACCESS
 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
-    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg); \
-    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); \
+    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
+    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
     DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
     DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
     DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
 #else
 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
-    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg); \
-    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); \
+    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
+    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \