Add atomic loads and stores and barriers #9247
@@ -22,6 +22,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "mbed_toolchain.h"

#ifdef __cplusplus
extern "C" {
@@ -89,6 +90,19 @@ void core_util_critical_section_exit(void);
 */
bool core_util_in_critical_section(void);

/**@}*/

/**
 * \defgroup platform_atomic atomic functions
 *
 * Atomic functions work analogously to C11 and C++11 - loads have
 * acquire semantics, stores have release semantics, and atomic operations
 * are sequentially consistent. Atomicity is enforced both between threads and
 * interrupt handlers.
 *
 * @{
 */

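A rough usage sketch of those semantics (the producer/consumer names, the data variables, and the assumption that these functions live in mbed_critical.h are illustrative, not part of this change): a release store publishes data written with plain stores, and the matching acquire load ensures the reader sees that data afterwards.

```c
#include <stdbool.h>
#include <stdint.h>
#include "mbed_critical.h"   /* assumed header declaring core_util_atomic_* */

static uint32_t message;                 /* plain data, published via the flag   */
static volatile uint32_t message_ready;  /* accessed only through the atomics    */

/* Producer side, e.g. an interrupt handler: write the data, then release it.
 * The store's release semantics keep the write to `message` before it. */
void producer(uint32_t value)
{
    message = value;
    core_util_atomic_store_u32(&message_ready, 1);
}

/* Consumer side, e.g. thread context: acquire the flag, then read the data.
 * The load's acquire semantics keep the read of `message` after it. */
bool consumer(uint32_t *out)
{
    if (core_util_atomic_load_u32(&message_ready) == 0) {
        return false;
    }
    *out = message;
    return true;
}
```
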
/**
 * A lock-free, primitive atomic flag.
 *
@@ -124,7 +138,11 @@ bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
 *
 * @param flagPtr Target flag being cleared.
 */
void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr);
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
}

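For context, the flag API is the usual test-and-set primitive; a minimal spinlock-style sketch is below. The CORE_UTIL_ATOMIC_FLAG_INIT initializer and the lock/enter/leave names are assumptions for illustration, not part of this change.

```c
#include "mbed_critical.h"   /* assumed header declaring core_util_atomic_flag */

static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT; /* assumed initializer macro */

void enter(void)
{
    /* test_and_set returns the previous value, so spin until we observed it
     * clear; its acquire semantics order the protected accesses after this. */
    while (core_util_atomic_flag_test_and_set(&lock)) {
    }
}

void leave(void)
{
    /* Release semantics: work done under the lock is visible before the flag
     * is seen clear by the next caller of enter(). */
    core_util_atomic_flag_clear(&lock);
}
```
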
/**
 * Atomic compare and set. It compares the contents of a memory location to a

@@ -354,6 +372,102 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV
 */
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);

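The compare-and-set functions above are typically used in a retry loop. A sketch, assuming the usual mbed semantics that a failed compare writes the observed value back through expectedCurrentValue; the increment_up_to helper and its names are purely illustrative.

```c
#include <stdbool.h>
#include <stdint.h>
#include "mbed_critical.h"   /* assumed header declaring core_util_atomic_cas_u32 */

/* Illustrative helper (not part of this change): increment *counter unless it
 * has already reached `limit`. Assumes a failed CAS stores the observed value
 * back into `current`, so the loop re-checks against fresh data each time. */
bool increment_up_to(volatile uint32_t *counter, uint32_t limit)
{
    uint32_t current = core_util_atomic_load_u32(counter);
    do {
        if (current >= limit) {
            return false;
        }
    } while (!core_util_atomic_cas_u32(counter, &current, current + 1));
    return true;
}
```
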
/**
 * Atomic load.
 * @param valuePtr Target memory location.
 * @return The loaded value.
 */
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr)
{
    uint8_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}

Review comment on the `uint8_t value = *valuePtr;` line:

Why isn't there a memory barrier before the load?

Because load has "acquire" semantics, as per C++11/C11. It only needs to stop stuff from afterwards moving before it.

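For reference, the C11 construct that reply is comparing against would look roughly like the sketch below (using <stdatomic.h>, which this header does not use; the names here are illustrative).

```c
#include <stdatomic.h>
#include <stdint.h>

/* Illustrative C11 equivalent of an acquire load: later reads and writes may
 * not be reordered before it, but nothing stops earlier accesses moving after
 * it, which is why no barrier is needed before the load itself. */
uint8_t load_acquire(const volatile _Atomic uint8_t *valuePtr)
{
    return atomic_load_explicit(valuePtr, memory_order_acquire);
}
```
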
/**
 * Atomic load.
 * @param valuePtr Target memory location.
 * @return The loaded value.
 */
MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr)
{
    uint16_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}

/**
 * Atomic load.
 * @param valuePtr Target memory location.
 * @return The loaded value.
 */
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
{
    uint32_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}

/**
 * Atomic load.
 * @param valuePtr Target memory location.
 * @return The loaded value.
 */
MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr)
{
    void *value = *valuePtr;
    MBED_BARRIER();
    return value;
}

/**
 * Atomic store.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 */
MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}

/**
 * Atomic store.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 */
MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}

/**
 * Atomic store.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 */
MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}

/**
 * Atomic store.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 */
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}

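The barrier on each side of these stores matches the sequentially consistent store that the group comment promises. A C11 sketch of the same intent, again just for comparison and not code from this change:

```c
#include <stdatomic.h>
#include <stdint.h>

/* Illustrative C11 equivalent of the stores above: release ordering plus the
 * extra store-load fence needed for full sequential consistency. Per the
 * C++11 mapping tables, on ARM this is DMB; STR; DMB - the same shape as
 * barrier / store / barrier. */
void store_seq_cst(volatile _Atomic uint32_t *valuePtr, uint32_t desiredValue)
{
    atomic_store_explicit(valuePtr, desiredValue, memory_order_seq_cst);
}
```
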
/**
 * Atomic increment.
 * @param valuePtr Target memory location being incremented.

Why isn't there a memory barrier after `_flag` is set to false?

You're right, one is needed there. I was thinking it wasn't needed for single-CPU, but sequential consistency requires it for the same reason you would need two DMBs (https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html). Assuming we want sequential consistency (the C++11 default), then we need to guarantee that a flag clear followed by an atomic load can't be reordered, so the clear has to end up with a barrier both before and after the store. And it's the store that gets the extra sequential-consistency barrier (like the extra DMB), because stores are probably less common.
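Based on that exchange, the corrected clear would presumably gain a trailing barrier, along these lines (a sketch of the fix being discussed, not the committed code):

```c
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();          /* release: earlier accesses complete before the clear */
    flagPtr->_flag = false;
    MBED_BARRIER();          /* sequential-consistency barrier, so the clear cannot
                                be reordered with a later atomic load */
}
```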