[compiler-rt] Use __atomic builtins whenever possible #84439

Merged
3 changes: 0 additions & 3 deletions compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -122,9 +122,6 @@ set(SANITIZER_IMPL_HEADERS
   sanitizer_asm.h
   sanitizer_atomic.h
   sanitizer_atomic_clang.h
-  sanitizer_atomic_clang_mips.h
-  sanitizer_atomic_clang_other.h
-  sanitizer_atomic_clang_x86.h
   sanitizer_atomic_msvc.h
   sanitizer_bitvector.h
   sanitizer_bvgraph.h
12 changes: 12 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
@@ -18,12 +18,24 @@
 namespace __sanitizer {

 enum memory_order {
+// If the __atomic atomic builtins are supported (Clang/GCC), use the
+// compiler provided macro values so that we can map the atomic operations
+// to __atomic_* directly.
+#ifdef __ATOMIC_SEQ_CST
+  memory_order_relaxed = __ATOMIC_RELAXED,
+  memory_order_consume = __ATOMIC_CONSUME,
+  memory_order_acquire = __ATOMIC_ACQUIRE,
+  memory_order_release = __ATOMIC_RELEASE,
+  memory_order_acq_rel = __ATOMIC_ACQ_REL,
+  memory_order_seq_cst = __ATOMIC_SEQ_CST
+#else
   memory_order_relaxed = 1 << 0,
   memory_order_consume = 1 << 1,
   memory_order_acquire = 1 << 2,
   memory_order_release = 1 << 3,
   memory_order_acq_rel = 1 << 4,
   memory_order_seq_cst = 1 << 5
+#endif
 };

 struct atomic_uint8_t {
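Because the enum constants in the new #ifdef branch reuse the compiler's __ATOMIC_* values, a memory_order argument can be handed to the __atomic builtins with no translation table. A minimal standalone sketch of that pattern (illustrative only, not part of the patch; the type and function names here are made up):

  #include <cstdint>

  // Sketch only: assumes a Clang/GCC-style compiler that provides the
  // __ATOMIC_* macros and __atomic_* builtins.
  enum memory_order {
    memory_order_relaxed = __ATOMIC_RELAXED,
    memory_order_acquire = __ATOMIC_ACQUIRE,
    memory_order_release = __ATOMIC_RELEASE,
    memory_order_seq_cst = __ATOMIC_SEQ_CST
  };

  struct atomic_uint32_t {
    volatile uint32_t val_dont_use;
  };

  // No mapping step is needed: mo already carries the builtin's encoding.
  inline uint32_t load_sketch(const volatile atomic_uint32_t *a, memory_order mo) {
    return __atomic_load_n(&a->val_dont_use, mo);
  }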
85 changes: 40 additions & 45 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -14,60 +14,63 @@
 #ifndef SANITIZER_ATOMIC_CLANG_H
 #define SANITIZER_ATOMIC_CLANG_H

-#if defined(__i386__) || defined(__x86_64__)
-# include "sanitizer_atomic_clang_x86.h"
-#else
-# include "sanitizer_atomic_clang_other.h"
-#endif
-
 namespace __sanitizer {

-// We would like to just use compiler builtin atomic operations
-// for loads and stores, but they are mostly broken in clang:
-// - they lead to vastly inefficient code generation
-// (http://llvm.org/bugs/show_bug.cgi?id=17281)
-// - 64-bit atomic operations are not implemented on x86_32
-// (http://llvm.org/bugs/show_bug.cgi?id=15034)
-// - they are not implemented on ARM
-// error: undefined reference to '__atomic_load_4'
+// We use the compiler builtin atomic operations for loads and stores, which
+// generates correct code for all architectures, but may require libatomic
+// on platforms where e.g. 64-bit atomics are not supported natively.

 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 // for mappings of the memory model to different processors.

-inline void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }
+
+inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }
+
+inline void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
+#if defined(__i386__) || defined(__x86_64__)
+  for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
+  __asm__ __volatile__("" ::: "memory");
+#endif
 }

-inline void atomic_thread_fence(memory_order) {
-  __sync_synchronize();
+template <typename T>
+inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
+  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
+         mo == memory_order_acquire || mo == memory_order_seq_cst);
+  DCHECK(!((uptr)a % sizeof(*a)));
+  return __atomic_load_n(&a->val_dont_use, mo);
 }

-template<typename T>
-inline typename T::Type atomic_fetch_add(volatile T *a,
-    typename T::Type v, memory_order mo) {
-  (void)mo;
+template <typename T>
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
+         mo == memory_order_seq_cst);
   DCHECK(!((uptr)a % sizeof(*a)));
-  return __sync_fetch_and_add(&a->val_dont_use, v);
+  __atomic_store_n(&a->val_dont_use, v, mo);
 }

-template<typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *a,
-    typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
+                                         memory_order mo) {
+  DCHECK(!((uptr)a % sizeof(*a)));
+  return __atomic_fetch_add(&a->val_dont_use, v, mo);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
+                                         memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
-  return __sync_fetch_and_add(&a->val_dont_use, -v);
+  return __atomic_fetch_sub(&a->val_dont_use, v, mo);
 }

-template<typename T>
-inline typename T::Type atomic_exchange(volatile T *a,
-    typename T::Type v, memory_order mo) {
+template <typename T>
+inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
+                                        memory_order mo) {
   DCHECK(!((uptr)a % sizeof(*a)));
-  if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
-    __sync_synchronize();
-  v = __sync_lock_test_and_set(&a->val_dont_use, v);
-  if (mo == memory_order_seq_cst)
-    __sync_synchronize();
-  return v;
+  return __atomic_exchange_n(&a->val_dont_use, v, mo);
 }

 template <typename T>
@@ -82,23 +85,15 @@ inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 }

-template<typename T>
-inline bool atomic_compare_exchange_weak(volatile T *a,
-                                         typename T::Type *cmp,
+template <typename T>
+inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
                                          typename T::Type xchg,
                                          memory_order mo) {
   return atomic_compare_exchange_strong(a, cmp, xchg, mo);
 }

 }  // namespace __sanitizer

-// This include provides explicit template instantiations for atomic_uint64_t
-// on MIPS32, which does not directly support 8 byte atomics. It has to
-// proceed the template definitions above.
-#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
-# include "sanitizer_atomic_clang_mips.h"
-#endif
-
 #undef ATOMIC_ORDER

 #endif  // SANITIZER_ATOMIC_CLANG_H
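For context, the GCC/Clang __atomic builtins that the rewritten header forwards to take the memory order as an ordinary argument, so every wrapper above collapses to a single call; on targets without native wide atomics these calls may be lowered to libatomic, as the new header comment notes. A small, self-contained usage sketch of the same builtins (a toy counter, not sanitizer code):

  #include <cstdint>
  #include <cstdio>

  static uint32_t counter = 0;

  void bump() {
    // Same shape as atomic_fetch_add() in the new header.
    __atomic_fetch_add(&counter, 1u, __ATOMIC_RELAXED);
  }

  bool claim_if_zero() {
    uint32_t expected = 0;
    // Strong compare-exchange, seq_cst on success and failure, mirroring
    // atomic_compare_exchange_strong() above.
    return __atomic_compare_exchange_n(&counter, &expected, 1u, /*weak=*/false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }

  int main() {
    bump();
    unsigned v = (unsigned)__atomic_load_n(&counter, __ATOMIC_ACQUIRE);
    std::printf("counter = %u, claimed = %d\n", v, (int)claim_if_zero());
    return 0;
  }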
117 changes: 0 additions & 117 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h

This file was deleted.

85 changes: 0 additions & 85 deletions compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h

This file was deleted.
