-
Notifications
You must be signed in to change notification settings - Fork 14.3k
Revert "[tsan] Don't use enum __tsan_memory_order
in tsan interface"
#115032
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
vitalybuka
merged 1 commit into
main
from
revert-114724-users/vitalybuka/spr/tsan-dont-use-enum-__tsan_memory_order-in-tsan-interface
Nov 5, 2024
Merged
Revert "[tsan] Don't use enum __tsan_memory_order
in tsan interface"
#115032
vitalybuka
merged 1 commit into
main
from
revert-114724-users/vitalybuka/spr/tsan-dont-use-enum-__tsan_memory_order-in-tsan-interface
Nov 5, 2024
Conversation
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
@llvm/pr-subscribers-compiler-rt-sanitizer Author: Vitaly Buka (vitalybuka) ChangesReverts llvm/llvm-project#114724 Breaks OSX builds Patch is 55.93 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/115032.diff 4 Files Affected:
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index 74ed91efade040..de3a1c3936097d 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -43,178 +43,183 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL
-__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
+__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
+ const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
#endif
void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, int mo);
+ __tsan_atomic8 v,
+ __tsan_memory_order mo);
void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, int mo);
+ __tsan_atomic16 v,
+ __tsan_memory_order mo);
void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, int mo);
+ __tsan_atomic32 v,
+ __tsan_memory_order mo);
void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, int mo);
+ __tsan_atomic64 v,
+ __tsan_memory_order mo);
#if __TSAN_HAS_INT128
void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, int mo);
+ __tsan_atomic128 v,
+ __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
+__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- int mo, int fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- int mo, int fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
- int fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- int mo, int fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
#endif
-void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index b4257e76c3b903..e0e4c5b9d36cd3 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -40,15 +40,14 @@ int setcontext(const ucontext_t *ucp);
namespace __tsan {
-// The non-barrier versions of OSAtomic* functions are semantically
-// morder::relaxed, but the two variants (e.g. OSAtomicAdd32 and
-// OSAtomicAdd32Barrier) are actually aliases of each other, and we cannot have
-// different interceptors for them, because they're actually the same function.
-// Thus, we have to stay conservative and treat the non-barrier versions as
-// morder::acq_rel.
-static constexpr morder kMacOrderBarrier = morder::acq_rel;
-static constexpr morder kMacOrderNonBarrier = morder::acq_rel;
-static constexpr morder kMacFailureOrder = morder::relaxed;
+// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
+// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
+// actually aliases of each other, and we cannot have different interceptors for
+// them, because they're actually the same function. Thus, we have to stay
+// conservative and treat the non-barrier versions as mo_acq_rel.
+static constexpr morder kMacOrderBarrier = mo_acq_rel;
+static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
+static constexpr morder kMacFailureOrder = mo_relaxed;
# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
@@ -465,7 +464,7 @@ struct fake_shared_weak_count {
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implements the whole functions so that
-// the morder::acq_rel semantics of the atomic decrement are visible.
+// the mo_acq_rel semantics of the atomic decrement are visible.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
@@ -480,11 +479,11 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
o);
- if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, morder::release) == 0) {
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
Acquire(thr, pc, (uptr)&o->shared_owners);
o->on_zero_shared();
- if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1,
- morder::release) == 0) {
+ if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
+ 0) {
Acquire(thr, pc, (uptr)&o->shared_weak_owners);
o->on_zero_shared_weak();
}
@@ -497,7 +496,7 @@ STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
- if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, morder::release) == 0) {
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
Acquire(thr, pc, (uptr)&o->shared_owners);
o->on_zero_shared();
return true;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.h b/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 9751e891b6d90c..2b8a13ddb842cc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -219,193 +219,193 @@ __extension__ typedef __int128 a128;
// Part of ABI, do not change.
// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
-enum class morder : int {
- relaxed,
- consume,
- acquire,
- release,
- acq_rel,
- seq_cst
-};
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
struct ThreadState;
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-a8 __tsan_atomic8_load(const volatile a8 *a, int mo);
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a16 __tsan_atomic16_load(const volatile a16 *a, int mo);
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a32 __tsan_atomic32_load(const volatile a32 *a, int mo);
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-a64 __tsan_atomic64_load(const volatile a64 *a, int mo);
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
-a128 __tsan_atomic128_load(const volatile a128 *a, int mo);
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic8_store(volatile a8 *a, a8 v, int mo);
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic16_store(volatile a16 *a, a16 v, int mo);
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic32_store(volatile a32 *a, a32 v, int mo);
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic64_store(volatile a64 *a, a64 v, int mo);
+void __tsan_atomic64_store(volatile a64 *a, a64 v...
[truncated]
|
vitalybuka
added a commit
that referenced
this pull request
Nov 5, 2024
…e"" (#115034) In C++ it's UB to use undeclared values as enum. And there is support __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE need such values. So use `int` in TSAN interface, and mask out irrelevant bits and cast to enum ASAP. `ThreadSanitizer.cpp` already declare morder parameterd in these functions as `i32`. This may looks like a slight change, as we previously didn't mask out additional bits for `fmo`, and `NoTsanAtomic` call. But from implementation it's clear that they are expecting exact enum. Reverts #115032 Reapply #114724
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Labels
compiler-rt:sanitizer
compiler-rt:tsan
Thread sanitizer
compiler-rt
skip-precommit-approval
PR for CI feedback, not intended for review
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Reverts #114724
Breaks OSX builds