[tsan] Don't use enum __tsan_memory_order in tsan interface #114724
Conversation
Created using spr 1.3.4 [skip ci]
Created using spr 1.3.4
@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Vitaly Buka (vitalybuka)

Changes

In C++ it's UB to use undeclared values as enum, and there is support code where `__ATOMIC_HLE_ACQUIRE` and `__ATOMIC_HLE_RELEASE` need such values. The internal implementation was switched to `enum class`, where that behavior is defined. But the interface is C, so we just switch to `int`.

Patch is 32.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/114724.diff

4 Files Affected:
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index de3a1c3936097d..74ed91efade040 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -43,183 +43,178 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
__tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
__tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
__tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
#if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
- const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL
+__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
#endif
void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v,
- __tsan_memory_order mo);
+ __tsan_atomic8 v, int mo);
void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v,
- __tsan_memory_order mo);
+ __tsan_atomic16 v, int mo);
void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v,
- __tsan_memory_order mo);
+ __tsan_atomic32 v, int mo);
void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v,
- __tsan_memory_order mo);
+ __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v,
- __tsan_memory_order mo);
+ __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
- volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
- volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
- volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
- volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
- volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+ volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
- volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
- volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
- volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+ int fail_mo);
int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
- volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
+ int fail_mo);
__tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
+ int fail_mo);
__tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
+ int fail_mo);
__tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
+ int fail_mo);
#if __TSAN_HAS_INT128
__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ int mo, int fail_mo);
#endif
-void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
#ifdef __cplusplus
} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 9db0eebd923696..1f6e0ab9f49347 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -40,62 +40,64 @@ int setcontext(const ucontext_t *ucp);
namespace __tsan {
-// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
-// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
-// actually aliases of each other, and we cannot have different interceptors for
-// them, because they're actually the same function. Thus, we have to stay
-// conservative and treat the non-barrier versions as mo_acq_rel.
-static constexpr morder kMacOrderBarrier = mo_acq_rel;
-static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
-static constexpr morder kMacFailureOrder = mo_relaxed;
-
-#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
- }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
- }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
- }
+// The non-barrier versions of OSAtomic* functions are semantically
+// morder::relaxed, but the two variants (e.g. OSAtomicAdd32 and
+// OSAtomicAdd32Barrier) are actually aliases of each other, and we cannot have
+// different interceptors for them, because they're actually the same function.
+// Thus, we have to stay conservative and treat the non-barrier versions as
+// morder::acq_rel.
+static constexpr morder kMacOrderBarrier = morder::acq_rel;
+static constexpr morder kMacOrderNonBarrier = morder::acq_rel;
+static constexpr morder kMacFailureOrder = morder::relaxed;
+
+# define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
-#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
- mo) \
- TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
- SCOPED_TSAN_INTERCEPTOR(f, ptr); \
- return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
- }
+# define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
-#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
- m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
- kMacOrderBarrier)
-
-#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
- m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
- kMacOrderNonBarrier) \
- m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
- __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+# define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+# define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) ...
[truncated]
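For context, here is a hypothetical caller-side view of what the relaxed signatures permit (a sketch, not part of the patch; the hint-bit value is illustrative and target-specific):

```c
#include <sanitizer/tsan_interface_atomic.h>

void example(volatile __tsan_atomic32 *p) {
  /* The enum constants still work: unscoped enums convert to int. */
  __tsan_atomic32 v = __tsan_atomic32_load(p, __tsan_memory_order_acquire);
  (void)v;
  /* A value carrying an extra hint bit (e.g. an HLE-style flag) fits in
     int, but would have no declared enumerator if the parameter were the
     enum type itself. */
  __tsan_atomic32_store(p, 1, __tsan_memory_order_release | (1 << 16));
}
```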
Created using spr 1.3.4 [skip ci]
@@ -219,14 +219,14 @@ __extension__ typedef __int128 a128;
// Part of ABI, do not change.
Is it still following the ABI? If we do not static_cast it to int, then I don't think this is strictly required to follow the ABI.
Does #114916 help?
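As an aside, one way to state the ABI expectation in code (an illustrative sketch under the assumption that the enum's underlying type is fixed to `int`; this is not code from either PR):

```cpp
enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };

// With a fixed underlying type of int, the enum and a plain int have the
// same size and alignment, so swapping one for the other in a C-callable
// signature does not change how the argument is passed on typical ABIs.
static_assert(sizeof(morder) == sizeof(int), "morder must stay int-sized");
static_assert(alignof(morder) == alignof(int), "morder must stay int-aligned");
```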
Prepare to replace macro with template. Related to #114724, but it's not strictly needed.
Created using spr 1.3.4 [skip ci]
In #114724 I'd like to cast from int to enum, but existing code `mo = convert_morder(mo)` does not allow that.
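To illustrate the constraint (names are illustrative, not the exact TSan sources): reassigning the converted value back into the same variable pins that variable's type, so `convert_morder` cannot become the int-to-enum boundary while the pattern stays as-is.

```cpp
enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };

// Old shape (sketch): convert_morder takes and returns morder, and its
// result is written back into the same variable, as in
//   void op(..., morder mo) { mo = convert_morder(mo); ... }
// The variable can never be a plain int, so there is no place for the cast.
morder convert_morder_old(morder mo) { return mo; }

// Shape wanted by #114724 (sketch): accept the raw int from the C
// interface and return a fresh enum-typed value, converting exactly once.
morder convert_morder_new(int mo) { return static_cast<morder>(mo); }

void op_example(int mo_raw) {
  const morder mo = convert_morder_new(mo_raw);  // conversion at the boundary
  (void)mo;  // ... dispatch on mo ...
}
```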
This is breaking the greendragon bots, could you revert or submit a fix if it's a quick one?
https://green.lab.llvm.org/job/llvm.org/view/LLDB/job/as-lldb-cmake/14686/console |
…e"" (#115034) In C++ it's UB to use undeclared values as enum. And there is support __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE need such values. So use `int` in TSAN interface, and mask out irrelevant bits and cast to enum ASAP. `ThreadSanitizer.cpp` already declare morder parameterd in these functions as `i32`. This may looks like a slight change, as we previously didn't mask out additional bits for `fmo`, and `NoTsanAtomic` call. But from implementation it's clear that they are expecting exact enum. Reverts #115032 Reapply #114724
In C++ it's UB to use undeclared values as enum, and there is support code where `__ATOMIC_HLE_ACQUIRE` and `__ATOMIC_HLE_RELEASE` need such values. The internal implementation was switched to `enum class`, where that behavior is defined. But the interface is C, so we just switch to `int`.
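To spell out the language rule this refers to (an illustrative sketch, not code from the patch): an enum without a fixed underlying type only has defined values within the range of its enumerators, while an `enum class` with underlying type `int` can hold any `int` value.

```cpp
// Without a fixed underlying type, the valid range is only as wide as the
// smallest bit-field that can hold all enumerators (here 0..7).
enum old_morder { mo_relaxed = 0, mo_seq_cst = 5 };

// With a fixed underlying type, every value of that type is representable.
enum class new_morder : int { relaxed = 0, seq_cst = 5 };

int main() {
  // Undefined behavior since C++17: 1 << 16 is outside old_morder's range.
  // old_morder bad = static_cast<old_morder>(1 << 16);
  new_morder ok = static_cast<new_morder>(1 << 16);  // well-defined
  (void)ok;
}
```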