[tsan] Don't use enum __tsan_memory_order in tsan interface #114724

Conversation

vitalybuka (Collaborator) commented Nov 4, 2024

In C++ it's UB to use values outside the declared enumerators as an enum
value, and supporting __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE
requires exactly such values.

The internal implementation was switched to an enum class, where that
behavior is defined. But the interface is C, so there we just switch
to int.
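
For illustration only, a minimal C++ sketch of the problem, assuming the common x86 value __ATOMIC_HLE_ACQUIRE == (1 << 16); nothing below is from the patch itself:

```cpp
#include <cstdio>

// An enum without a fixed underlying type, like the old __tsan_memory_order.
enum memory_order_like {
  mo_relaxed = 0,
  mo_acquire = 2,
  mo_seq_cst = 5,
};

int main() {
  // Callers may OR an HLE extension bit into the order, e.g.
  // __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE (assumed to be 1 << 16).
  int requested = mo_acquire | (1 << 16);

  // UB in C++: 0x10002 lies outside this enumeration's range of values
  // (0..7 here), so converting it to the enum has no defined result.
  memory_order_like bad = static_cast<memory_order_like>(requested);

  // Well-defined with the new interface: an int keeps every bit intact.
  int good = requested;

  std::printf("%d %d\n", static_cast<int>(bad), good);
  return 0;
}
```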

llvmbot (Member) commented Nov 4, 2024

@llvm/pr-subscribers-compiler-rt-sanitizer

Author: Vitaly Buka (vitalybuka)

Changes

Patch is 32.28 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/114724.diff

4 Files Affected:

  • (modified) compiler-rt/include/sanitizer/tsan_interface_atomic.h (+82-87)
  • (modified) compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp (+59-57)
  • (modified) compiler-rt/lib/tsan/rtl/tsan_interface.h (+8-8)
  • (modified) compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp (+46-32)
diff --git a/compiler-rt/include/sanitizer/tsan_interface_atomic.h b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
index de3a1c3936097d..74ed91efade040 100644
--- a/compiler-rt/include/sanitizer/tsan_interface_atomic.h
+++ b/compiler-rt/include/sanitizer/tsan_interface_atomic.h
@@ -43,183 +43,178 @@ typedef enum {
 } __tsan_memory_order;
 
 __tsan_atomic8 SANITIZER_CDECL
-__tsan_atomic8_load(const volatile __tsan_atomic8 *a, __tsan_memory_order mo);
+__tsan_atomic8_load(const volatile __tsan_atomic8 *a, int mo);
 __tsan_atomic16 SANITIZER_CDECL
-__tsan_atomic16_load(const volatile __tsan_atomic16 *a, __tsan_memory_order mo);
+__tsan_atomic16_load(const volatile __tsan_atomic16 *a, int mo);
 __tsan_atomic32 SANITIZER_CDECL
-__tsan_atomic32_load(const volatile __tsan_atomic32 *a, __tsan_memory_order mo);
+__tsan_atomic32_load(const volatile __tsan_atomic32 *a, int mo);
 __tsan_atomic64 SANITIZER_CDECL
-__tsan_atomic64_load(const volatile __tsan_atomic64 *a, __tsan_memory_order mo);
+__tsan_atomic64_load(const volatile __tsan_atomic64 *a, int mo);
 #if __TSAN_HAS_INT128
-__tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_load(
-    const volatile __tsan_atomic128 *a, __tsan_memory_order mo);
+__tsan_atomic128 SANITIZER_CDECL
+__tsan_atomic128_load(const volatile __tsan_atomic128 *a, int mo);
 #endif
 
 void SANITIZER_CDECL __tsan_atomic8_store(volatile __tsan_atomic8 *a,
-                                          __tsan_atomic8 v,
-                                          __tsan_memory_order mo);
+                                          __tsan_atomic8 v, int mo);
 void SANITIZER_CDECL __tsan_atomic16_store(volatile __tsan_atomic16 *a,
-                                           __tsan_atomic16 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic16 v, int mo);
 void SANITIZER_CDECL __tsan_atomic32_store(volatile __tsan_atomic32 *a,
-                                           __tsan_atomic32 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic32 v, int mo);
 void SANITIZER_CDECL __tsan_atomic64_store(volatile __tsan_atomic64 *a,
-                                           __tsan_atomic64 v,
-                                           __tsan_memory_order mo);
+                                           __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 void SANITIZER_CDECL __tsan_atomic128_store(volatile __tsan_atomic128 *a,
-                                            __tsan_atomic128 v,
-                                            __tsan_memory_order mo);
+                                            __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_exchange(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_exchange(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_exchange(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_exchange(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_exchange(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_exchange(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_add(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_add(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_add(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_add(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_add(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_sub(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_sub(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_sub(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_sub(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_sub(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_and(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_and(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_and(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_and(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_and(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_or(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_or(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_or(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_or(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_or(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_xor(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_xor(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_xor(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_xor(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_xor(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
-__tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_fetch_nand(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic8 SANITIZER_CDECL
+__tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a, __tsan_atomic8 v, int mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_fetch_nand(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 v, __tsan_memory_order mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 v, int mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_fetch_nand(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 v, __tsan_memory_order mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 v, int mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_fetch_nand(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 v, __tsan_memory_order mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 v, int mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_fetch_nand(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 v, __tsan_memory_order mo);
+    volatile __tsan_atomic128 *a, __tsan_atomic128 v, int mo);
 #endif
 
 int SANITIZER_CDECL __tsan_atomic8_compare_exchange_weak(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic16_compare_exchange_weak(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic32_compare_exchange_weak(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic64_compare_exchange_weak(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_weak(
     volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
 int SANITIZER_CDECL __tsan_atomic8_compare_exchange_strong(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 *c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic16_compare_exchange_strong(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 *c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic32_compare_exchange_strong(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 *c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 int SANITIZER_CDECL __tsan_atomic64_compare_exchange_strong(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 *c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 int SANITIZER_CDECL __tsan_atomic128_compare_exchange_strong(
     volatile __tsan_atomic128 *a, __tsan_atomic128 *c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
 __tsan_atomic8 SANITIZER_CDECL __tsan_atomic8_compare_exchange_val(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v, int mo,
+    int fail_mo);
 __tsan_atomic16 SANITIZER_CDECL __tsan_atomic16_compare_exchange_val(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v, int mo,
+    int fail_mo);
 __tsan_atomic32 SANITIZER_CDECL __tsan_atomic32_compare_exchange_val(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v, int mo,
+    int fail_mo);
 __tsan_atomic64 SANITIZER_CDECL __tsan_atomic64_compare_exchange_val(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v, int mo,
+    int fail_mo);
 #if __TSAN_HAS_INT128
 __tsan_atomic128 SANITIZER_CDECL __tsan_atomic128_compare_exchange_val(
     volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
-    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+    int mo, int fail_mo);
 #endif
 
-void SANITIZER_CDECL __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void SANITIZER_CDECL __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void SANITIZER_CDECL __tsan_atomic_thread_fence(int mo);
+void SANITIZER_CDECL __tsan_atomic_signal_fence(int mo);
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 9db0eebd923696..1f6e0ab9f49347 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -40,62 +40,64 @@ int setcontext(const ucontext_t *ucp);
 
 namespace __tsan {
 
-// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
-// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
-// actually aliases of each other, and we cannot have different interceptors for
-// them, because they're actually the same function.  Thus, we have to stay
-// conservative and treat the non-barrier versions as mo_acq_rel.
-static constexpr morder kMacOrderBarrier = mo_acq_rel;
-static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
-static constexpr morder kMacFailureOrder = mo_relaxed;
-
-#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
-    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
-    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
-  }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                        \
-    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                        \
-    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                   \
-  }
-
-#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
-  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                             \
-    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                           \
-    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                   \
-  }
+// The non-barrier versions of OSAtomic* functions are semantically
+// morder::relaxed, but the two variants (e.g. OSAtomicAdd32 and
+// OSAtomicAdd32Barrier) are actually aliases of each other, and we cannot have
+// different interceptors for them, because they're actually the same function.
+// Thus, we have to stay conservative and treat the non-barrier versions as
+// morder::acq_rel.
+static constexpr morder kMacOrderBarrier = morder::acq_rel;
+static constexpr morder kMacOrderNonBarrier = morder::acq_rel;
+static constexpr morder kMacFailureOrder = morder::relaxed;
+
+#  define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
+      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
+      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
+    }
 
-#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
-                                     mo)                                    \
-  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
-    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
-    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
-  }
+#  define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                      mo)                                    \
+    TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                    \
+      SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                    \
+      return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;               \
+    }
 
-#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)                  \
-  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,             \
-    kMacOrderNonBarrier)                                                       \
-  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,    \
-    kMacOrderBarrier)                                                          \
-  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,             \
-    kMacOrderNonBarrier)                                                       \
-  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f,    \
-    kMacOrderBarrier)
-
-#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
-  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
-    kMacOrderNonBarrier)                                                       \
-  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
-    kMacOrderBarrier)                                                          \
-  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
-    kMacOrderNonBarrier)                                                       \
-  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
-    __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+#  define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                      mo)                                    \
+    TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                         \
+      SCOPED_TSAN_INTERCEPTOR(f, ptr);                                       \
+      return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;               \
+    }
 
+#  define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+                                       mo)      ...
[truncated]

vitalybuka requested review from melver and dvyukov November 4, 2024 00:20
@@ -219,14 +219,14 @@ __extension__ typedef __int128 a128;

// Part of ABI, do not change.
A reviewer (Contributor) commented:

Is it still following the ABI? If we do not static_cast it to int, then I don't think this is strictly required to follow the ABI.

vitalybuka (Collaborator, Author) replied:

Does #114916 help?

vitalybuka added a commit that referenced this pull request Nov 5, 2024
Prepare to replace macro with template.

Related to #114724, but it's not strictly needed.
vitalybuka added a commit that referenced this pull request Nov 5, 2024
In #114724 I'd like to cast from int to enum, but
existing code `mo = convert_morder(mo)` does not
allow that.
vitalybuka requested a review from melver November 5, 2024 05:06
vitalybuka changed the base branch from users/vitalybuka/spr/main.tsan-dont-use-enum-__tsan_memory_order-in-tsan-interface to main November 5, 2024 05:16
vitalybuka merged commit 1e50958 into main Nov 5, 2024
7 checks passed
vitalybuka deleted the users/vitalybuka/spr/tsan-dont-use-enum-__tsan_memory_order-in-tsan-interface branch November 5, 2024 17:21
felipepiovezan (Contributor) commented Nov 5, 2024

This is breaking the greendragon bots; could you revert, or submit a fix if it's a quick one?

/Users/ec2-user/jenkins/workspace/llvm.org/as-lldb-cmake/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp:103:1: error: no matching function for call to '__tsan_atomic32_fetch_add'
  103 | OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
      | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  104 |                                  OSATOMIC_INTERCEPTOR_PLUS_X)
      |                                  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/ec2-user/jenkins/workspace/llvm.org/as-lldb-cmake/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp:81:37: note: expanded from macro 'OSATOMIC_INTERCEPTORS_ARITHMETIC'
   81 |     m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,         \
      |                                     ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
<scratch space>:6:1: note: expanded from here
    6 | __tsan_atomic32_fetch_add
      | ^~~~~~~~~~~~~~~~~~~~~~~~~
/Users/ec2-user/jenkins/workspace/llvm.org/as-lldb-cmake/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp:63:14: note: expanded from macro 'OSATOMIC_INTERCEPTOR_PLUS_X'
   63 |       return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;               \
      |              ^~~~~~~~~~~~~
/Users/ec2-user/jenkins/workspace/llvm.org/as-lldb-cmake/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h:278:5: note: candidate function not viable: no known conversion from 'const morder' to 'int' for 3rd argument
  278 | a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, int mo);
      |     ^                                                 ~~~~~~

https://green.lab.llvm.org/job/llvm.org/view/LLDB/job/as-lldb-cmake/14686/console
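
The root cause is reproducible in isolation; here is a hedged stand-in (the names below are placeholders, not the real declarations) showing that a scoped-enum constant no longer converts implicitly to the interface's new `int` parameter:

```cpp
enum class morder : int { relaxed, acquire, release, acq_rel, seq_cst };

// Placeholder with the post-patch shape of the interface: `mo` is an int.
static int tsan_atomic32_fetch_add_like(volatile int *a, int v, int mo) {
  (void)mo;
  int old = *a;  // non-atomic stand-in, for illustration only
  *a = old + v;
  return old;
}

static constexpr morder kMacOrderNonBarrier = morder::acq_rel;

int os_atomic_add_like(volatile int *ptr) {
  // Fails exactly as in the log above: no implicit conversion from
  // 'const morder' to 'int' for the 3rd argument.
  // return tsan_atomic32_fetch_add_like(ptr, 1, kMacOrderNonBarrier) + 1;

  // Compiles: convert the scoped enum explicitly at the call site.
  return tsan_atomic32_fetch_add_like(
             ptr, 1, static_cast<int>(kMacOrderNonBarrier)) + 1;
}
```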

vitalybuka added a commit that referenced this pull request Nov 5, 2024
Reapply "[tsan] Don't use enum __tsan_memory_order in tsan interface" (#115034)

In C++ it's UB to use values outside the declared enumerators as an enum
value, and supporting __ATOMIC_HLE_ACQUIRE and __ATOMIC_HLE_RELEASE
requires exactly such values.

So use `int` in the TSAN interface, mask out the irrelevant bits, and
cast to the enum as soon as possible.

`ThreadSanitizer.cpp` already declares the morder parameters of these
functions as `i32`.

This may look like a slight behavior change, as we previously didn't
mask out the additional bits for `fmo` and the `NoTsanAtomic` call. But
from the implementation it's clear that they expect exact enum values.


Reverts #115032
Reapply #114724
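
As a sketch of the "mask out irrelevant bits and cast to enum ASAP" approach described above (the function name matches the `convert_morder` mentioned earlier in this thread, but the 0x7fff mask and surrounding code are assumptions, not the actual rtl sources):

```cpp
// Scoped enum with a fixed underlying type: holding any int value of that
// type is well-defined, unlike the old unscoped interface enum.
enum class morder : int { relaxed, consume, acquire, release, acq_rel, seq_cst };

// Convert the C-level int to the internal enum as early as possible,
// dropping extension bits such as an HLE flag at 1 << 16 (assumed mask).
static morder convert_morder(int mo) {
  return static_cast<morder>(mo & 0x7fff);
}

int atomic32_load_like(const volatile int *a, int mo) {
  morder order = convert_morder(mo);  // enum from here on; int only at the C boundary
  (void)order;                        // ... dispatch on `order` internally
  return *a;                          // non-atomic stand-in for illustration
}
```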
PhilippRados pushed a commit to PhilippRados/llvm-project that referenced this pull request Nov 6, 2024
Prepare to replace macro with template.

Related to llvm#114724, but it's not strictly needed.
PhilippRados pushed a commit to PhilippRados/llvm-project that referenced this pull request Nov 6, 2024
In llvm#114724 I'd like to cast from int to enum, but
existing code `mo = convert_morder(mo)` does not
allow that.