Skip to content

[libc] Add support for C++20 'atomic_ref' type #132302

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Mar 25, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
148 changes: 148 additions & 0 deletions libc/src/__support/CPP/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,154 @@ template <typename T> struct Atomic {
LIBC_INLINE void set(T rhs) { val = rhs; }
};

// A non-owning atomic view over an existing object, modeled on C++20's
// std::atomic_ref. Every operation is performed atomically on the referenced
// object; the referenced object must outlive all AtomicRef instances bound to
// it, and all concurrent access must go through an AtomicRef.
template <typename T> struct AtomicRef {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "AtomicRef<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "AtomicRef<T> only supports types with unique object "
                "representations.");

private:
  // Non-owning pointer to the referenced object.
  T *ptr;

  // Lower the strongly-typed memory order to the int the builtins expect.
  LIBC_INLINE static int order(MemoryOrder mem_ord) {
    return static_cast<int>(mem_ord);
  }

  // Lower the strongly-typed memory scope to the int the builtins expect.
  LIBC_INLINE static int scope(MemoryScope mem_scope) {
    return static_cast<int>(mem_scope);
  }

public:
  // Constructor from T reference
  LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}

  // Non-standard Implicit conversion from T*
  LIBC_INLINE constexpr AtomicRef(T *obj) : ptr(obj) {}

  LIBC_INLINE AtomicRef(const AtomicRef &) = default;
  LIBC_INLINE AtomicRef &operator=(const AtomicRef &) = default;

  // Atomic load
  LIBC_INLINE operator T() const { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(ptr, &res, order(mem_ord), scope(mem_scope));
#else
    __atomic_load(ptr, &res, order(mem_ord));
#endif
    return res;
  }

  // Atomic store
  LIBC_INLINE T operator=(T rhs) const {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(ptr, &rhs, order(mem_ord), scope(mem_scope));
#else
    __atomic_store(ptr, &rhs, order(mem_ord));
#endif
  }

  // Atomic compare exchange (strong). On failure, 'expected' is updated with
  // the value actually observed. Uses the scoped builtin when available so
  // 'mem_scope' is honored, matching the other operations in this class.
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_compare_exchange)
    return __scoped_atomic_compare_exchange(ptr, &expected, &desired, false,
                                            order(mem_ord), order(mem_ord),
                                            scope(mem_scope));
#else
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     order(mem_ord), order(mem_ord));
#endif
  }

  // Atomic compare exchange (strong, separate success/failure memory orders)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_compare_exchange)
    return __scoped_atomic_compare_exchange(ptr, &expected, &desired, false,
                                            order(success_order),
                                            order(failure_order),
                                            scope(mem_scope));
#else
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     order(success_order),
                                     order(failure_order));
#endif
  }

  // Atomic exchange: stores 'desired' and returns the previous value.
  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(ptr, &desired, &ret, order(mem_ord),
                             scope(mem_scope));
#else
    __atomic_exchange(ptr, &desired, &ret, order(mem_ord));
#endif
    return ret;
  }

  // Atomically adds 'increment' and returns the previous value.
  LIBC_INLINE T fetch_add(
      T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(ptr, increment, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_add(ptr, increment, order(mem_ord));
#endif
  }

  // Atomically ORs in 'mask' and returns the previous value.
  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(ptr, mask, order(mem_ord),
                                    scope(mem_scope));
#else
    return __atomic_fetch_or(ptr, mask, order(mem_ord));
#endif
  }

  // Atomically ANDs with 'mask' and returns the previous value.
  LIBC_INLINE T fetch_and(
      T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(ptr, mask, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_and(ptr, mask, order(mem_ord));
#endif
  }

  // Atomically subtracts 'decrement' and returns the previous value.
  LIBC_INLINE T fetch_sub(
      T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(ptr, decrement, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_sub(ptr, decrement, order(mem_ord));
#endif
  }
};

// Deduction guide: permit CTAD so `AtomicRef ref(obj);` deduces AtomicRef<T>
// from a T lvalue without spelling out the template argument.
template <typename T> AtomicRef(T &) -> AtomicRef<T>;

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(
MemoryOrder mem_ord,
Expand Down
15 changes: 15 additions & 0 deletions libc/test/src/__support/CPP/atomic_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,3 +50,18 @@ TEST(LlvmLibcAtomicTest, TrivialCompositeData) {
ASSERT_EQ(old.a, 'a');
ASSERT_EQ(old.b, 'b');
}

TEST(LlvmLibcAtomicTest, AtomicRefTest) {
  // Bind an atomic view to a plain integer and exercise the basic operations.
  int shared = 123;
  LIBC_NAMESPACE::cpp::AtomicRef view(shared);

  // load() observes the current value; fetch_add returns the prior value.
  ASSERT_EQ(view.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 123);
  ASSERT_EQ(view.fetch_add(1, LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 123);

  // operator= performs an atomic store.
  view = 1234;
  ASSERT_EQ(view.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 1234);

  // Test the implicit construction from pointer.
  auto read_relaxed = [](LIBC_NAMESPACE::cpp::AtomicRef<int> ref) -> int {
    return ref.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED);
  };
  ASSERT_EQ(read_relaxed(&shared), 1234);
}
Loading