Commit 68d4af7

Make atomic test use Atomic<T>
Make the atomic functional test use `Atomic<T>` rather than the standalone functions - this then tests both layers simultaneously.
1 parent f685927 commit 68d4af7
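
The "both layers" remark is the point of the change: mbed's C++ `Atomic<T>` template is a wrapper over the standalone `core_util_atomic_*` functions, so driving the test through `Atomic<T>` exercises the wrapper and the functions beneath it in one run. A minimal sketch of that relationship, assuming the wrapper forwards to the standalone layer as in mbed OS (the counters and helper here are illustrative, not part of the commit):

#include "mbed.h"

static uint32_t raw_counter;                    // plain T: the standalone layer's target
static mbed::Atomic<uint32_t> wrapped_counter;  // the C++ template layer

void increment_both()
{
    // Layer 1: standalone function applied to a plain object.
    core_util_atomic_fetch_add(&raw_counter, uint32_t(1));

    // Layer 2: Atomic<T>'s operator++, which is itself implemented on
    // top of the standalone functions - one call path, both layers.
    ++wrapped_counter;
}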

File tree

1 file changed: +44 -38 lines changed

TESTS/mbed_platform/atomic/main.cpp

Lines changed: 44 additions & 38 deletions
@@ -26,67 +26,69 @@
 
 using utest::v1::Case;
 
-
 namespace {
 
 /* Lock-free operations will be much faster - keep runtime down */
-#if MBED_ATOMIC_INT_LOCK_FREE
-#define ADD_ITERATIONS (SystemCoreClock / 1000)
-#else
-#define ADD_ITERATIONS (SystemCoreClock / 8000)
-#endif
+#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
+#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)
 
 template <typename T>
-void add_incrementer(T *ptr)
+static inline long add_iterations(const Atomic<T> &a)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+    return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
+}
+
+template <typename T>
+void add_incrementer(Atomic<T> *ptr)
+{
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ++(*ptr);
     }
 }
 
 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ptr->fetch_add(1, mbed::memory_order_release);
     }
 }
 
 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ptr->fetch_sub(-1);
     }
 }
 
 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }
 
 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }
 
 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }
 
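Two details of the converted incrementers are easy to miss. First, every variant still nets exactly +1 per iteration: `fetch_sub(-1)` subtracts minus one, and the bit operations AND with -1 and OR with 0 are identities around the `+= 1`, so the final asserts can expect 2 * add_iterations() per variable from two threads. Second, the empty `do {} while (...)` body works because a failed compare-exchange writes the value it actually observed back into `val`. Spelled out against standard C++ `std::atomic`, whose interface `Atomic<T>` mirrors (an illustrative sketch, not code from the commit):

#include <atomic>

// The same read-modify-write retry loop as weak_incrementer above.
template <typename T>
void cas_increment(std::atomic<T> *ptr)
{
    T val = ptr->load();
    // On failure, compare_exchange_weak reloads `val` with the current
    // value, so the next attempt starts from fresh data. The _weak form
    // may also fail spuriously (with no competing writer), which is why
    // it must always sit in a retry loop; _strong fails only on a real
    // value mismatch.
    while (!ptr->compare_exchange_weak(val, val + 1)) {
    }
}
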
@@ -100,19 +102,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template<typename T, void (*Fn)(T *)>
+template<typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;
 
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
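
The three new TEST_ASSERT_EQUAL checks pin the layout down: each Atomic<T> member must occupy exactly sizeof(T), and the four-member struct must contain no padding, i.e. wrapping a value in Atomic<T> costs no storage. A compile-time sketch of the same conditions, shown with std::atomic purely for illustration (the equalities hold on common ABIs but are not guaranteed by the C++ standard, which is presumably why the test checks them at run time):

#include <atomic>
#include <cstdint>

struct Layout {
    volatile uint32_t nonatomic1;
    std::atomic<uint32_t> atomic1;
    std::atomic<uint32_t> atomic2;
    volatile uint32_t nonatomic2;
};

// Same conditions as the run-time asserts in test_atomic_add().
static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "atomic wrapper must add no storage");
static_assert(sizeof(Layout) == 4 * sizeof(uint32_t),
              "struct must have no inter-member padding");
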
@@ -125,7 +131,7 @@ void test_atomic_add()
     TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
     TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
 
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
+    for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
         data.nonatomic1++;
         data.nonatomic2++;
     }
@@ -135,10 +141,10 @@ void test_atomic_add()
     t3.join();
     t4.join();
 
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
 }
 
 } // namespace
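
The iteration budget also becomes per-type rather than global: the old ADD_ITERATIONS was fixed at compile time by MBED_ATOMIC_INT_LOCK_FREE, which describes int-sized atomics only, while the new add_iterations() asks each Atomic<T> instance via is_lock_free() at run time. A hedged usage sketch reusing the helper from the diff above - which widths are lock-free depends on the target, and 64-bit atomics on 32-bit Cortex-M cores typically are not:

mbed::Atomic<uint32_t> a32;  // typically lock-free on Cortex-M
mbed::Atomic<uint64_t> a64;  // typically the locked fallback on 32-bit cores

void show_budgets()
{
    long fast = add_iterations(a32);  // ADD_UNLOCKED_ITERATIONS when lock-free
    long slow = add_iterations(a64);  // ADD_LOCKED_ITERATIONS: 8x fewer, keeping
                                      // the slower locked path's runtime down
    (void)fast;
    (void)slow;
}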
