
Commit 2a0af33

Make atomic test use Atomic<T>

Make the atomic functional test use `Atomic<T>` rather than the standalone functions - this then tests both layers simultaneously.

1 parent f633abd · commit 2a0af33
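
For context (not part of the commit): Mbed OS exposes atomics at two layers - the standalone `core_util_atomic_*` functions and the `Atomic<T>` template implemented on top of them, which is why driving the test through `Atomic<T>` exercises both at once. A minimal sketch of the same increment at each layer, assuming `mbed.h` pulls in both and that `Atomic<T>` is visible unqualified, as in the test:

    #include "mbed.h"

    void increment_both_layers()
    {
        uint32_t raw = 0;
        Atomic<uint32_t> wrapped;
        wrapped = 0;

        core_util_atomic_fetch_add(&raw, uint32_t(1)); // standalone function layer
        ++wrapped;                                     // Atomic<T> layer, built on the functions above
    }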

1 file changed: +27 -24 lines

TESTS/mbed_platform/atomic/main.cpp

@@ -26,62 +26,61 @@
 
 using utest::v1::Case;
 
-
 namespace {
 
 #define ADD_ITERATIONS (SystemCoreClock / 1000)
 
 template <typename T>
-void add_incrementer(T *ptr)
+void add_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+        ++(*ptr);
     }
 }
 
 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+        ptr->fetch_add(1, memory_order_release);
     }
 }
 
 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+        ptr->fetch_sub(-1);
     }
 }
 
 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }
 
 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }
 
 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }
 
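A detail that makes the weak/strong loops above work: like `std::atomic`, `compare_exchange_weak`/`_strong` take the expected value by reference and, on failure, reload it with the value actually observed - the old C API expressed the same thing with the `&val` pointer argument. A standalone sketch of the retry pattern, mirroring `weak_incrementer` (illustrative only):

    template <typename T>
    void increment_once(Atomic<T> *ptr)
    {
        T val = ptr->load();
        // On failure, val is refreshed with the current value, so each
        // retry computes val + 1 from fresh data rather than looping blindly.
        while (!ptr->compare_exchange_weak(val, val + 1)) {
        }
    }
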

@@ -95,19 +94,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template<typename T, void (*Fn)(T *)>
+template<typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;
 
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
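
The three new `TEST_ASSERT_EQUAL(sizeof...)` checks pin down layout: `Atomic<T>` must occupy exactly `sizeof(T)`, and the four-member struct must contain no padding, so the atomic members stay directly adjacent to the volatile guard values on either side. Expressed as a compile-time sketch (an assumption about the wrapper that these runtime asserts exist to verify):

    // If the wrapper held anything besides the value (e.g. a lock), this
    // would fail and the guard-adjacency of the test struct would be lost.
    static_assert(sizeof(Atomic<uint32_t>) == sizeof(uint32_t),
                  "Atomic<T> must not change size");
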
@@ -131,8 +134,8 @@ void test_atomic_add()
     t4.join();
 
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic2);
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
 }
 
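Note also that the final asserts pass `data.atomic1` straight to `TEST_ASSERT_EQUAL`: the explicit `core_util_atomic_load` calls are replaced by `Atomic<T>`'s conversion to `T`, which performs the atomic load implicitly. A small sketch of the two equivalent reads (illustrative):

    void read_back_example()
    {
        Atomic<uint32_t> a;
        a = 5;
        uint32_t x = a;        // implicit conversion performs an atomic load
        uint32_t y = a.load(); // explicit equivalent
        (void)x;
        (void)y;
    }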
