
Commit 1e0e131

Make atomic test use Atomic<T>
Make the atomic functional test use `Atomic<T>` rather than the standalone functions - this then tests both layers simultaneously.
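For orientation, a minimal sketch of the two layers this test now exercises together: the standalone `core_util_atomic_*` functions and the `Atomic<T>` wrapper built on top of them. The header paths, the `demo` function, and the variable names are illustrative assumptions, not taken from this commit; the `mbed::` qualification follows the `mbed::memory_order_release` usage visible in the diff.

```cpp
#include <cstdint>
#include "platform/Atomic.h"      // assumed home of mbed's Atomic<T> wrapper
#include "platform/mbed_atomic.h" // assumed home of the core_util_atomic_* functions

void demo()
{
    // Layer 1: standalone functions operating on a plain integer
    uint32_t raw = 0;
    core_util_atomic_fetch_add(&raw, uint32_t(1));

    // Layer 2: Atomic<T>, whose operators and members wrap the same layer
    mbed::Atomic<uint32_t> wrapped(0);                // init form is an assumption
    ++wrapped;                                        // atomic increment via operator
    wrapped.fetch_add(1, mbed::memory_order_release); // explicit memory-order variant
}
```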
1 parent: 685574f

File tree

1 file changed, +27 −24 lines


TESTS/mbed_platform/atomic/main.cpp

Lines changed: 27 additions & 24 deletions
@@ -26,7 +26,6 @@
 
 using utest::v1::Case;
 
-
 namespace {
 
 /* Lock-free operations will be much faster - keep runtime down */
@@ -37,56 +36,56 @@ namespace {
 #endif
 
 template <typename T>
-void add_incrementer(T *ptr)
+void add_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+        ++(*ptr);
     }
 }
 
 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+        ptr->fetch_add(1, mbed::memory_order_release);
     }
 }
 
 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+        ptr->fetch_sub(-1);
     }
 }
 
 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }
 
 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }
 
 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }

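A note on the two compare-exchange loops above: on failure, `compare_exchange_weak`/`compare_exchange_strong` write the freshly observed value back into `val`, so each retry works with up-to-date data, and the weak form may additionally fail spuriously, which is why it only makes sense inside a loop. A self-contained sketch of the same pattern against `std::atomic` (names are illustrative; the `Atomic<T>` members in the diff have the same shape):

```cpp
#include <atomic>
#include <cstdint>

// Increment via a compare-exchange retry loop: on failure the current
// value is written back into 'expected', so the loop retries with
// fresh data until the swap succeeds.
void cas_increment(std::atomic<uint32_t> &a)
{
    uint32_t expected = a.load();
    while (!a.compare_exchange_weak(expected, expected + 1)) {
        // spurious or contended failure: 'expected' was refreshed, retry
    }
}
```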
@@ -100,19 +99,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template<typename T, void (*Fn)(T *)>
+template<typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;
 
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
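The three new `sizeof` assertions pin down a layout property: `Atomic<T>` is expected to occupy exactly `sizeof(T)`, with no lock word or padding, so the four-member struct is exactly `4 * sizeof(T)`. A compile-time sketch of the same idea, written against `std::atomic` for portability; this holds on typical lock-free targets, which is exactly what the runtime asserts verify:

```cpp
#include <atomic>
#include <cstdint>

// The wrapper should add no storage overhead for a lock-free type...
static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "atomic wrapper should not add a lock word or padding");

// ...so a struct of four same-sized members packs with no gaps.
struct Layout {
    volatile uint32_t nonatomic1;
    std::atomic<uint32_t> atomic1;
    std::atomic<uint32_t> atomic2;
    volatile uint32_t nonatomic2;
};
static_assert(sizeof(Layout) == 4 * sizeof(uint32_t),
              "atomics must not disturb the surrounding layout");
```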
@@ -136,8 +139,8 @@ void test_atomic_add()
     t4.join();
 
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic2);
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
 }

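Reading the final assertions: each `Atomic<T>` ends at `2 * ADD_ITERATIONS`, which implies two of the four worker threads target each atomic, while the `volatile` non-atomic members bracketing them are each written by a single thread and so end at `ADD_ITERATIONS`; they double as guards that would expose an atomic operation straying into neighbouring memory. In summary (the two-threads-per-atomic wiring is an inference from the totals, not shown in these hunks):

```cpp
// Expected end state, per the assertions above:
//   data.nonatomic1 == ADD_ITERATIONS      // one writer (guard value)
//   data.atomic1    == 2 * ADD_ITERATIONS  // two concurrent incrementers
//   data.atomic2    == 2 * ADD_ITERATIONS  // two concurrent incrementers
//   data.nonatomic2 == ADD_ITERATIONS      // one writer (guard value)
```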