Skip to content

Commit 2a2db67

Browse files
committed
Make atomic test use Atomic<T>
Make the atomic functional test use `Atomic<T>` rather than the standalone functions - this then tests both layers simultaneously.
1 parent 87f7c11 commit 2a2db67

File tree

1 file changed: 82 additions, 59 deletions

TESTS/mbed_platform/atomic/main.cpp

Lines changed: 82 additions & 59 deletions
Original file line number | Diff line number | Diff line change
@@ -26,69 +26,90 @@
2626

2727
using utest::v1::Case;
2828

29-
3029
namespace {
3130

3231
/* Lock-free operations will be much faster - keep runtime down */
33-
#if MBED_ATOMIC_INT_LOCK_FREE
34-
#define ADD_ITERATIONS (SystemCoreClock / 1000)
35-
#else
36-
#define ADD_ITERATIONS (SystemCoreClock / 8000)
37-
#endif
32+
#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
33+
#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)
3834

39-
template <typename T>
40-
void add_incrementer(T *ptr)
35+
template <typename A>
36+
static inline long add_iterations(A &a)
4137
{
42-
for (long i = ADD_ITERATIONS; i > 0; i--) {
43-
core_util_atomic_fetch_add(ptr, T(1));
44-
}
38+
return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
4539
}
4640

// Incrementer that drives the atomic through its pre-increment operator.
template <typename A>
struct add_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            ++(*ptr);
        }
    }
};
5451

55-
template <typename T>
56-
void sub_incrementer(T *ptr)
52+
template <typename A>
53+
struct add_release_incrementer
5754
{
58-
for (long i = ADD_ITERATIONS; i > 0; i--) {
59-
core_util_atomic_fetch_sub(ptr, T(-1));
55+
static void op(A *ptr)
56+
{
57+
for (long i = add_iterations(*ptr); i > 0; i--) {
58+
ptr->fetch_add(1, mbed::memory_order_release);
59+
}
6060
}
61-
}
61+
};
6262

// Incrementer that adds 1 by subtracting -1, exercising fetch_sub.
template <typename A>
struct sub_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            ptr->fetch_sub(-1);
        }
    }
};
7273

// Incrementer via +=, with behaviour-neutral `&= -1` and `|= 0` mixed in
// purely to exercise the bitwise compound-assignment operators.
template <typename A>
struct bitops_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            (*ptr) += 1;
            (*ptr) &= -1;
            (*ptr) |= 0;
        }
    }
};
8286

// Incrementer using a load + compare_exchange_weak retry loop; the weak form
// may fail spuriously, hence the empty-bodied do/while retry.
template <typename A>
struct weak_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            typename A::value_type val = ptr->load();
            do {
            } while (!ptr->compare_exchange_weak(val, val + 1));
        }
    }
};
// Incrementer using a load + compare_exchange_strong retry loop; retries
// only on genuine contention (val is refreshed on failure).
template <typename A>
struct strong_incrementer {
    static void op(A *ptr)
    {
        for (long i = add_iterations(*ptr); i > 0; i--) {
            typename A::value_type val = ptr->load();
            do {
            } while (!ptr->compare_exchange_strong(val, val + 1));
        }
    }
};
92113

93114

94115
/*
@@ -100,32 +121,34 @@ void strong_incrementer(T *ptr)
100121
* Using core_util_atomic_ templates, and exercising
101122
* load and store briefly.
102123
*/
103-
template<typename T, void (*Fn)(T *)>
124+
template<typename T, template<typename A> class Fn>
104125
void test_atomic_add()
105126
{
106127
struct {
107128
volatile T nonatomic1;
108-
T atomic1;
109-
T atomic2;
129+
Atomic<T> atomic1;
130+
volatile Atomic<T> atomic2; // use volatile just to exercise the templates' volatile methods
110131
volatile T nonatomic2;
111-
} data;
132+
} data = { 0, { 0 }, { 1 }, 0 }; // test initialisation
133+
134+
TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
135+
TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
136+
TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
112137

113-
data.nonatomic1 = 0;
114-
core_util_atomic_store(&data.atomic1, T(0));
115-
core_util_atomic_store(&data.atomic2, T(0));
116-
data.nonatomic2 = 0;
138+
// test store
139+
data.atomic2 = 0;
117140

118141
Thread t1(osPriorityNormal, THREAD_STACK);
119142
Thread t2(osPriorityNormal, THREAD_STACK);
120143
Thread t3(osPriorityNormal, THREAD_STACK);
121144
Thread t4(osPriorityNormal, THREAD_STACK);
122145

123-
TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1)));
124-
TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1)));
125-
TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
126-
TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
146+
TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
147+
TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
148+
TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
149+
TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
127150

128-
for (long i = ADD_ITERATIONS; i > 0; i--) {
151+
for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
129152
data.nonatomic1++;
130153
data.nonatomic2++;
131154
}
@@ -135,10 +158,10 @@ void test_atomic_add()
135158
t3.join();
136159
t4.join();
137160

138-
TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
139-
TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
140-
TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
141-
TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
161+
TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
162+
TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
163+
TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
164+
TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
142165
}
143166

144167
} // namespace

0 commit comments

Comments
 (0)