 using utest::v1::Case;

-
 namespace {

 #define ADD_ITERATIONS (SystemCoreClock / 1000)

 template <typename T>
-void add_incrementer(T *ptr)
+void add_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+        ++(*ptr);
     }
 }

 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+        ptr->fetch_add(1, memory_order_release);
     }
 }

 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+        ptr->fetch_sub(-1);
     }
 }

 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }

 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }

 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }

@@ -95,19 +94,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template <typename T, void (*Fn)(T *)>
+template <typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;

+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;

     Thread t1(osPriorityNormal, THREAD_STACK);
@@ -131,8 +134,8 @@ void test_atomic_add()
     t4.join();

     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic2);
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
 }
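For reference, the compare-exchange retry loop used by weak_incrementer and strong_incrementer above never loses an increment: on failure, compare_exchange_weak/strong writes the freshly observed value back into val, so the next iteration retries against current data. Below is a minimal, self-contained sketch of the same idiom written against std::atomic rather than the Mbed Atomic<T> template (the diff shows Atomic<T> exposing the same member functions); cas_increment is an illustrative name, not part of the test.

#include <atomic>
#include <cstdint>

// Increment *ptr with a compare-exchange retry loop, mirroring
// weak_incrementer above. On failure, compare_exchange_weak stores
// the value it actually observed into 'val', so the loop retries
// with up-to-date data and the increment is never lost.
template <typename T>
void cas_increment(std::atomic<T> *ptr)
{
    T val = ptr->load();
    do {
    } while (!ptr->compare_exchange_weak(val, T(val + 1)));
}

int main()
{
    std::atomic<uint32_t> counter{0};
    cas_increment(&counter);
    return counter.load() == 1 ? 0 : 1; // exits 0 on success
}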