 using utest::v1::Case;
 
-
 namespace {
 
 /* Lock-free operations will be much faster - keep runtime down */
@@ -37,56 +36,56 @@ namespace {
 #endif
 
 template <typename T>
-void add_incrementer(T *ptr)
+void add_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+        ++(*ptr);
     }
 }
 
 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+        ptr->fetch_add(1, mbed::memory_order_release);
     }
 }
 
 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+        ptr->fetch_sub(-1);
     }
 }
 
 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }
 
 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }
 
 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
     for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }
 
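The weak_incrementer and strong_incrementer changes above keep the standard compare-exchange retry idiom: on failure, compare_exchange_weak/strong writes the value it actually observed back into val, so the next attempt works with fresh data. A minimal standalone sketch of that pattern, written against std::atomic purely for illustration (the Atomic<T> calls in the diff use the same signatures), assuming nothing else about the test file:

#include <atomic>

// Illustration only, not part of the commit: the same lock-free increment
// loop as weak_incrementer, expressed with std::atomic. On failure,
// compare_exchange_weak stores the current value into 'expected', so each
// retry uses fresh data; spurious failures are absorbed by the loop.
template <typename T>
void cas_increment(std::atomic<T> &a)
{
    T expected = a.load();
    while (!a.compare_exchange_weak(expected, expected + 1)) {
        // 'expected' was updated by the failed exchange; retry
    }
}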
@@ -100,19 +99,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template <typename T, void (*Fn)(T *)>
+template <typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;
 
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
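The three new TEST_ASSERT_EQUAL lines pin down the layout assumption behind the test: Atomic<T> must occupy exactly sizeof(T), i.e. a bare lock-free wrapper with no lock word or padding, so the struct stays at 4 * sizeof(T) and the volatile members remain adjacent to the atomics. A hedged compile-time sketch of the same idea, written against std::atomic because the Atomic<T> header is outside this diff; the size equality is implementation-specific, which is exactly why the test also checks it at run time:

#include <atomic>
#include <cstdint>

// Sketch only: on the lock-free targets this test is aimed at, the atomic
// wrapper is expected to add no storage beyond the wrapped value.
static_assert(std::atomic<std::uint32_t>::is_always_lock_free,
              "assumes lock-free 32-bit atomics (C++17)");
static_assert(sizeof(std::atomic<std::uint32_t>) == sizeof(std::uint32_t),
              "atomic wrapper should add no storage overhead");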
@@ -136,8 +139,8 @@ void test_atomic_add()
     t4.join();
 
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), data.atomic2);
     TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
 }
 
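The rewritten bodies rely on the operator overloads an std::atomic-style wrapper provides: operator= for the stores, ++/+=/&=/|= for the read-modify-writes, and an implicit conversion to T for the final assertions. A small self-contained sketch of those operations, using std::atomic for illustration; it also shows why bitops_incrementer still nets exactly +1 per iteration, since AND with all-ones and OR with zero leave the value unchanged:

#include <atomic>
#include <cassert>

int main()
{
    std::atomic<unsigned> a;
    a = 0;            // atomic store via operator=, as in "data.atomic1 = 0;"
    ++a;              // fetch_add(1) via operator++, as in add_incrementer
    a += 1;           // fetch_add via operator+=, as in bitops_incrementer
    a &= ~0u;         // fetch_and with all-ones: value unchanged
    a |= 0;           // fetch_or with zero: value unchanged
    unsigned v = a;   // atomic load via implicit conversion, as in the asserts
    assert(v == 2);
    return 0;
}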