 using utest::v1::Case;
 
-
 namespace {
 
 /* Lock-free operations will be much faster - keep runtime down */
-#if MBED_ATOMIC_INT_LOCK_FREE
-#define ADD_ITERATIONS (SystemCoreClock / 1000)
-#else
-#define ADD_ITERATIONS (SystemCoreClock / 8000)
-#endif
+#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
+#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)
 
 template <typename T>
-void add_incrementer(T *ptr)
+static inline long add_iterations(const Atomic<T> &a)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
+    return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
+}
+
+template <typename T>
+void add_incrementer(Atomic<T> *ptr)
+{
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ++(*ptr);
     }
 }
 
 template <typename T>
-void add_release_incrementer(T *ptr)
+void add_release_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ptr->fetch_add(1, mbed::memory_order_release);
     }
 }
 
 template <typename T>
-void sub_incrementer(T *ptr)
+void sub_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        ptr->fetch_sub(-1);
     }
 }
 
 template <typename T>
-void bitops_incrementer(T *ptr)
+void bitops_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        (*ptr) += 1;
+        (*ptr) &= -1;
+        (*ptr) |= 0;
     }
 }
 
 template <typename T>
-void weak_incrementer(T *ptr)
+void weak_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        T val = ptr->load();
         do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_weak(val, val + 1));
     }
 }
 
 template <typename T>
-void strong_incrementer(T *ptr)
+void strong_incrementer(Atomic<T> *ptr)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
+    for (long i = add_iterations(*ptr); i > 0; i--) {
+        T val = *ptr;
         do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+        } while (!ptr->compare_exchange_strong(val, val + 1));
     }
 }
 
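The empty do/while bodies in weak_incrementer and strong_incrementer work because compare_exchange_weak/_strong write the freshly observed value back into val whenever the exchange fails, so each retry starts from current data (and the weak form may additionally fail spuriously). A minimal standalone sketch of the same retry pattern, written against std::atomic rather than the Atomic<T> wrapper so it compiles on a host; the names counter and increment_once are illustrative only and not part of this change:

    #include <atomic>
    #include <cstdio>

    // Illustrative only: same retry pattern as weak_incrementer above,
    // using std::atomic so it can be built and run on a host machine.
    static std::atomic<int> counter{0};

    void increment_once()
    {
        int val = counter.load();
        do {
            // On failure, compare_exchange_weak reloads the current value
            // into 'val', so the next attempt uses fresh data. Spurious
            // failures are allowed, hence the loop even when uncontended.
        } while (!counter.compare_exchange_weak(val, val + 1));
    }

    int main()
    {
        increment_once();
        std::printf("counter = %d\n", counter.load());  // prints 1
        return 0;
    }
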
@@ -100,19 +102,23 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template <typename T, void (*Fn)(T *)>
+template <typename T, void (*Fn)(Atomic<T> *)>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        Atomic<T> atomic2;
         volatile T nonatomic2;
     } data;
 
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
+
     data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
+    data.atomic1 = 0;
+    data.atomic2 = 0;
     data.nonatomic2 = 0;
 
     Thread t1(osPriorityNormal, THREAD_STACK);
@@ -125,7 +131,7 @@ void test_atomic_add()
     TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
     TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
 
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
+    for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
         data.nonatomic1++;
         data.nonatomic2++;
     }
@@ -135,10 +141,10 @@ void test_atomic_add()
     t3.join();
     t4.join();
 
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
 }
 
 } // namespace
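
For context, one way a templated test like test_atomic_add<T, Fn> is typically wired into a utest Case table. The surrounding file presumably has its own table and harness boilerplate already; the case titles, timeout value, and the particular <T, Fn> instantiations below are assumptions for illustration only, not taken from this diff:

    // Hypothetical wiring sketch - assumes the usual utest/greentea includes
    // (utest.h, greentea-client/test_env.h) already present in this file.
    utest::v1::status_t test_setup(const size_t number_of_cases)
    {
        GREENTEA_SETUP(30, "default_auto");
        return utest::v1::verbose_test_setup_handler(number_of_cases);
    }

    Case cases[] = {
        Case("Atomic add, uint8_t",  test_atomic_add<uint8_t,  add_incrementer<uint8_t> >),
        Case("Atomic sub, uint16_t", test_atomic_add<uint16_t, sub_incrementer<uint16_t> >),
        Case("Weak CAS, uint32_t",   test_atomic_add<uint32_t, weak_incrementer<uint32_t> >),
        Case("Strong CAS, uint64_t", test_atomic_add<uint64_t, strong_incrementer<uint64_t> >)
    };

    utest::v1::Specification specification(test_setup, cases);

    int main()
    {
        return !utest::v1::Harness::run(specification);
    }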