 using utest::v1::Case;

-
 namespace {

 /* Lock-free operations will be much faster - keep runtime down */
-#if MBED_ATOMIC_INT_LOCK_FREE
-#define ADD_ITERATIONS (SystemCoreClock / 1000)
-#else
-#define ADD_ITERATIONS (SystemCoreClock / 8000)
-#endif
+#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
+#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)

-template <typename T>
-void add_incrementer(T *ptr)
+template <typename A>
+static inline long add_iterations(A &a)
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-    }
+    return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
 }

-template <typename T>
-void add_release_incrementer(T *ptr)
+template <typename A>
+struct add_incrementer
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            ++(*ptr);
+        }
     }
-}
+};

-template <typename T>
-void sub_incrementer(T *ptr)
+template <typename A>
+struct add_release_incrementer
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_sub(ptr, T(-1));
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            ptr->fetch_add(1, mbed::memory_order_release);
+        }
     }
-}
+};

-template <typename T>
-void bitops_incrementer(T *ptr)
+template <typename A>
+struct sub_incrementer
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        core_util_atomic_fetch_add(ptr, T(1));
-        core_util_atomic_fetch_and(ptr, T(-1));
-        core_util_atomic_fetch_or(ptr, T(0));
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            ptr->fetch_sub(-1);
+        }
     }
-}
+};

-template <typename T>
-void weak_incrementer(T *ptr)
+template <typename A>
+struct bitops_incrementer
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
-        do {
-        } while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            (*ptr) += 1;
+            (*ptr) &= -1;
+            (*ptr) |= 0;
+        }
     }
-}
+};

-template <typename T>
-void strong_incrementer(T *ptr)
+template <typename A>
+struct weak_incrementer
 {
-    for (long i = ADD_ITERATIONS; i > 0; i--) {
-        T val = core_util_atomic_load(ptr);
-        do {
-        } while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            typename A::value_type val = ptr->load();
+            do {
+            } while (!ptr->compare_exchange_weak(val, val + 1));
+        }
     }
-}
+};
+
+template <typename A>
+struct strong_incrementer
+{
+    static void op(A *ptr)
+    {
+        for (long i = add_iterations(*ptr); i > 0; i--) {
+            typename A::value_type val = ptr->load();
+            do {
+            } while (!ptr->compare_exchange_strong(val, val + 1));
+        }
+    }
+};
+


 /*
@@ -100,32 +121,34 @@ void strong_incrementer(T *ptr)
  * Using core_util_atomic_ templates, and exercising
  * load and store briefly.
  */
-template <typename T, void (*Fn)(T *)>
+template <typename T, template <typename A> class Fn>
 void test_atomic_add()
 {
     struct {
         volatile T nonatomic1;
-        T atomic1;
-        T atomic2;
+        Atomic<T> atomic1;
+        volatile Atomic<T> atomic2; // use volatile just to exercise the templates' volatile methods
         volatile T nonatomic2;
-    } data;
+    } data = { 0, { 0 }, { 1 }, 0 }; // test initialisation
+
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
+    TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
+    TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);

-    data.nonatomic1 = 0;
-    core_util_atomic_store(&data.atomic1, T(0));
-    core_util_atomic_store(&data.atomic2, T(0));
-    data.nonatomic2 = 0;
+    // test store
+    data.atomic2 = 0;

     Thread t1(osPriorityNormal, THREAD_STACK);
     Thread t2(osPriorityNormal, THREAD_STACK);
     Thread t3(osPriorityNormal, THREAD_STACK);
     Thread t4(osPriorityNormal, THREAD_STACK);

-    TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1)));
-    TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1)));
-    TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
-    TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
+    TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
+    TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
+    TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
+    TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));

-    for (long i = ADD_ITERATIONS; i > 0; i--) {
+    for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
         data.nonatomic1++;
         data.nonatomic2++;
     }
@@ -135,10 +158,10 @@ void test_atomic_add()
     t3.join();
     t4.join();

-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
-    TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
-    TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
+    TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
+    TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
 }

 } // namespace