 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
- * atomic_read - read atomic variable
+ * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.
  */
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
 	return READ_ONCE((v)->counter);
 }
 
 /**
- * atomic_set - set atomic variable
+ * arch_atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
  *
  * Atomically sets the value of @v to @i.
  */
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
 	WRITE_ONCE(v->counter, i);
 }
 
 /**
- * atomic_add - add integer to atomic variable
+ * arch_atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v.
  */
-static __always_inline void atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0"
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
 
 /**
- * atomic_sub - subtract integer from atomic variable
+ * arch_atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v.
  */
-static __always_inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void arch_atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "subl %1,%0"
 		     : "+m" (v->counter)
 		     : "ir" (i));
 }
 
 /**
- * atomic_sub_and_test - subtract value from variable and test result
+ * arch_atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
 
 /**
- * atomic_inc - increment atomic variable
+ * arch_atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1.
  */
-static __always_inline void atomic_inc(atomic_t *v)
+static __always_inline void arch_atomic_inc(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "incl %0"
 		     : "+m" (v->counter));
 }
 
 /**
- * atomic_dec - decrement atomic variable
+ * arch_atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1.
  */
-static __always_inline void atomic_dec(atomic_t *v)
+static __always_inline void arch_atomic_dec(atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "decl %0"
 		     : "+m" (v->counter));
 }
 
 /**
- * atomic_dec_and_test - decrement and test
+ * arch_atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
 
 /**
- * atomic_inc_and_test - increment and test
+ * arch_atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
 	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
 
 /**
- * atomic_add_negative - add and test if negative
+ * arch_atomic_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
 
 /**
- * atomic_add_return - add integer and return
+ * arch_atomic_add_return - add integer and return
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
 
 /**
- * atomic_sub_return - subtract integer and return
+ * arch_atomic_sub_return - subtract integer and return
  * @v: pointer of type atomic_t
  * @i: integer value to subtract
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
 {
-	return atomic_add_return(-i, v);
+	return arch_atomic_add_return(-i, v);
 }
 
-#define atomic_inc_return(v)  (atomic_add_return(1, v))
-#define atomic_dec_return(v)  (atomic_sub_return(1, v))
+#define arch_atomic_inc_return(v)  (arch_atomic_add_return(1, v))
+#define arch_atomic_dec_return(v)  (arch_atomic_sub_return(1, v))
 
-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return xadd(&v->counter, i);
 }
 
-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
 	return xadd(&v->counter, -i);
 }
 
-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	return cmpxchg(&v->counter, old, new);
+	return arch_cmpxchg(&v->counter, old, new);
 }
 
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
 
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
 	return xchg(&v->counter, new);
 }
 
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "andl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int atomic_fetch_and(int i, atomic_t *v)
+static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
-	int val = atomic_read(v);
+	int val = arch_atomic_read(v);
 
-	do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
 
 	return val;
 }
 
-static inline void atomic_or(int i, atomic_t *v)
+static inline void arch_atomic_or(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "orl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int atomic_fetch_or(int i, atomic_t *v)
+static inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
-	int val = atomic_read(v);
+	int val = arch_atomic_read(v);
 
-	do { } while (!atomic_try_cmpxchg(v, &val, val | i));
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
 
 	return val;
 }
 
-static inline void atomic_xor(int i, atomic_t *v)
+static inline void arch_atomic_xor(int i, atomic_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorl %1,%0"
 			: "+m" (v->counter)
 			: "ir" (i)
 			: "memory");
 }
 
-static inline int atomic_fetch_xor(int i, atomic_t *v)
+static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
-	int val = atomic_read(v);
+	int val = arch_atomic_read(v);
 
-	do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
 
 	return val;
 }
 
 /**
- * __atomic_add_unless - add unless the number is already a given value
+ * __arch_atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c = atomic_read(v);
+	int c = arch_atomic_read(v);
 
 	do {
 		if (unlikely(c == u))
 			break;
-	} while (!atomic_try_cmpxchg(v, &c, c + a));
+	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));
 
 	return c;
 }
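
The lock-free read-modify-write helpers above (arch_atomic_fetch_and/or/xor and __arch_atomic_add_unless) all follow the same pattern: read the counter once, then retry arch_atomic_try_cmpxchg() until it succeeds, relying on try_cmpxchg() to write the freshly observed value back into the local copy on failure. A minimal userspace sketch of that pattern, using GCC's __atomic builtins in place of the kernel primitives (the my_* names are illustrative only, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for atomic_t. */
typedef struct { int counter; } my_atomic_t;

/*
 * try_cmpxchg()-style helper: attempt counter: *old -> new, and on
 * failure store the value actually observed back into *old, so the
 * caller's retry loop needs no explicit re-read.
 */
static bool my_try_cmpxchg(my_atomic_t *v, int *old, int new)
{
	return __atomic_compare_exchange_n(&v->counter, old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

/* Same shape as arch_atomic_fetch_or(): returns the value before the OR. */
static int my_fetch_or(int mask, my_atomic_t *v)
{
	int val = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

	do { } while (!my_try_cmpxchg(v, &val, val | mask));

	return val;
}

int main(void)
{
	my_atomic_t v = { .counter = 0x1 };

	printf("old=%#x new=%#x\n", my_fetch_or(0x4, &v), v.counter);
	return 0;
}

Because a failed compare-exchange refreshes the expected value, the loop body can stay empty, exactly as in the kernel versions above.
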
@@ -276,4 +276,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 # include <asm/atomic64_64.h>
 #endif
 
+#include <asm-generic/atomic-instrumented.h>
+
 #endif /* _ASM_X86_ATOMIC_H */
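
The final hunk is the point of the rename: once the x86 primitives live under the arch_atomic_* prefix, the newly included <asm-generic/atomic-instrumented.h> can define the public atomic_*() API as thin wrappers that instrument each access (e.g. for KASAN) before delegating to the architecture code, so the checks are written once in generic code rather than in every architecture's inline assembly. A simplified sketch of the assumed wrapper shape (illustrative, not the verbatim generic header):

/*
 * Assumed shape of the wrappers in <asm-generic/atomic-instrumented.h>:
 * check the memory access, then delegate to the arch_ primitive above.
 */
static __always_inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	kasan_check_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}
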