
Commit 8bf705d

dvyukov authored and Ingo Molnar committed

locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h

Add arch_ prefix to all atomic operations and include
<asm-generic/atomic-instrumented.h>. This will allow adding KASAN
instrumentation to all atomic ops.

Signed-off-by: Dmitry Vyukov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/54f0eb64260b84199e538652e079a89b5423ad41.1517246437.git.dvyukov@google.com
Signed-off-by: Ingo Molnar <[email protected]>
1 parent b06ed71 commit 8bf705d
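
For orientation: the arch_ prefix exists so that <asm-generic/atomic-instrumented.h> can define the unprefixed atomic_*() API as thin wrappers that run an instrumentation check and then call the architecture primitive. Below is a minimal sketch of that wrapper pattern, not the verbatim upstream header; it assumes the kasan_check_read()/kasan_check_write() helpers from <linux/kasan-checks.h>, and the exact wrapper bodies in the generic header may differ.

/*
 * Illustrative sketch only: how a generic wrapper can layer KASAN checks
 * on top of the arch_-prefixed primitives introduced by this patch.
 */
#include <linux/kasan-checks.h>

static __always_inline int atomic_read(const atomic_t *v)
{
        kasan_check_read(v, sizeof(*v));        /* report the read access to KASAN */
        return arch_atomic_read(v);             /* then defer to the x86 implementation */
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
        kasan_check_write(v, sizeof(*v));       /* the store target is checked first */
        arch_atomic_set(v, i);
}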

File tree

6 files changed: +172 -168 lines changed


arch/x86/include/asm/atomic.h

Lines changed: 52 additions & 50 deletions
@@ -17,255 +17,255 @@
 #define ATOMIC_INIT(i) { (i) }

 /**
- * atomic_read - read atomic variable
+ * arch_atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.
  */
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
         return READ_ONCE((v)->counter);
 }

 /**
- * atomic_set - set atomic variable
+ * arch_atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
  *
  * Atomically sets the value of @v to @i.
  */
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
         WRITE_ONCE(v->counter, i);
 }

 /**
- * atomic_add - add integer to atomic variable
+ * arch_atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v.
  */
-static __always_inline void atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "addl %1,%0"
                      : "+m" (v->counter)
                      : "ir" (i));
 }

 /**
- * atomic_sub - subtract integer from atomic variable
+ * arch_atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v.
  */
-static __always_inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void arch_atomic_sub(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "subl %1,%0"
                      : "+m" (v->counter)
                      : "ir" (i));
 }

 /**
- * atomic_sub_and_test - subtract value from variable and test result
+ * arch_atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }

 /**
- * atomic_inc - increment atomic variable
+ * arch_atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1.
  */
-static __always_inline void atomic_inc(atomic_t *v)
+static __always_inline void arch_atomic_inc(atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "incl %0"
                      : "+m" (v->counter));
 }

 /**
- * atomic_dec - decrement atomic variable
+ * arch_atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1.
  */
-static __always_inline void atomic_dec(atomic_t *v)
+static __always_inline void arch_atomic_dec(atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "decl %0"
                      : "+m" (v->counter));
 }

 /**
- * atomic_dec_and_test - decrement and test
+ * arch_atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }

 /**
- * atomic_inc_and_test - increment and test
+ * arch_atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
+static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
         GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }

 /**
- * atomic_add_negative - add and test if negative
+ * arch_atomic_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
         GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }

 /**
- * atomic_add_return - add integer and return
+ * arch_atomic_add_return - add integer and return
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
         return i + xadd(&v->counter, i);
 }

 /**
- * atomic_sub_return - subtract integer and return
+ * arch_atomic_sub_return - subtract integer and return
  * @v: pointer of type atomic_t
  * @i: integer value to subtract
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
 {
-        return atomic_add_return(-i, v);
+        return arch_atomic_add_return(-i, v);
 }

-#define atomic_inc_return(v)  (atomic_add_return(1, v))
-#define atomic_dec_return(v)  (atomic_sub_return(1, v))
+#define arch_atomic_inc_return(v)  (arch_atomic_add_return(1, v))
+#define arch_atomic_dec_return(v)  (arch_atomic_sub_return(1, v))

-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
         return xadd(&v->counter, i);
 }

-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
         return xadd(&v->counter, -i);
 }

-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-        return cmpxchg(&v->counter, old, new);
+        return arch_cmpxchg(&v->counter, old, new);
 }

-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
+static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
         return try_cmpxchg(&v->counter, old, new);
 }

-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
         return xchg(&v->counter, new);
 }

-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "andl %1,%0"
                      : "+m" (v->counter)
                      : "ir" (i)
                      : "memory");
 }

-static inline int atomic_fetch_and(int i, atomic_t *v)
+static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
-        int val = atomic_read(v);
+        int val = arch_atomic_read(v);

-        do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+        do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

         return val;
 }

-static inline void atomic_or(int i, atomic_t *v)
+static inline void arch_atomic_or(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "orl %1,%0"
                      : "+m" (v->counter)
                      : "ir" (i)
                      : "memory");
 }

-static inline int atomic_fetch_or(int i, atomic_t *v)
+static inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
-        int val = atomic_read(v);
+        int val = arch_atomic_read(v);

-        do { } while (!atomic_try_cmpxchg(v, &val, val | i));
+        do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

         return val;
 }

-static inline void atomic_xor(int i, atomic_t *v)
+static inline void arch_atomic_xor(int i, atomic_t *v)
 {
         asm volatile(LOCK_PREFIX "xorl %1,%0"
                      : "+m" (v->counter)
                      : "ir" (i)
                      : "memory");
 }

-static inline int atomic_fetch_xor(int i, atomic_t *v)
+static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
-        int val = atomic_read(v);
+        int val = arch_atomic_read(v);

-        do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+        do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

         return val;
 }

 /**
- * __atomic_add_unless - add unless the number is already a given value
+ * __arch_atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
 {
-        int c = atomic_read(v);
+        int c = arch_atomic_read(v);

         do {
                 if (unlikely(c == u))
                         break;
-        } while (!atomic_try_cmpxchg(v, &c, c + a));
+        } while (!arch_atomic_try_cmpxchg(v, &c, c + a));

         return c;
 }
@@ -276,4 +276,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 # include <asm/atomic64_64.h>
 #endif

+#include <asm-generic/atomic-instrumented.h>
+
 #endif /* _ASM_X86_ATOMIC_H */
