@@ -228,7 +228,7 @@ static __always_inline int variable_fls(unsigned int x)
  *
  * This operation may be reordered on other architectures than x86.
  */
-static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(or, __NOP, nr, addr);
 }
@@ -240,7 +240,7 @@ static inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This operation can be reordered on other architectures other than x86.
  */
-static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(and, __NOT, nr, addr);
 }
@@ -253,7 +253,7 @@ static inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit(xor, __NOP, nr, addr);
 }
@@ -270,7 +270,7 @@ static inline int arch_test_and_change_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_set_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(or, __NOP, nr, addr);
 }
@@ -284,7 +284,7 @@ static inline void arch_set_bit(int nr, volatile unsigned long *addr)
  * on non x86 architectures, so if you are writing portable code,
  * make sure not to rely on its reordering guarantees.
  */
-static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_clear_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(and, __NOT, nr, addr);
 }
@@ -298,7 +298,7 @@ static inline void arch_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void arch_change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void arch_change_bit(int nr, volatile unsigned long *addr)
 {
 	__op_bit(xor, __NOP, nr, addr);
 }
@@ -311,7 +311,7 @@ static inline void arch_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and provides acquire barrier semantics.
  * It can be used to implement bit locks.
  */
-static inline int arch_test_and_set_bit_lock(
+static __always_inline int arch_test_and_set_bit_lock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
@@ -324,7 +324,7 @@ static inline int arch_test_and_set_bit_lock(
  *
  * This operation is atomic and provides release barrier semantics.
  */
-static inline void arch_clear_bit_unlock(
+static __always_inline void arch_clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	__op_bit_ord(and, __NOT, nr, addr, .rl);
@@ -345,13 +345,13 @@ static inline void arch_clear_bit_unlock(
  * non-atomic property here: it's a lot more instructions and we still have to
  * provide release semantics anyway.
  */
-static inline void arch___clear_bit_unlock(
+static __always_inline void arch___clear_bit_unlock(
 	unsigned long nr, volatile unsigned long *addr)
 {
 	arch_clear_bit_unlock(nr, addr);
 }
 
-static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
 		volatile unsigned long *addr)
 {
 	unsigned long res;
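
For background (not part of the patch itself): a minimal stand-alone sketch of the difference the inline -> __always_inline change relies on, assuming __always_inline is, as defined in include/linux/compiler_types.h, ordinary inline plus the GCC/Clang always_inline function attribute. With plain inline the compiler may still emit an out-of-line copy (for example at -Os, or when instrumentation makes the function look expensive); the attribute forces inlining at every call site. The my_always_inline macro and the two helpers below are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Illustrative stand-in for the kernel's __always_inline macro. */
#define my_always_inline inline __attribute__((__always_inline__))

/* The compiler may still emit this out of line if it decides not to inline. */
static inline int plain_add(int a, int b)
{
	return a + b;
}

/* The always_inline attribute forces inlining at every call site. */
static my_always_inline int forced_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d %d\n", plain_add(1, 2), forced_add(3, 4));
	return 0;
}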