
Commit 472e8c5

andrea-parri authored and Ingo Molnar committed
locking/xchg/alpha: Fix xchg() and cmpxchg() memory ordering bugs
Successful RMW operations are supposed to be fully ordered, but Alpha's
xchg() and cmpxchg() do not meet this requirement.

Will Deacon noticed the bug:

  > So MP using xchg:
  >
  > WRITE_ONCE(x, 1)
  > xchg(y, 1)
  >
  > smp_load_acquire(y) == 1
  > READ_ONCE(x) == 0
  >
  > would be allowed.

... which thus violates the above requirement. Fix it by adding a
leading smp_mb() to the xchg() and cmpxchg() implementations.

Reported-by: Will Deacon <[email protected]>
Signed-off-by: Andrea Parri <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Cc: Alan Stern <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ivan Kokshaysky <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Richard Henderson <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 79d4424 commit 472e8c5
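
For context, the scenario quoted in the changelog is a message-passing (MP)
litmus test built around xchg(). The kernel-style C sketch below restates it;
the function and variable names (mp_writer, mp_reader, x, y, r0, r1) are
illustrative only and not part of this patch, and the two functions are
assumed to run concurrently on different CPUs.

/* Illustrative sketch of the MP test from the changelog (kernel context). */
#include <linux/atomic.h>	/* xchg() */
#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_load_acquire() */

static int x, y;

static void mp_writer(void)			/* runs on CPU 0 */
{
	WRITE_ONCE(x, 1);	/* publish the payload ...                */
	xchg(&y, 1);		/* ... then set the flag; a fully ordered
				 * RMW must not let the store to x slip
				 * past it                                */
}

static void mp_reader(int *r0, int *r1)		/* runs on CPU 1 */
{
	*r0 = smp_load_acquire(&y);	/* read the flag    */
	*r1 = READ_ONCE(x);		/* read the payload */
}

With xchg() fully ordered, the outcome *r0 == 1 && *r1 == 0 is forbidden: an
acquire load that observes the flag must also observe the earlier store to x.
Before this fix, Alpha's xchg() lacked the leading barrier, so that outcome
was allowed.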

File tree

1 file changed: +18 -3 lines


arch/alpha/include/asm/xchg.h

Lines changed: 18 additions & 3 deletions
@@ -12,13 +12,18 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
 ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
@@ -128,16 +136,20 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier is placed in SMP unconditionally, in order to
- * guarantee that dependency ordering is preserved when a dependency
- * is headed by an unsuccessful operation.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
 ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -165,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -192,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -215,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
