  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
 ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -67,6 +73,7 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l	%0,%4\n"
 	"	bis	$31,%3,%1\n"
@@ -87,6 +94,7 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%0,%4\n"
 	"	bis	$31,%3,%1\n"
@@ -128,16 +136,20 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier is placed in SMP unconditionally, in order to
- * guarantee that dependency ordering is preserved when a dependency
- * is headed by an unsuccessful operation.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
 ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -165,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -192,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l	%0,%5\n"
 	"	cmpeq	%0,%3,%1\n"
@@ -215,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%0,%5\n"
 	"	cmpeq	%0,%3,%1\n"