/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/compiler.h>

#if __SIZEOF_LONG__ == 4
#define __LL		"ll.w "
#define __SC		"sc.w "
#define __AMADD		"amadd.w "
#define __AMAND_DB	"amand_db.w "
#define __AMOR_DB	"amor_db.w "
#define __AMXOR_DB	"amxor_db.w "
#elif __SIZEOF_LONG__ == 8
#define __LL		"ll.d "
#define __SC		"sc.d "
#define __AMADD		"amadd.d "
#define __AMAND_DB	"amand_db.d "
#define __AMOR_DB	"amor_db.d "
#define __AMXOR_DB	"amxor_db.d "
#endif
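
/*
 * These mnemonic strings select the native-long-sized instruction forms:
 * the 32-bit ".w" LL/SC and AM* variants when long is 4 bytes and the
 * 64-bit ".d" variants when it is 8. They are not referenced in this
 * header itself; they are provided for other inline asm that includes it.
 */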

#define ATOMIC_INIT(i) { (i) }

/*
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v) READ_ONCE((v)->counter)

/*
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

#define ATOMIC_OP(op, I, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
        __asm__ __volatile__( \
        "am"#asm_op"_db.w" " $zero, %1, %0 \n" \
        : "+ZB" (v->counter) \
        : "r" (I) \
        : "memory"); \
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        int result; \
 \
        __asm__ __volatile__( \
        "am"#asm_op"_db.w" " %1, %2, %0 \n" \
        : "+ZB" (v->counter), "=&r" (result) \
        : "r" (I) \
        : "memory"); \
 \
        return result c_op I; \
}

#define ATOMIC_FETCH_OP(op, I, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        int result; \
 \
        __asm__ __volatile__( \
        "am"#asm_op"_db.w" " %1, %2, %0 \n" \
        : "+ZB" (v->counter), "=&r" (result) \
        : "r" (I) \
        : "memory"); \
 \
        return result; \
}

#define ATOMIC_OPS(op, I, asm_op, c_op) \
        ATOMIC_OP(op, I, asm_op) \
        ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
        ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)
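
/*
 * The two ATOMIC_OPS() invocations above expand to six functions:
 * arch_atomic_add(), arch_atomic_add_return_relaxed(),
 * arch_atomic_fetch_add_relaxed(), plus the corresponding sub variants,
 * which are implemented as an amadd.w of the negated operand (-i).
 */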

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op) \
        ATOMIC_OP(op, I, asm_op) \
        ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

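/*
 * arch_atomic_fetch_add_unless - add to an atomic variable unless it has a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v
 * @u: the value @v must not hold for the add to happen
 *
 * Atomically adds @a to @v via an LL/SC retry loop, as long as @v was not
 * already @u. Returns the original value of @v.
 */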
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0: ll.w %[p], %[c]\n"
                " beq %[p], %[u], 1f\n"
                " add.w %[rc], %[p], %[a]\n"
                " sc.w %[rc], %[c]\n"
                " beqz %[rc], 0b\n"
                " b 2f\n"
                "1:\n"
                __WEAK_LLSC_MB
                "2:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc),
                  [c]"=ZB" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");

        return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

/*
 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
        int result;
        int temp;

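        /*
         * A compile-time-constant @i is passed as an immediate operand
         * (the "I" constraint) so the negated value can be applied with
         * addi.w; otherwise @i lives in a register and is subtracted
         * with sub.w.
         */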
        if (__builtin_constant_p(i)) {
                __asm__ __volatile__(
                "1: ll.w %1, %2 # atomic_sub_if_positive\n"
                " addi.w %0, %1, %3 \n"
                " or %1, %0, $zero \n"
                " blt %0, $zero, 2f \n"
                " sc.w %1, %2 \n"
                " beq $zero, %1, 1b \n"
                "2: \n"
                : "=&r" (result), "=&r" (temp),
                  "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1: ll.w %1, %2 # atomic_sub_if_positive\n"
                " sub.w %0, %1, %3 \n"
                " or %1, %0, $zero \n"
                " blt %0, $zero, 2f \n"
                " sc.w %1, %2 \n"
                " beq $zero, %1, 1b \n"
                "2: \n"
                : "=&r" (result), "=&r" (temp),
                  "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "r" (i));
        }

        return result;
}

#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

/*
 * arch_atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

/*
 * arch_atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic64_read(v) READ_ONCE((v)->counter)

/*
 * arch_atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op) \
static inline void arch_atomic64_##op(long i, atomic64_t *v) \
{ \
        __asm__ __volatile__( \
        "am"#asm_op"_db.d " " $zero, %1, %0 \n" \
        : "+ZB" (v->counter) \
        : "r" (I) \
        : "memory"); \
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
{ \
        long result; \
        __asm__ __volatile__( \
        "am"#asm_op"_db.d " " %1, %2, %0 \n" \
        : "+ZB" (v->counter), "=&r" (result) \
        : "r" (I) \
        : "memory"); \
 \
        return result c_op I; \
}

#define ATOMIC64_FETCH_OP(op, I, asm_op) \
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
{ \
        long result; \
 \
        __asm__ __volatile__( \
        "am"#asm_op"_db.d " " %1, %2, %0 \n" \
        : "+ZB" (v->counter), "=&r" (result) \
        : "r" (I) \
        : "memory"); \
 \
        return result; \
}

#define ATOMIC64_OPS(op, I, asm_op, c_op) \
        ATOMIC64_OP(op, I, asm_op) \
        ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
        ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op) \
        ATOMIC64_OP(op, I, asm_op) \
        ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

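/*
 * arch_atomic64_fetch_add_unless - add to an atomic64 variable unless it has a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v
 * @u: the value @v must not hold for the add to happen
 *
 * 64-bit counterpart of arch_atomic_fetch_add_unless(); returns the
 * original value of @v.
 */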
static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0: ll.d %[p], %[c]\n"
                " beq %[p], %[u], 1f\n"
                " add.d %[rc], %[p], %[a]\n"
                " sc.d %[rc], %[c]\n"
                " beqz %[rc], 0b\n"
                " b 2f\n"
                "1:\n"
                __WEAK_LLSC_MB
                "2:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc),
                  [c]"=ZB" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");

        return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
        long result;
        long temp;

        if (__builtin_constant_p(i)) {
                __asm__ __volatile__(
                "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
                " addi.d %0, %1, %3 \n"
                " or %1, %0, $zero \n"
                " blt %0, $zero, 2f \n"
                " sc.d %1, %2 \n"
                " beq %1, $zero, 1b \n"
                "2: \n"
                : "=&r" (result), "=&r" (temp),
                  "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1: ll.d %1, %2 # atomic64_sub_if_positive \n"
                " sub.d %0, %1, %3 \n"
                " or %1, %0, $zero \n"
                " blt %0, $zero, 2f \n"
                " sc.d %1, %2 \n"
                " beq %1, $zero, 1b \n"
                "2: \n"
                : "=&r" (result), "=&r" (temp),
                  "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "r" (i));
        }

        return result;
}

#define arch_atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */