 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+	unsigned long tp;
+	register unsigned long *sp asm("sp");
+	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+	return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
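
The CONFIG_PREEMPT branch above hinges on a specific compiler contract: a non-volatile asm whose inputs include a memory operand may be merged with an identical earlier one, but must be re-executed after barrier(), since barrier() declares all of memory clobbered. Below is a minimal user-space sketch of the same trick, assuming GCC or Clang on x86-64 Linux; x86 has no "tp" register or 'U' constraint, so this hypothetical read_tp() reads the TLS base from %fs:0 and uses an ordinary "m" input on the stack pointer as the fake hazard.

#include <stdio.h>

#define barrier() asm volatile("" ::: "memory")

static inline unsigned long read_tp(void)
{
	unsigned long tp;
	register unsigned long *sp asm("rsp");
	/*
	 * Not volatile, so repeated calls between barriers may be
	 * merged.  The "m" (*sp) input ties the asm to memory, and
	 * barrier()'s memory clobber forces it to run again afterward.
	 */
	asm("movq %%fs:0, %0" : "=r" (tp) : "m" (*sp));
	return tp;
}

int main(void)
{
	unsigned long a = read_tp();	/* these two reads may be merged */
	unsigned long b = read_tp();
	barrier();			/* hazard: c must be re-read */
	unsigned long c = read_tp();
	printf("%#lx %#lx %#lx\n", a, b, c);
	return 0;
}

At -O2 the reads for a and b typically collapse into a single movq while c gets its own, which is exactly the behavior the comment wants: cached within a preemption-safe region, reloaded across barrier(). Marking the asm volatile instead would forbid the caching entirely.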
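
The #else branch leans on a second GCC property, the one its comment states: a global register variable is re-read at function call points, so with PREEMPT_VOLUNTARY (where preemption can only happen at calls) no extra hazard is needed. A user-space analogue, assuming everything is compiled with -ffixed-r15 so that r15 (an arbitrary stand-in for tile's "tp") stays reserved for this use:

/* Hypothetical sketch; build all files with gcc -O2 -ffixed-r15. */
register unsigned long my_offset_reg asm("r15");

#define my_offset	 my_offset_reg
#define set_my_offset(v) (my_offset_reg = (v))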