
Commit dae0af7

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "Fixes for ARM, the most notable being the fix from Nathan Lynch to
  fix the state of various registers during execve, to ensure that data
  can't be leaked between two executables.

  Fixes from Victor Kamensky for get_user() on big endian platforms,
  since the addition of 8-byte get_user() support broke these fairly
  badly.

  A fix from Sudeep Holla for affinity setting when hotplugging CPU 0.

  A fix from Stephen Boyd for a perf-induced sleep attempt while atomic.

  Lastly, a correctness fix for emulation of the SWP instruction on
  ARMv7+, and a fix for wrong carry handling when updating the
  translation table base address on LPAE platforms"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts
  ARM: 8148/1: flush TLS and thumbee register state during exec
  ARM: 8151/1: add missing exports for asm functions required by get_user macro
  ARM: 8137/1: fix get_user BE behavior for target variable with size of 8 bytes
  ARM: 8135/1: Fix in-correct barrier usage in SWP{B} emulation
  ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs
  ARM: 8132/1: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET
2 parents c1f03b4 + 505013b commit dae0af7
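Background sketch (not part of the commit): the execve-related fix matters because the ARM TLS registers are per-thread state that outlives exec unless the kernel clears them. A hypothetical user-space probe like the one below, assuming an ARMv6K or later CPU where TPIDRURW is accessible from user mode, would read back a stale value after exec without the flush added in this series:

/* Hypothetical probe, not from this series: reads the user read/write
 * thread ID register (TPIDRURW) right after exec. With the fix applied
 * it should read as 0 rather than a value inherited from the previous
 * program image. Assumes an ARMv6K+ CPU. */
#include <stdio.h>

int main(void)
{
        unsigned long tpuser;

        asm volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (tpuser));
        printf("TPIDRURW after exec: %#lx (expected 0)\n", tpuser);
        return 0;
}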

11 files changed, 154 insertions(+), 55 deletions(-)

arch/arm/include/asm/tls.h

Lines changed: 62 additions & 0 deletions
@@ -1,6 +1,9 @@
 #ifndef __ASMARM_TLS_H
 #define __ASMARM_TLS_H
 
+#include <linux/compiler.h>
+#include <asm/thread_info.h>
+
 #ifdef __ASSEMBLY__
 #include <asm/asm-offsets.h>
 .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
@@ -50,6 +53,47 @@
 #endif
 
 #ifndef __ASSEMBLY__
+
+static inline void set_tls(unsigned long val)
+{
+	struct thread_info *thread;
+
+	thread = current_thread_info();
+
+	thread->tp_value[0] = val;
+
+	/*
+	 * This code runs with preemption enabled and therefore must
+	 * be reentrant with respect to switch_tls.
+	 *
+	 * We need to ensure ordering between the shadow state and the
+	 * hardware state, so that we don't corrupt the hardware state
+	 * with a stale shadow state during context switch.
+	 *
+	 * If we're preempted here, switch_tls will load TPIDRURO from
+	 * thread_info upon resuming execution and the following mcr
+	 * is merely redundant.
+	 */
+	barrier();
+
+	if (!tls_emu) {
+		if (has_tls_reg) {
+			asm("mcr p15, 0, %0, c13, c0, 3"
+			    : : "r" (val));
+		} else {
+			/*
+			 * User space must never try to access this
+			 * directly. Expect your app to break
+			 * eventually if you do so. The user helper
+			 * at 0xffff0fe0 must be used instead. (see
+			 * entry-armv.S for details)
+			 */
+			*((unsigned int *)0xffff0ff0) = val;
+		}
+
+	}
+}
+
 static inline unsigned long get_tpuser(void)
 {
 	unsigned long reg = 0;
@@ -59,5 +103,23 @@ static inline unsigned long get_tpuser(void)
 
 	return reg;
 }
+
+static inline void set_tpuser(unsigned long val)
+{
+	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+	 * we need not update thread_info.
+	 */
+	if (has_tls_reg && !tls_emu) {
+		asm("mcr p15, 0, %0, c13, c0, 2"
+		    : : "r" (val));
+	}
+}
+
+static inline void flush_tls(void)
+{
+	set_tls(0);
+	set_tpuser(0);
+}
+
 #endif
 #endif	/* __ASMARM_TLS_H */
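The comment in set_tls() above points user space at the kuser helper rather than the raw word at 0xffff0ff0. As a hedged illustration of that documented interface (normally the C library does this, and kuser helpers must be enabled in the kernel):

/* Sketch of the documented user-space path for reading TLS: the kuser
 * get_tls helper lives at the fixed address 0xffff0fe0 and returns the
 * current TLS pointer (see Documentation/arm/kernel_user_helpers.txt). */
typedef unsigned long (*kuser_get_tls_fn)(void);
#define kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

unsigned long read_tls_pointer(void)
{
        return kuser_get_tls();
}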

arch/arm/include/asm/uaccess.h

Lines changed: 39 additions & 9 deletions
@@ -107,8 +107,11 @@ static inline void set_fs(mm_segment_t fs)
 extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
-extern int __get_user_lo8(void *);
+extern int __get_user_32t_8(void *);
 extern int __get_user_8(void *);
+extern int __get_user_64t_1(void *);
+extern int __get_user_64t_2(void *);
+extern int __get_user_64t_4(void *);
 
 #define __GUP_CLOBBER_1	"lr", "cc"
 #ifdef CONFIG_CPU_USE_DOMAINS
@@ -117,7 +120,7 @@ extern int __get_user_8(void *);
 #define __GUP_CLOBBER_2	"lr", "cc"
 #endif
 #define __GUP_CLOBBER_4	"lr", "cc"
-#define __GUP_CLOBBER_lo8 "lr", "cc"
+#define __GUP_CLOBBER_32t_8 "lr", "cc"
 #define __GUP_CLOBBER_8	"lr", "cc"
 
 #define __get_user_x(__r2,__p,__e,__l,__s)				\
@@ -131,12 +134,30 @@ extern int __get_user_8(void *);
 
 /* narrowing a double-word get into a single 32bit word register: */
 #ifdef __ARMEB__
-#define __get_user_xb(__r2, __p, __e, __l, __s)				\
-	__get_user_x(__r2, __p, __e, __l, lo8)
+#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
+	__get_user_x(__r2, __p, __e, __l, 32t_8)
 #else
-#define __get_user_xb __get_user_x
+#define __get_user_x_32t __get_user_x
 #endif
 
+/*
+ * storing result into proper least significant word of 64bit target var,
+ * different only for big endian case where 64 bit __r2 lsw is r3:
+ */
+#ifdef __ARMEB__
+#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
+	__asm__ __volatile__ (						\
+		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
+		"bl	__get_user_64t_" #__s				\
+		: "=&r" (__e), "=r" (__r2)				\
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
+#else
+#define __get_user_x_64t __get_user_x
+#endif
+
+
 #define __get_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
@@ -146,17 +167,26 @@ extern int __get_user_8(void *);
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, __l, 1);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 1);	\
 			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, __l, 2);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 2);	\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, __l, 4);		\
+			if (sizeof((x)) >= 8)				\
+				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+			else						\
+				__get_user_x(__r2, __p, __e, __l, 4);	\
 			break;						\
 		case 8:							\
 			if (sizeof((x)) < 8)				\
-				__get_user_xb(__r2, __p, __e, __l, 4);	\
+				__get_user_x_32t(__r2, __p, __e, __l, 4); \
 			else						\
 				__get_user_x(__r2, __p, __e, __l, 8);	\
 			break;						\
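The case the new __get_user_x_64t path handles is a get_user() whose destination variable is wider than the user object, as in this hedged illustration (not code from the patch):

/* Illustrative only: kernel code reading a 32-bit user value into a
 * 64-bit variable. On a big-endian kernel the 32-bit result must land
 * in the low word of the r2/r3 register pair (r3), which is what the
 * __get_user_64t_* helpers arrange; previously the value ended up
 * shifted into the upper 32 bits of the destination. */
u32 __user *uptr = (u32 __user *)arg;	/* 'arg' is a placeholder */
u64 val;

if (get_user(val, uptr))		/* sizeof(val) == 8, sizeof(*uptr) == 4 */
	return -EFAULT;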

arch/arm/kernel/armksyms.c

Lines changed: 8 additions & 0 deletions
@@ -98,6 +98,14 @@ EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
+
+#ifdef __ARMEB__
+EXPORT_SYMBOL(__get_user_64t_1);
+EXPORT_SYMBOL(__get_user_64t_2);
+EXPORT_SYMBOL(__get_user_64t_4);
+EXPORT_SYMBOL(__get_user_32t_8);
+#endif
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);

arch/arm/kernel/irq.c

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
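For reference, the callback used above takes a third "force" argument; the fix passes false so the irqchip applies its normal affinity validation instead of being forced onto a CPU that may be going offline. The struct irq_chip member looks like this (signature as in include/linux/irq.h of this era):

/* irqchip hook invoked by migrate_one_irq(); the final parameter is
 * "force", which this fix changes from true to false. */
int (*irq_set_affinity)(struct irq_data *data,
			const struct cpumask *dest,
			bool force);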

arch/arm/kernel/perf_event_cpu.c

Lines changed: 4 additions & 10 deletions
@@ -76,21 +76,15 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 
 static void cpu_pmu_enable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 }
 
 static void cpu_pmu_disable_percpu_irq(void *data)
 {
-	struct arm_pmu *cpu_pmu = data;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	int irq = platform_get_irq(pmu_device, 0);
+	int irq = *(int *)data;
 
-	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
 	disable_percpu_irq(irq);
 }
 
@@ -103,7 +97,7 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irq = platform_get_irq(pmu_device, 0);
 	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 		free_percpu_irq(irq, &percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
@@ -138,7 +132,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 				irq);
 			return err;
 		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
 			err = 0;
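The shape of this fix generalizes: on_each_cpu() runs its callback with interrupts disabled on every CPU, so work that might sleep (here, resolving the IRQ number) has to happen before the cross-call, with the callback handed plain data. A minimal sketch of that pattern, where struct my_device and lookup() are hypothetical stand-ins:

#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical type and helper, for illustration only. */
struct my_device;
static int lookup(struct my_device *dev);	/* assume: may sleep */

static void apply_on_cpu(void *data)
{
	int value = *(int *)data;	/* runs with IRQs off: must not sleep */

	pr_info("cpu%d: applying %d\n", smp_processor_id(), value);
}

static void apply_everywhere(struct my_device *dev)
{
	int value = lookup(dev);	/* may sleep: resolve in task context */

	on_each_cpu(apply_on_cpu, &value, 1);
}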

arch/arm/kernel/process.c

Lines changed: 2 additions & 0 deletions
@@ -334,6 +334,8 @@ void flush_thread(void)
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
 	memset(&thread->fpstate, 0, sizeof(union fp_state));
 
+	flush_tls();
+
 	thread_notify(THREAD_NOTIFY_FLUSH, thread);
 }
 

arch/arm/kernel/swp_emulate.c

Lines changed: 0 additions & 15 deletions
@@ -142,14 +142,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	while (1) {
 		unsigned long temp;
 
-		/*
-		 * Barrier required between accessing protected resource and
-		 * releasing a lock for it. Legacy code might not have done
-		 * this, and we cannot determine that this is not the case
-		 * being emulated, so insert always.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
@@ -162,13 +154,6 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	}
 
 	if (res == 0) {
-		/*
-		 * Barrier also required between acquiring a lock for a
-		 * protected resource and accessing the resource. Inserted for
-		 * same reason as above.
-		 */
-		smp_mb();
-
 		if (type == TYPE_SWPB)
 			swpbcounter++;
 		else
else

arch/arm/kernel/thumbee.c

Lines changed: 1 addition & 1 deletion
@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
 
 	switch (cmd) {
 	case THREAD_NOTIFY_FLUSH:
-		thread->thumbee_state = 0;
+		teehbr_write(0);
 		break;
 	case THREAD_NOTIFY_SWITCH:
 		current_thread_info()->thumbee_state = teehbr_read();

arch/arm/kernel/traps.c

Lines changed: 1 addition & 16 deletions
@@ -581,7 +581,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
 #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
 	if ((no >> 16) != (__ARM_NR_BASE>> 16))
@@ -632,21 +631,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		return regs->ARM_r0;
 
 	case NR(set_tls):
-		thread->tp_value[0] = regs->ARM_r0;
-		if (tls_emu)
-			return 0;
-		if (has_tls_reg) {
-			asm ("mcr p15, 0, %0, c13, c0, 3"
-				: : "r" (regs->ARM_r0));
-		} else {
-			/*
-			 * User space must never try to access this directly.
-			 * Expect your app to break eventually if you do so.
-			 * The user helper at 0xffff0fe0 must be used instead.
-			 * (see entry-armv.S for details)
-			 */
-			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-		}
+		set_tls(regs->ARM_r0);
 		return 0;
 
 #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
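The case above implements the ARM-private set_tls syscall. For context, a hedged sketch of how it is reached from user space (normally libc issues it when setting up a thread; the number is __ARM_NR_BASE (0x0f0000) + 5):

/* Illustrative user-space call, not from the patch: __ARM_NR_set_tls is
 * the ARM-private syscall handled by the NR(set_tls) case above.
 * Applications normally never call it directly. */
#include <unistd.h>
#include <sys/syscall.h>

#define ARM_NR_set_tls 0x0f0005		/* __ARM_NR_BASE + 5 */

static long arm_set_tls(void *tp)
{
        return syscall(ARM_NR_set_tls, tp);
}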

arch/arm/lib/getuser.S

Lines changed: 36 additions & 2 deletions
@@ -80,7 +80,7 @@ ENTRY(__get_user_8)
 ENDPROC(__get_user_8)
 
 #ifdef __ARMEB__
-ENTRY(__get_user_lo8)
+ENTRY(__get_user_32t_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad
 #ifdef CONFIG_CPU_USE_DOMAINS
 	add	r0, r0, #4
@@ -90,7 +90,37 @@ ENTRY(__get_user_lo8)
 #endif
 	mov	r0, #0
 	ret	lr
-ENDPROC(__get_user_lo8)
+ENDPROC(__get_user_32t_8)
+
+ENTRY(__get_user_64t_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad8
+8: TUSER(ldrb)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_1)
+
+ENTRY(__get_user_64t_2)
+	check_uaccess r0, 2, r1, r2, __get_user_bad8
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+9:	ldrbt	r3, [r0], #1
+10:	ldrbt	rb, [r0], #0
+#else
+rb	.req	r0
+9:	ldrb	r3, [r0]
+10:	ldrb	rb, [r0, #1]
+#endif
+	orr	r3, rb, r3, lsl #8
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_2)
+
+ENTRY(__get_user_64t_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad8
+11: TUSER(ldr)	r3, [r0]
+	mov	r0, #0
+	ret	lr
+ENDPROC(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -111,5 +141,9 @@ ENDPROC(__get_user_bad8)
 	.long	6b, __get_user_bad8
 #ifdef __ARMEB__
 	.long	7b, __get_user_bad
+	.long	8b, __get_user_bad8
+	.long	9b, __get_user_bad8
+	.long	10b, __get_user_bad8
+	.long	11b, __get_user_bad8
 #endif
 	.popsection

arch/arm/mm/proc-v7-3level.S

Lines changed: 0 additions & 1 deletion
@@ -146,7 +146,6 @@ ENDPROC(cpu_v7_set_pte_ext)
 	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
 	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
-	adcls	\tmp, \tmp, #0
 	mcrr	p15, 1, \ttbr1, \tmp, c2			@ load TTBR1
 	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
 	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits
