
Commit 07171da

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "The main item in this pull request are the Spectre variant 1.1 fixes
  from Julien Thierry.

  A few other patches to improve various areas, and removal of some
  obsolete mcount bits and a redundant kbuild conditional"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8802/1: Call syscall_trace_exit even when system call skipped
  ARM: 8797/1: spectre-v1.1: harden __copy_to_user
  ARM: 8796/1: spectre-v1,v1.1: provide helpers for address sanitization
  ARM: 8795/1: spectre-v1.1: use put_user() for __put_user()
  ARM: 8794/1: uaccess: Prevent speculative use of the current addr_limit
  ARM: 8793/1: signal: replace __put_user_error with __put_user
  ARM: 8792/1: oabi-compat: copy oabi events using __copy_to_user()
  ARM: 8791/1: vfp: use __copy_to_user() when saving VFP state
  ARM: 8790/1: signal: always use __copy_to_user to save iwmmxt context
  ARM: 8789/1: signal: copy registers using __copy_to_user()
  ARM: 8801/1: makefile: use ARMv3M mode for RiscPC
  ARM: 8800/1: use choice for kernel unwinders
  ARM: 8798/1: remove unnecessary KBUILD_SRC ifeq conditional
  ARM: 8788/1: ftrace: remove old mcount support
  ARM: 8786/1: Debug kernel copy by printing
2 parents 034bda1 + 3e98d24 commit 07171da

18 files changed, +203 -225 lines


arch/arm/Kconfig.debug

Lines changed: 26 additions & 19 deletions
@@ -45,35 +45,42 @@ config DEBUG_WX
 
 	  If in doubt, say "Y".
 
-# RMK wants arm kernels compiled with frame pointers or stack unwinding.
-# If you know what you are doing and are willing to live without stack
-# traces, you can get a slightly smaller kernel by setting this option to
-# n, but then RMK will have to kill you ;).
-config FRAME_POINTER
-	bool
-	depends on !THUMB2_KERNEL
-	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
+choice
+	prompt "Choose kernel unwinder"
+	default UNWINDER_ARM if AEABI && !FUNCTION_GRAPH_TRACER
+	default UNWINDER_FRAME_POINTER if !AEABI || FUNCTION_GRAPH_TRACER
 	help
-	  If you say N here, the resulting kernel will be slightly smaller and
-	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
-	  when a problem occurs with the kernel, the information that is
-	  reported is severely limited.
+	  This determines which method will be used for unwinding kernel stack
+	  traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+	  livepatch, lockdep, and more.
+
+config UNWINDER_FRAME_POINTER
+	bool "Frame pointer unwinder"
+	depends on !THUMB2_KERNEL && !CC_IS_CLANG
+	select ARCH_WANT_FRAME_POINTERS
+	select FRAME_POINTER
+	help
+	  This option enables the frame pointer unwinder for unwinding
+	  kernel stack traces.
 
-config ARM_UNWIND
-	bool "Enable stack unwinding support (EXPERIMENTAL)"
+config UNWINDER_ARM
+	bool "ARM EABI stack unwinder"
 	depends on AEABI
-	default y
+	select ARM_UNWIND
 	help
 	  This option enables stack unwinding support in the kernel
 	  using the information automatically generated by the
 	  compiler. The resulting kernel image is slightly bigger but
 	  the performance is not affected. Currently, this feature
-	  only works with EABI compilers. If unsure say Y.
+	  only works with EABI compilers.
 
-config OLD_MCOUNT
+endchoice
+
+config ARM_UNWIND
+	bool
+
+config FRAME_POINTER
 	bool
-	depends on FUNCTION_TRACER && FRAME_POINTER
-	default y
 
 config DEBUG_USER
 	bool "Verbose user fault messages"

arch/arm/Makefile

Lines changed: 1 addition & 5 deletions
@@ -74,7 +74,7 @@ endif
 arch-$(CONFIG_CPU_32v5)	=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
 arch-$(CONFIG_CPU_32v4T)	=-D__LINUX_ARM_ARCH__=4 -march=armv4t
 arch-$(CONFIG_CPU_32v4)	=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3)	=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v3)	=-D__LINUX_ARM_ARCH__=3 -march=armv3m
 
 # Evaluate arch cc-option calls now
 arch-y := $(arch-y)
@@ -264,13 +264,9 @@ platdirs := $(patsubst %,arch/arm/plat-%/,$(sort $(plat-y)))
 
 ifneq ($(CONFIG_ARCH_MULTIPLATFORM),y)
 ifneq ($(CONFIG_ARM_SINGLE_ARMV7M),y)
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(machdirs) $(platdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs) $(platdirs))
 endif
 endif
-endif
 
 export TEXT_OFFSET GZFLAGS MMUEXT
 

arch/arm/boot/compressed/head.S

Lines changed: 43 additions & 0 deletions
@@ -114,6 +114,35 @@
 #endif
 		.endm
 
+		/*
+		 * Debug kernel copy by printing the memory addresses involved
+		 */
+		.macro dbgkc, begin, end, cbegin, cend
+#ifdef DEBUG
+		kputc	#'\n'
+		kputc	#'C'
+		kputc	#':'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\begin, 8	/* Start of compressed kernel */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\end, 8		/* End of compressed kernel */
+		kputc	#'-'
+		kputc	#'>'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cbegin, 8	/* Start of kernel copy */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cend, 8	/* End of kernel copy */
+		kputc	#'\n'
+		kputc	#'\r'
+#endif
+		.endm
+
 		.section ".start", #alloc, #execinstr
 /*
  * sort out different calling conventions
@@ -450,6 +479,20 @@ dtb_check_done:
 		add	r6, r9, r5
 		add	r9, r9, r10
 
+#ifdef DEBUG
+		sub	r10, r6, r5
+		sub	r10, r9, r10
+		/*
+		 * We are about to copy the kernel to a new memory area.
+		 * The boundaries of the new memory area can be found in
+		 * r10 and r9, whilst r5 and r6 contain the boundaries
+		 * of the memory we are going to copy.
+		 * Calling dbgkc will help with the printing of this
+		 * information.
+		 */
+		dbgkc	r5, r6, r10, r9
+#endif
+
 1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
 		cmp	r6, r5
 		stmdb	r9!, {r0 - r3, r10 - r12, lr}
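
For reference, a DEBUG build prints one line of the form shown below before the copy loop runs. This is a host-side C rendering for illustration only; the real macro emits characters one at a time via kputc/kphex, and the exact hex case and width follow kphex.

    #include <stdio.h>

    /* Illustrative only: the line a DEBUG build prints before copying the kernel. */
    static void dbgkc_print(unsigned long begin, unsigned long end,
                            unsigned long cbegin, unsigned long cend)
    {
            printf("\nC:0x%08lx-0x%08lx->0x%08lx-0x%08lx\n\r",
                   begin, end, cbegin, cend);
    }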

arch/arm/include/asm/assembler.h

Lines changed: 11 additions & 0 deletions
@@ -467,6 +467,17 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm
 
+	.macro	uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
 	.macro	uaccess_disable, tmp, isb=1
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
 	/*
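
As a rough guide to the arithmetic above: the macro NULLs the pointer unless the whole range [addr, addr + size) fits below the limit, using only conditional arithmetic so there is no branch to mispredict. A branchy, standalone C sketch of the same check (illustrative; mask_range_ptr is not a kernel symbol):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the check done by uaccess_mask_range_ptr: branchy here,
     * branch-free in the assembly above. Returns NULL when addr + size
     * would run past 'limit'. */
    static void *mask_range_ptr(void *ptr, size_t size, uintptr_t limit)
    {
            uintptr_t addr = (uintptr_t)ptr;

            if (addr > limit - 1 ||         /* borrow from "subs tmp, tmp, addr" */
                size > limit - addr)        /* borrow from "subhss tmp, tmp, size" */
                    return NULL;            /* "movlo addr, #0" */
            return ptr;
    }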

arch/arm/include/asm/ftrace.h

Lines changed: 0 additions & 3 deletions
@@ -16,9 +16,6 @@ extern void __gnu_mcount_nc(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 struct dyn_arch_ftrace {
-#ifdef CONFIG_OLD_MCOUNT
-	bool	old_mcount;
-#endif
 };
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)

arch/arm/include/asm/thread_info.h

Lines changed: 2 additions & 2 deletions
@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
 struct user_vfp;
 struct user_vfp_exc;
 
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
-					   struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+					   struct user_vfp_exc *);
 extern int vfp_restore_user_hwstate(struct user_vfp *,
 				    struct user_vfp_exc *);
 #endif
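
Dropping the __user annotations reflects the new calling convention introduced elsewhere in this series: the VFP state is built in kernel-space buffers and then copied out in one checked pass. A hypothetical caller sketch, since the real signal-frame code is not part of this excerpt (preserve_vfp_to_user and its parameters are illustrative names):

    /* Hypothetical sketch, not the kernel's actual signal-frame code. */
    static int preserve_vfp_to_user(void __user *ufp, void __user *ufp_exc)
    {
            struct user_vfp kvfp;
            struct user_vfp_exc kvfp_exc;
            int err;

            err = vfp_preserve_user_clear_hwstate(&kvfp, &kvfp_exc);
            if (err)
                    return err;

            /* One checked copy per structure, instead of many __put_user()s. */
            if (__copy_to_user(ufp, &kvfp, sizeof(kvfp)) ||
                __copy_to_user(ufp_exc, &kvfp_exc, sizeof(kvfp_exc)))
                    return -EFAULT;
            return 0;
    }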

arch/arm/include/asm/uaccess.h

Lines changed: 43 additions & 6 deletions
@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
 static inline void set_fs(mm_segment_t fs)
 {
 	current_thread_info()->addr_limit = fs;
+
+	/*
+	 * Prevent a mispredicted conditional call to set_fs from forwarding
+	 * the wrong address limit to access_ok under speculation.
+	 */
+	dsb(nsh);
+	isb();
+
 	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
 }
 
@@ -91,6 +99,32 @@ static inline void set_fs(mm_segment_t fs)
 #define __inttype(x) \
 	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 
+/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size)			\
+	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+						    size_t size)
+{
+	void __user *safe_ptr = (void __user *)ptr;
+	unsigned long tmp;
+
+	asm volatile(
+	"	sub	%1, %3, #1\n"
+	"	subs	%1, %1, %0\n"
+	"	addhs	%1, %1, #1\n"
+	"	subhss	%1, %1, %2\n"
+	"	movlo	%0, #0\n"
+	: "+r" (safe_ptr), "=&r" (tmp)
+	: "r" (size), "r" (current_thread_info()->addr_limit)
+	: "cc");
+
+	csdb();
+	return safe_ptr;
+}
+
 /*
  * Single-value transfer routines.  They automatically use the right
  * size if we just have the right pointer type.  Note that the functions
@@ -362,19 +396,21 @@ do {									\
 	__pu_err;							\
 })
 
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
 #define __put_user(x, ptr)						\
 ({									\
 	long __pu_err = 0;						\
 	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
 	__pu_err;							\
 })
 
-#define __put_user_error(x, ptr, err)					\
-({									\
-	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
-	(void) 0;							\
-})
-
 #define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
 do {									\
 	unsigned long __pu_addr = (unsigned long)__pu_ptr;		\
@@ -454,6 +490,7 @@ do {									\
 	: "r" (x), "i" (-EFAULT)					\
 	: "cc")
 
+#endif /* !CONFIG_CPU_SPECTRE */
 
 #ifdef CONFIG_MMU
 extern unsigned long __must_check
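
A hedged usage sketch of the new helper, in the style the rest of the series applies to the signal, VFP and OABI paths (none of which appear in this excerpt; copy_block_to_user is an illustrative name): sanitise the whole destination range once, then use the unchecked copy.

    /* Illustrative caller only; not a function added by this merge. */
    static int copy_block_to_user(void __user *uptr, const void *src, size_t len)
    {
            void __user *safe = uaccess_mask_range_ptr(uptr, len);

            /*
             * If uptr + len would cross addr_limit, 'safe' is NULL, so a
             * mispredicted path cannot be steered at a kernel address; the
             * architectural copy simply faults and we report -EFAULT.
             */
            return __copy_to_user(safe, src, len) ? -EFAULT : 0;
    }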

arch/arm/kernel/armksyms.c

Lines changed: 0 additions & 3 deletions
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
 #endif
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
 

arch/arm/kernel/entry-common.S

Lines changed: 4 additions & 5 deletions
@@ -296,16 +296,15 @@ __sys_trace:
 	cmp	scno, #-1			@ skip the syscall?
 	bne	2b
 	add	sp, sp, #S_OFF			@ restore stack
-	b	ret_slow_syscall
 
-__sys_trace_return:
-	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+__sys_trace_return_nosave:
+	enable_irq_notrace
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
-__sys_trace_return_nosave:
-	enable_irq_notrace
+__sys_trace_return:
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	mov	r0, sp
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
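
A simplified, standalone C rendering of the reordered exit path above, for illustration only (the real code is the assembly; the functions below are stand-ins, not kernel symbols, and the irq-enable step is omitted):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for syscall_trace_exit and ret_slow_syscall (illustrative). */
    static void trace_exit_hook(void) { puts("syscall_trace_exit"); }
    static void slow_return(void)     { puts("ret_slow_syscall"); }

    /* After this change, the exit hook runs even when the tracer skipped the
     * syscall (scno == -1); only the r0 save is bypassed on that path. */
    static void traced_syscall_exit(bool skipped, long retval, long *saved_r0)
    {
            if (!skipped)
                    *saved_r0 = retval;     /* __sys_trace_return: save returned r0 */
            trace_exit_hook();              /* now called on both paths */
            slow_return();
    }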

arch/arm/kernel/entry-ftrace.S

Lines changed: 4 additions & 71 deletions
@@ -15,23 +15,8 @@
  * start of every function.  In mcount, apart from the function's address (in
  * lr), we need to get hold of the function's caller's address.
  *
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- *	bl	mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- *	mov	ip, sp
- *	push	{fp, ip, lr, pc}
- *	sub	fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
  *
  *	push	{lr}
  *	bl	__gnu_mcount_nc
@@ -46,17 +31,10 @@
  * allows it to be clobbered in subroutines and doesn't use it to hold
  * parameters.)
  *
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
  */
 
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
 .macro mcount_adjust_addr rd, rn
 	bic	\rd, \rn, #1		@ clear the Thumb bit if present
 	sub	\rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
 	mcount_exit
 .endm
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
-	stmdb	sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
-	ldr	\reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	stmdb	sp!, {lr}
-	ldr	lr, [fp, #-4]
-	ldmia	sp!, {pc}
-#else
-	__mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
-	__ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
-	__ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
 /*
  * __gnu_mcount_nc
  */
