
Commit c10a263

Merge remote-tracking branch 'linux-lmerwick-public/v4.14.35-1831.merge57-v2' into master-next

Merge upstream stable version 4.14.57 with fixes for ARM conflicts.

Signed-off-by: Liam Merwick <[email protected]>
Signed-off-by: Jack Vogel <[email protected]>

2 parents: 45b8433 + 3bc97be; commit: c10a263

File tree: 125 files changed, +1577 and -414 lines

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 17 additions & 0 deletions

@@ -4106,6 +4106,23 @@
 			expediting.  Set to zero to disable automatic
 			expediting.
 
+	ssbd=		[ARM64,HW]
+			Speculative Store Bypass Disable control
+
+			On CPUs that are vulnerable to the Speculative
+			Store Bypass vulnerability and offer a
+			firmware based mitigation, this parameter
+			indicates how the mitigation should be used:
+
+			force-on:  Unconditionally enable mitigation for
+				   both kernel and userspace
+			force-off: Unconditionally disable mitigation for
+				   both kernel and userspace
+			kernel:    Always enable mitigation in the
+				   kernel, and offer a prctl interface
+				   to allow userspace to register its
+				   interest in being mitigated too.
+
 	stack_guard_gap= [MM]
 			override the default stack gap protection. The value
 			is in page units and it defines how many pages prior

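The prctl interface mentioned under "kernel:" is Linux's generic speculation-control prctl. As a hedged illustration (not part of this commit), a task running on a kernel booted with ssbd=kernel could opt into the mitigation roughly as below, assuming headers that define PR_SET_SPECULATION_CTRL and PR_SPEC_STORE_BYPASS:

/*
 * Sketch only: userspace opt-in to the SSB mitigation under
 * ssbd=kernel, via the generic speculation-control prctl().
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* PR_SPEC_DISABLE disables Speculative Store Bypass for this
	 * task, i.e. it turns the mitigation on. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) != 0) {
		perror("PR_SET_SPECULATION_CTRL");
		return 1;
	}

	/* Read the per-task state back; the PR_SPEC_DISABLE bit set in
	 * the return value means the mitigation is active. */
	printf("SSB speculation state: 0x%x\n",
	       (unsigned int)prctl(PR_GET_SPECULATION_CTRL,
				   PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}
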
arch/arm/include/asm/kvm_host.h

Lines changed: 12 additions & 0 deletions

@@ -318,6 +318,18 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {}
 static inline void kvm_arm_vhe_guest_enter(void) {}
 static inline void kvm_arm_vhe_guest_exit(void) {}
 
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL			1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	/* No way to detect it yet, pretend it is not there. */
+	return KVM_SSBD_UNKNOWN;
+}
+
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}

arch/arm/include/asm/kvm_mmu.h

Lines changed: 12 additions & 0 deletions

@@ -28,6 +28,13 @@
  */
 #define kern_hyp_va(kva)	(kva)
 
+/* Contrary to arm64, there is no need to generate a PC-relative address */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr = &(s);					\
+		addr;							\
+	})
+
 /*
  * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
  */
@@ -316,6 +323,11 @@ static inline int kvm_map_vectors(void)
 	return 0;
 }
 
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */

arch/arm64/Kconfig

Lines changed: 9 additions & 0 deletions

@@ -863,6 +863,15 @@ config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
+config ARM64_SSBD
+	bool "Speculative Store Bypass Disable" if EXPERT
+	default y
+	help
+	  This enables mitigation of the bypassing of previous stores
+	  by speculative loads.
+
+	  If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
 	depends on COMPAT

arch/arm64/include/asm/alternative.h

Lines changed: 37 additions & 4 deletions

@@ -5,6 +5,8 @@
 #include <asm/cpucaps.h>
 #include <asm/insn.h>
 
+#define ARM64_CB_PATCH ARM64_NCAPS
+
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
@@ -22,12 +24,19 @@ struct alt_instr {
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
+typedef void (*alternative_cb_t)(struct alt_instr *alt,
+				 __le32 *origptr, __le32 *updptr, int nr_inst);
+
 void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
 
-#define ALTINSTR_ENTRY(feature)						      \
+#define ALTINSTR_ENTRY(feature,cb)					      \
 	" .word 661b - .\n"				/* label           */ \
+	" .if " __stringify(cb) " == 0\n"				      \
 	" .word 663f - .\n"				/* new instruction */ \
+	" .else\n"							      \
+	" .word " __stringify(cb) "- .\n"		/* callback        */ \
+	" .endif\n"							      \
 	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
 	" .byte 662b-661b\n"				/* source len      */ \
 	" .byte 664f-663f\n"				/* replacement len */
@@ -45,27 +54,36 @@ void apply_alternatives(void *start, size_t length);
  * but most assemblers die if insn1 or insn2 have a .inst. This should
  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ *
+ * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)	\
 	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
 	".pushsection .altinstructions,\"a\"\n"				\
-	ALTINSTR_ENTRY(feature)						\
+	ALTINSTR_ENTRY(feature,cb)					\
 	".popsection\n"							\
+	" .if " __stringify(cb) " == 0\n"				\
 	".pushsection .altinstr_replacement, \"a\"\n"			\
 	"663:\n\t"							\
 	newinstr "\n"							\
 	"664:\n\t"							\
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
 	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".else\n\t"							\
+	"663:\n\t"							\
+	"664:\n\t"							\
+	".endif\n"							\
 	".endif\n"
 
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
 
+#define ALTERNATIVE_CB(oldinstr, cb) \
+	__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
 #else
 
 #include <asm/assembler.h>
@@ -132,6 +150,14 @@ void apply_alternatives(void *start, size_t length);
 661:
 .endm
 
+.macro alternative_cb cb
+	.set .Lasm_alt_mode, 0
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, \cb, ARM64_CB_PATCH, 662f-661f, 0
+	.popsection
+661:
+.endm
+
 /*
  * Provide the other half of the alternative code sequence.
  */
@@ -157,6 +183,13 @@ void apply_alternatives(void *start, size_t length);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
 
+/*
+ * Callback-based alternative epilogue
+ */
+.macro alternative_cb_end
+662:
+.endm
+
 /*
  * Provides a trivial alternative or default sequence consisting solely
  * of NOPs. The number of NOPs is chosen automatically to match the

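To make the callback flow concrete, here is a hedged sketch of an alternative_cb_t implementation; the callback name is invented for illustration, while aarch64_insn_gen_nop() and cpu_to_le32() are existing kernel helpers. With ARM64_CB_PATCH, the patcher invokes the callback instead of copying a static replacement sequence:

/*
 * Illustrative only (not from this commit): a callback matching the
 * alternative_cb_t typedef above. When alternatives are applied, it
 * rewrites each targeted instruction with a NOP.
 */
static void __init my_nop_patch_cb(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr,
				   int nr_inst)
{
	int i;

	for (i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * A C call site would then use:
 *	asm volatile(ALTERNATIVE_CB("nop", my_nop_patch_cb));
 * and assembly would bracket the region with
 *	alternative_cb my_nop_patch_cb ... alternative_cb_end
 */
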
arch/arm64/include/asm/cpucaps.h

Lines changed: 2 additions & 2 deletions

@@ -45,8 +45,8 @@
 #define ARM64_HARDEN_BRANCH_PREDICTOR	24
 #define ARM64_HARDEN_BP_POST_GUEST_EXIT	25
 #define ARM64_HAS_RAS_EXTN		26
+#define ARM64_SSBD			27
 
-#define ARM64_NCAPS			27
-
+#define ARM64_NCAPS			28
 
 #endif /* __ASM_CPUCAPS_H */

arch/arm64/include/asm/cpufeature.h

Lines changed: 22 additions & 0 deletions

@@ -312,6 +312,28 @@ static inline u64 read_zcr_features(void)
 	return zcr;
 }
 
+#define ARM64_SSBD_UNKNOWN		-1
+#define ARM64_SSBD_FORCE_DISABLE	0
+#define ARM64_SSBD_KERNEL		1
+#define ARM64_SSBD_FORCE_ENABLE		2
+#define ARM64_SSBD_MITIGATED		3
+
+static inline int arm64_get_ssbd_state(void)
+{
+#ifdef CONFIG_ARM64_SSBD
+	extern int ssbd_state;
+	return ssbd_state;
+#else
+	return ARM64_SSBD_UNKNOWN;
+#endif
+}
+
+#ifdef CONFIG_ARM64_SSBD
+void arm64_set_ssbd_mitigation(bool state);
+#else
+static inline void arm64_set_ssbd_mitigation(bool state) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif

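For orientation, a hedged sketch of consuming arm64_get_ssbd_state(); the helper name is invented and not part of this commit:

/* Illustrative only: mapping the global SSBD state to a description. */
static const char *ssbd_state_str(void)
{
	switch (arm64_get_ssbd_state()) {
	case ARM64_SSBD_FORCE_DISABLE:
		return "forcibly disabled";
	case ARM64_SSBD_KERNEL:
		return "kernel-enabled, userspace opt-in via prctl";
	case ARM64_SSBD_FORCE_ENABLE:
		return "forcibly enabled";
	case ARM64_SSBD_MITIGATED:
		return "mitigated in hardware/firmware";
	default:
		return "unknown";
	}
}
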
arch/arm64/include/asm/kvm_asm.h

Lines changed: 28 additions & 2 deletions

@@ -20,6 +20,9 @@
 
 #include <asm/virt.h>
 
+#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
+#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
@@ -73,14 +76,37 @@ extern u32 __init_stage2_translation(void);
 
 extern void __qcom_hyp_sanitize_btac_predictors(void);
 
+/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+#define __hyp_this_cpu_ptr(sym)						\
+	({								\
+		void *__ptr = hyp_symbol_addr(sym);			\
+		__ptr += read_sysreg(tpidr_el2);			\
+		(typeof(&sym))__ptr;					\
+	 })
+
+#define __hyp_this_cpu_read(sym)					\
+	({								\
+		*__hyp_this_cpu_ptr(sym);				\
+	 })
+
 #else /* __ASSEMBLY__ */
 
-.macro get_host_ctxt reg, tmp
-	adr_l	\reg, kvm_host_cpu_state
+.macro hyp_adr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
 	mrs	\tmp, tpidr_el2
 	add	\reg, \reg, \tmp
 .endm
 
+.macro hyp_ldr_this_cpu reg, sym, tmp
+	adr_l	\reg, \sym
+	mrs	\tmp, tpidr_el2
+	ldr	\reg, [\reg, \tmp]
+.endm
+
+.macro get_host_ctxt reg, tmp
+	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+.endm
+
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]

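A hedged sketch of how HYP code can use the helper above to test the per-CPU SSBD flag introduced later in this series; the function name is illustrative:

/*
 * Illustrative only: hyp_symbol_addr() keeps the address PC-relative
 * and tpidr_el2 supplies this CPU's per-CPU offset, so the read is
 * safe under the HYP VA map.
 */
static bool __hyp_text ssbd_callback_required(void)
{
	return __hyp_this_cpu_read(arm64_ssbd_callback_required);
}
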
arch/arm64/include/asm/kvm_host.h

Lines changed: 26 additions & 0 deletions

@@ -216,6 +216,9 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
+	/* State of various workarounds, see kvm_asm.h for bit assignment */
+	u64 workaround_flags;
+
 	/* Guest debug state */
 	u64 debug_flags;
 
@@ -455,6 +458,29 @@ static inline void kvm_arm_vhe_guest_exit(void)
 	isb();
 }
 
+#define KVM_SSBD_UNKNOWN		-1
+#define KVM_SSBD_FORCE_DISABLE		0
+#define KVM_SSBD_KERNEL			1
+#define KVM_SSBD_FORCE_ENABLE		2
+#define KVM_SSBD_MITIGATED		3
+
+static inline int kvm_arm_have_ssbd(void)
+{
+	switch (arm64_get_ssbd_state()) {
+	case ARM64_SSBD_FORCE_DISABLE:
+		return KVM_SSBD_FORCE_DISABLE;
+	case ARM64_SSBD_KERNEL:
+		return KVM_SSBD_KERNEL;
+	case ARM64_SSBD_FORCE_ENABLE:
+		return KVM_SSBD_FORCE_ENABLE;
+	case ARM64_SSBD_MITIGATED:
+		return KVM_SSBD_MITIGATED;
+	case ARM64_SSBD_UNKNOWN:
+	default:
+		return KVM_SSBD_UNKNOWN;
+	}
+}
+
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);

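A hedged sketch of how the new workaround_flags field and kvm_arm_have_ssbd() fit together at vcpu reset; this mirrors what the full series does elsewhere, but the wrapper name here is illustrative:

/* Illustrative only: default the SSBD workaround to "on" for a guest
 * when the host runs in the opt-in (kernel) mode. */
static void vcpu_default_ssbd(struct kvm_vcpu *vcpu)
{
	vcpu->arch.workaround_flags = 0;
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
}
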
arch/arm64/include/asm/kvm_mmu.h

Lines changed: 44 additions & 0 deletions

@@ -130,6 +130,26 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s)						\
+	({								\
+		typeof(s) *addr;					\
+		asm("adrp	%0, %1\n"				\
+		    "add	%0, %0, :lo12:%1\n"			\
+		    : "=r" (addr) : "S" (&s));				\
+		addr;							\
+	})
+
 /*
  * We currently only support a 40bit IPA.
  */
@@ -389,5 +409,29 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+	int cpu, err;
+
+	for_each_possible_cpu(cpu) {
+		u64 *ptr;
+
+		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */

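hyp_map_aux_data() exists so init code can map each CPU's arm64_ssbd_callback_required variable into HYP. A hedged sketch of a call site, following the pattern of the series' KVM init path (the wrapper function is illustrative):

/* Illustrative only: invoking hyp_map_aux_data() during HYP setup. */
static int init_ssbd_hyp_mappings(void)
{
	int err = hyp_map_aux_data();

	if (err)
		kvm_err("Cannot map host auxiliary data: %d\n", err);

	return err;
}
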
arch/arm64/include/asm/thread_info.h

Lines changed: 1 addition & 0 deletions

@@ -97,6 +97,7 @@ void arch_release_task_struct(struct task_struct *tsk);
 #define TIF_32BIT		22	/* 32bit process */
 #define TIF_SVE			23	/* Scalable Vector Extension in use */
 #define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
+#define TIF_SSBD		25	/* Wants SSB mitigation */
 #define TIF_KSPLICE_FREEZING	26
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 0 deletions

@@ -55,6 +55,7 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST)	+= arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
+arm64-obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 
 ifeq ($(CONFIG_KVM),y)
 arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR)	+= bpi.o
