
Commit 648702f

x86/bugs/IBRS: Disable SSB (RDS) if IBRS is selected for spectre_v2.
If =userspace is selected we want to frob the SPEC_CTRL MSR on every
userspace entrance (disable memory disambiguation), and also on every
kernel entrance (enable memory disambiguation). However we have to be
careful, as having the MSR frobbed while retpoline is enabled slows the
machine down even further. Therefore, if possible, swap over to using
the SPEC_CTRL MSR (IBRS) on every kernel entrance instead of using
retpoline. Naturally this heuristic is controlled by various knobs.

To summarize, if "spectre_v2=retpoline spec_store_bypass_disable=userspace"
is set then we will switch spectre_v2 to IBRS. This table may explain
this better:

 effect    | spectre_v2 | spec_store_bypass_disable | remark
===========+============+===========================+=======
 IBRS      | ibrs       | userspace                 |
 IBRS      | auto       | userspace                 | *1 *2
 IBRS      | retpoline  | userspace                 | *1
 IBRS      | ibrs       | boot                      |
 retpoline | auto       | boot                      |
 retpoline | retpoline  | boot                      |
 retpoline | auto       | boot                      |
 retpoline | auto       | auto                      |

*1: If spectre_v2_heuristic=off or spectre_v2_heuristic=rds=off is
    selected then the spec_store_bypass_disable=userspace parameter is
    not followed and the effect is both retpoline and IBRS enabled in
    the kernel.
*2: If we run on Skylake+ then 'spec_store_bypass_disable=auto' will
    disable retpoline and enable IBRS. If not on Skylake+, then
    retpoline and IBRS are both enabled.

Orabug: 28034177
CVE: CVE-2018-3639

Signed-off-by: Konrad Rzeszutek Wilk <[email protected]>
Tested-by: Mihai Carabas <[email protected]>
Reviewed-by: Mihai Carabas <[email protected]>
Reviewed-by: John Haxby <[email protected]>
---
v2: Drop the __init for rds_ibrs_selected
v4: s/entrace/entrance/
    Added Reviewed-by
    Fixed title
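The table can be read as a small decision function. The sketch below is
illustrative only and simplified (it ignores an explicit spectre_v2=ibrs
selection); the identifiers effective_spectre_v2, ssbd_userspace,
rds_heuristic_on and skylake_plus are invented for this example and are
not symbols from the patch, whose real logic lives in
spectre_v2_select_mitigation() and __ssb_select_mitigation():

/* Hypothetical sketch of the heuristic described by the table above. */
#include <stdio.h>

enum v2_mode { V2_RETPOLINE, V2_IBRS };

static enum v2_mode effective_spectre_v2(int ssbd_userspace,
                                         int rds_heuristic_on,
                                         int skylake_plus)
{
        /*
         * spec_store_bypass_disable=userspace pulls spectre_v2 over to
         * IBRS, unless spectre_v2_heuristic=off or =rds=off vetoes it
         * (footnote *1 above).
         */
        if (ssbd_userspace && rds_heuristic_on)
                return V2_IBRS;

        /* Otherwise the existing Skylake+ heuristic decides. */
        return skylake_plus ? V2_IBRS : V2_RETPOLINE;
}

int main(void)
{
        /* "spectre_v2=retpoline spec_store_bypass_disable=userspace" */
        printf("%s\n", effective_spectre_v2(1, 1, 0) == V2_IBRS
               ? "IBRS" : "retpoline");
        return 0;
}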
1 parent 10383e7 commit 648702f

3 files changed (+61 −14 lines)


Documentation/admin-guide/kernel-parameters.txt (4 additions, 0 deletions)

@@ -3979,6 +3979,8 @@
 			via prctl. Speculative Store Bypass is enabled
 			for a process by default. The state of the control
 			is inherited on fork.
+			userspace - Disable Speculative Store Bypass when entering
+			userspace.
 
 			Not specifying this option is equivalent to
 			spec_store_bypass_disable=auto.
@@ -3990,6 +3992,8 @@
 			skylake=off - do not use IBRS if present on Skylake
 				instead of retpoline (this is equivalant
 				to spectre_v2=retpoline,generic).
+			rds=off - do not activate the Speculative Store Bypass
+				mitigation if doing IBRS.
 
 	spia_io_base=	[HW,MTD]
 	spia_fio_base=
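As a usage illustration (not part of the patch), the new option combines
with the heuristic knob documented just above. For example, booting with

    spec_store_bypass_disable=userspace spectre_v2=auto

would, per the table in the commit message, switch spectre_v2 over to
IBRS, while also passing spectre_v2_heuristic=rds=off keeps that switch
from happening.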

arch/x86/include/asm/nospec-branch.h (1 addition, 0 deletions)

@@ -233,6 +233,7 @@ enum ssb_mitigation {
 	SPEC_STORE_BYPASS_NONE,
 	SPEC_STORE_BYPASS_DISABLE,
 	SPEC_STORE_BYPASS_PRCTL,
+	SPEC_STORE_BYPASS_USERSPACE,
 };
 
 extern char __indirect_thunk_start[];

arch/x86/kernel/cpu/bugs.c (56 additions, 14 deletions)

@@ -54,6 +54,7 @@ EXPORT_SYMBOL(spec_ctrl_mutex);
 bool use_ibrs_on_skylake = true;
 EXPORT_SYMBOL(use_ibrs_on_skylake);
 
+bool use_ibrs_with_rds = true;
 
 int __init spectre_v2_heuristics_setup(char *p)
 {
@@ -63,6 +64,7 @@ int __init spectre_v2_heuristics_setup(char *p)
 		/* Disable all heuristics. */
 		if (!strncmp(p, "off", 3)) {
 			use_ibrs_on_skylake = false;
+			use_ibrs_with_rds = false;
 			break;
 		}
 		len = strlen("skylake");
@@ -75,6 +77,16 @@ int __init spectre_v2_heuristics_setup(char *p)
 			if (!strncmp(p, "off", 3))
 				use_ibrs_on_skylake = false;
 		}
+		len = strlen("rds");
+		if (!strncmp(p, "rds", len)) {
+			p += len;
+			if (*p == '=')
+				++p;
+			if (*p == '\0')
+				break;
+			if (!strncmp(p, "off", 3))
+				use_ibrs_with_rds = false;
+		}
 
 		p = strpbrk(p, ",");
 		if (!p)
@@ -87,6 +99,7 @@ __setup("spectre_v2_heuristics=", spectre_v2_heuristics_setup);
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
+static bool rds_ibrs_selected(void);
 
 /*
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
@@ -213,14 +226,18 @@ void x86_spec_ctrl_set(u64 val)
 		/*
 		 * Only two states are allowed - with IBRS or without.
 		 */
-		if (check_ibrs_inuse()) {
+		if (rds_ibrs_selected()) {
 			if (val & SPEC_CTRL_IBRS)
 				host = x86_spec_ctrl_priv;
 			else
-				host = val;
-		} else
-			host = x86_spec_ctrl_base | val;
-
+				host = val & ~(SPEC_CTRL_RDS);
+		} else {
+			if (ibrs_inuse)
+				host = x86_spec_ctrl_priv;
+			else
+				host = x86_spec_ctrl_base;
+			host |= val;
+		}
 		wrmsrl(MSR_IA32_SPEC_CTRL, host);
 	}
 }
@@ -588,7 +605,8 @@ static void __init spectre_v2_select_mitigation(void)
 	 */
 	if (!retp_compiler() /* prefer IBRS over minimal ASM */ ||
 	    (retp_compiler() && !retpoline_selected(cmd) &&
-	     is_skylake_era() && use_ibrs_on_skylake)) {
+	     ((is_skylake_era() && use_ibrs_on_skylake) ||
+	      (rds_ibrs_selected() && use_ibrs_with_rds)))) {
 		/* Start the engine! */
 		mode = ibrs_select();
 		if (mode == SPECTRE_V2_IBRS)
@@ -650,18 +668,25 @@ static void __init spectre_v2_select_mitigation(void)
 
 static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
 
+bool rds_ibrs_selected(void)
+{
+	return (ssb_mode == SPEC_STORE_BYPASS_USERSPACE);
+}
+
 /* The kernel command line selection */
 enum ssb_mitigation_cmd {
 	SPEC_STORE_BYPASS_CMD_NONE,
 	SPEC_STORE_BYPASS_CMD_AUTO,
 	SPEC_STORE_BYPASS_CMD_ON,
 	SPEC_STORE_BYPASS_CMD_PRCTL,
+	SPEC_STORE_BYPASS_CMD_USERSPACE,
 };
 
 static const char *ssb_strings[] = {
 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
-	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl"
+	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
+	[SPEC_STORE_BYPASS_USERSPACE]	= "Mitigation: Speculative Store Bypass disabled for userspace"
 };
 
 static const struct {
@@ -672,6 +697,7 @@ static const struct {
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },    /* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },  /* Don't touch Speculative Store Bypass */
 	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+	{ "userspace",	SPEC_STORE_BYPASS_CMD_USERSPACE }, /* Disable Speculative Store Bypass for userspace */
 };
 
 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
@@ -721,15 +747,22 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 
 	switch (cmd) {
 	case SPEC_STORE_BYPASS_CMD_AUTO:
-		/* Choose prctl as the default mode */
-		mode = SPEC_STORE_BYPASS_PRCTL;
+		/* Choose prctl as the default mode unless IBRS is enabled. */
+		if (spectre_v2_enabled == SPECTRE_V2_IBRS)
+			mode = SPEC_STORE_BYPASS_USERSPACE;
+		else
+			mode = SPEC_STORE_BYPASS_PRCTL;
 		break;
 	case SPEC_STORE_BYPASS_CMD_ON:
 		mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
 	case SPEC_STORE_BYPASS_CMD_PRCTL:
 		mode = SPEC_STORE_BYPASS_PRCTL;
 		break;
+	case SPEC_STORE_BYPASS_CMD_USERSPACE:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+			mode = SPEC_STORE_BYPASS_USERSPACE;
+		break;
 	case SPEC_STORE_BYPASS_CMD_NONE:
 		break;
 	}
@@ -740,20 +773,28 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
 	 * - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
 	 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
 	 */
-	if (mode == SPEC_STORE_BYPASS_DISABLE) {
+	if (mode == SPEC_STORE_BYPASS_DISABLE)
 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+
+	if (mode == SPEC_STORE_BYPASS_DISABLE ||
+	    mode == SPEC_STORE_BYPASS_USERSPACE) {
 		/*
 		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
 		 * a completely different MSR and bit dependent on family.
 		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
-			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
-			x86_spec_ctrl_set(SPEC_CTRL_RDS);
+			if (mode == SPEC_STORE_BYPASS_DISABLE) {
+				x86_spec_ctrl_mask &= ~(SPEC_CTRL_RDS);
+				x86_spec_ctrl_set(SPEC_CTRL_RDS);
+			}
+			else
+				x86_spec_ctrl_priv &= ~(SPEC_CTRL_RDS);
			break;
		case X86_VENDOR_AMD:
-			x86_amd_rds_enable();
+			if (mode == SPEC_STORE_BYPASS_DISABLE)
+				x86_amd_rds_enable();
			break;
		}
	}
@@ -792,6 +833,7 @@ static int ssb_prctl_set(unsigned long ctrl)
 static int ssb_prctl_get(void)
 {
 	switch (ssb_mode) {
+	case SPEC_STORE_BYPASS_USERSPACE:
 	case SPEC_STORE_BYPASS_DISABLE:
 		return PR_SPEC_DISABLE;
 	case SPEC_STORE_BYPASS_PRCTL:
@@ -830,7 +872,7 @@ int arch_prctl_spec_ctrl_get(unsigned long which)
 
 void x86_spec_ctrl_setup_ap(void)
 {
-	if (boot_cpu_has(X86_FEATURE_IBRS))
+	if (boot_cpu_has(X86_FEATURE_IBRS) && ssb_mode != SPEC_STORE_BYPASS_USERSPACE)
 		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
 
 	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
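For orientation (not code from the patch): per the commit message, the
=userspace mode aims to run the kernel with IBRS set and memory
disambiguation re-enabled, and to return to userspace with speculative
store bypass disabled. The hypothetical snippet below only spells out
those two SPEC_CTRL images, using IBRS as bit 0 of IA32_SPEC_CTRL and
bit 2 for RDS as noted in the bugs.c comment above:

/* Hypothetical illustration of the two SPEC_CTRL values the =userspace
 * mode alternates between; not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_IBRS (1ULL << 0)  /* restrict indirect branch speculation */
#define SPEC_CTRL_RDS  (1ULL << 2)  /* reduced data speculation / SSB disabled */

int main(void)
{
        /* Kernel entrance: IBRS on, store bypass allowed again (faster). */
        uint64_t kernel_img = SPEC_CTRL_IBRS;

        /* Userspace entrance: store bypass disabled, IBRS not needed. */
        uint64_t user_img = SPEC_CTRL_RDS;

        printf("kernel SPEC_CTRL=0x%llx user SPEC_CTRL=0x%llx\n",
               (unsigned long long)kernel_img,
               (unsigned long long)user_img);
        return 0;
}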
