Skip to content

Commit cb2db5b

Browse files
jpoimboe authored and Ingo Molnar committed
x86/bugs: Cache the value of MSR_IA32_ARCH_CAPABILITIES
There's no need to keep reading MSR_IA32_ARCH_CAPABILITIES over and over. It's even read in the BHI sysfs function, which is a big no-no. Just read it once and cache it.

Fixes: ec9404e ("x86/bhi: Add BHI mitigation knob")
Signed-off-by: Josh Poimboeuf <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Nikolay Borisov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Sean Christopherson <[email protected]>
Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
1 parent dfe6489 commit cb2db5b

File tree

1 file changed

+7
-15
lines changed

1 file changed

+7
-15
lines changed

arch/x86/kernel/cpu/bugs.c

Lines changed: 7 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
6161
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
6262
EXPORT_SYMBOL_GPL(x86_pred_cmd);
6363

64+
static u64 __ro_after_init ia32_cap;
65+
6466
static DEFINE_MUTEX(spec_ctrl_mutex);
6567

6668
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
144146
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
145147
}
146148

149+
ia32_cap = x86_read_arch_cap_msr();
150+
147151
/* Select the proper CPU mitigations before patching alternatives: */
148152
spectre_v1_select_mitigation();
149153
spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
301305

302306
static void __init taa_select_mitigation(void)
303307
{
304-
u64 ia32_cap;
305-
306308
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
307309
taa_mitigation = TAA_MITIGATION_OFF;
308310
return;
@@ -341,7 +343,6 @@ static void __init taa_select_mitigation(void)
341343
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
342344
* update is required.
343345
*/
344-
ia32_cap = x86_read_arch_cap_msr();
345346
if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
346347
!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
347348
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
401402

402403
static void __init mmio_select_mitigation(void)
403404
{
404-
u64 ia32_cap;
405-
406405
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
407406
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
408407
cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
413412
if (mmio_mitigation == MMIO_MITIGATION_OFF)
414413
return;
415414

416-
ia32_cap = x86_read_arch_cap_msr();
417-
418415
/*
419416
* Enable CPU buffer clear mitigation for host and VMM, if also affected
420417
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
508505
if (rfds_mitigation == RFDS_MITIGATION_OFF)
509506
return;
510507

511-
if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
508+
if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
512509
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
513510
else
514511
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
659656

660657
static void __init srbds_select_mitigation(void)
661658
{
662-
u64 ia32_cap;
663-
664659
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
665660
return;
666661

@@ -669,7 +664,6 @@ static void __init srbds_select_mitigation(void)
669664
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
670665
* by Processor MMIO Stale Data vulnerability.
671666
*/
672-
ia32_cap = x86_read_arch_cap_msr();
673667
if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
674668
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
675669
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
813807
/* Will verify below that mitigation _can_ be disabled */
814808

815809
/* No microcode */
816-
if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
810+
if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
817811
if (gds_mitigation == GDS_MITIGATION_FORCE) {
818812
/*
819813
* This only needs to be done on the boot CPU so do it
@@ -1908,8 +1902,6 @@ static void update_indir_branch_cond(void)
19081902
/* Update the static key controlling the MDS CPU buffer clear in idle */
19091903
static void update_mds_branch_idle(void)
19101904
{
1911-
u64 ia32_cap = x86_read_arch_cap_msr();
1912-
19131905
/*
19141906
* Enable the idle clearing if SMT is active on CPUs which are
19151907
* affected only by MSBDS and not any other MDS variant.
@@ -2818,7 +2810,7 @@ static const char *spectre_bhi_state(void)
28182810
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
28192811
return "; BHI: SW loop, KVM: SW loop";
28202812
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
2821-
!(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
2813+
!(ia32_cap & ARCH_CAP_RRSBA))
28222814
return "; BHI: Retpoline";
28232815
else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
28242816
return "; BHI: Syscall hardening, KVM: SW loop";

0 commit comments

Comments (0)