@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
+static u64 __ro_after_init ia32_cap;
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
 	}
 
+	ia32_cap = x86_read_arch_cap_msr();
+
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
 
 static void __init taa_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
 		taa_mitigation = TAA_MITIGATION_OFF;
 		return;
@@ -341,7 +343,6 @@ static void __init taa_select_mitigation(void)
 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
 	 * update is required.
 	 */
-	ia32_cap = x86_read_arch_cap_msr();
 	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
 	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
 
 static void __init mmio_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
 	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
 	     cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
413
412
if (mmio_mitigation == MMIO_MITIGATION_OFF )
414
413
return ;
415
414
416
- ia32_cap = x86_read_arch_cap_msr ();
417
-
418
415
/*
419
416
* Enable CPU buffer clear mitigation for host and VMM, if also affected
420
417
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
 		return;
 
-	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 	else
 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
 
 static void __init srbds_select_mitigation(void)
 {
-	u64 ia32_cap;
-
 	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
 		return;
 
@@ -669,7 +664,6 @@ static void __init srbds_select_mitigation(void)
 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
 	 * by Processor MMIO Stale Data vulnerability.
 	 */
-	ia32_cap = x86_read_arch_cap_msr();
 	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
 	/* Will verify below that mitigation _can_ be disabled */
 
 	/* No microcode */
-	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
 			/*
 			 * This only needs to be done on the boot CPU so do it
@@ -1908,8 +1902,6 @@ static void update_indir_branch_cond(void)
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
-
 	/*
 	 * Enable the idle clearing if SMT is active on CPUs which are
 	 * affected only by MSBDS and not any other MDS variant.
@@ -2818,7 +2810,7 @@ static const char *spectre_bhi_state(void)
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 		return "; BHI: SW loop, KVM: SW loop";
 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+		 !(ia32_cap & ARCH_CAP_RRSBA))
 		return "; BHI: Retpoline";
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 		return "; BHI: Syscall hardening, KVM: SW loop";