Commit d048573

Author and committer: Ingo Molnar

x86/bugs: Rename various 'ia32_cap' variables to 'x86_arch_cap_msr'
So we are using the 'ia32_cap' value in a number of places, which got its name from the MSR_IA32_ARCH_CAPABILITIES MSR register.

But there's very little 'IA32' about it - this isn't 32-bit only code, nor does it originate from there; it's just a historic quirk that many Intel MSR names are prefixed with IA32_.

This is already clear from the helper method around the MSR: x86_read_arch_cap_msr(), which doesn't have the IA32 prefix.

So rename 'ia32_cap' to 'x86_arch_cap_msr' to be consistent with its role and with the naming of the helper function.

Signed-off-by: Ingo Molnar <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Nikolay Borisov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Sean Christopherson <[email protected]>
Link: https://lore.kernel.org/r/9592a18a814368e75f8f4b9d74d3883aa4fd1eaf.1712813475.git.jpoimboe@kernel.org
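
For context, the pattern these changes touch is simple: read MSR_IA32_ARCH_CAPABILITIES once via x86_read_arch_cap_msr(), cache the 64-bit value, and test individual capability bits with ARCH_CAP_* masks. The following is a minimal standalone C sketch of that pattern, not kernel code: fake_rdmsrl() is a stand-in for the kernel's rdmsrl(), and the bit positions of the two example masks are assumptions made for illustration; only the naming mirrors this commit.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative ARCH_CAP_* masks in the kernel's naming style; the bit
 * positions here are assumed for this sketch, not taken from the commit.
 */
#define ARCH_CAP_RDCL_NO	(1ULL << 0)
#define ARCH_CAP_MDS_NO		(1ULL << 5)

/* Stand-in for the kernel's rdmsrl(); it just fabricates a value here. */
static void fake_rdmsrl(uint64_t *val)
{
	*val = ARCH_CAP_RDCL_NO | ARCH_CAP_MDS_NO;
}

/* Mirrors the shape of x86_read_arch_cap_msr(): read once, return the snapshot. */
static uint64_t read_arch_cap_msr(void)
{
	uint64_t x86_arch_cap_msr = 0;	/* formerly named 'ia32_cap' */

	fake_rdmsrl(&x86_arch_cap_msr);
	return x86_arch_cap_msr;
}

int main(void)
{
	uint64_t x86_arch_cap_msr = read_arch_cap_msr();

	/* Callers test individual capability bits against the cached value. */
	if (x86_arch_cap_msr & ARCH_CAP_MDS_NO)
		printf("MDS_NO is enumerated\n");
	if (!(x86_arch_cap_msr & ARCH_CAP_RDCL_NO))
		printf("RDCL_NO is not enumerated\n");

	return 0;
}

In the kernel itself the snapshot lives either in a local variable or, in bugs.c, in a file-scope __ro_after_init variable filled once in cpu_select_mitigations(), as the hunks below show.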
1 parent cb2db5b commit d048573

3 files changed: 42 additions (+), 42 deletions (-)

arch/x86/kernel/apic/apic.c

Lines changed: 3 additions & 3 deletions

@@ -1687,11 +1687,11 @@ static int x2apic_state;
 
 static bool x2apic_hw_locked(void)
 {
-	u64 ia32_cap;
+	u64 x86_arch_cap_msr;
 	u64 msr;
 
-	ia32_cap = x86_read_arch_cap_msr();
-	if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
+	if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
 		rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
 		return (msr & LEGACY_XAPIC_DISABLED);
 	}

arch/x86/kernel/cpu/bugs.c

Lines changed: 15 additions & 15 deletions

@@ -61,7 +61,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
-static u64 __ro_after_init ia32_cap;
+static u64 __ro_after_init x86_arch_cap_msr;
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
@@ -146,7 +146,7 @@ void __init cpu_select_mitigations(void)
 		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
 	}
 
-	ia32_cap = x86_read_arch_cap_msr();
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Select the proper CPU mitigations before patching alternatives: */
 	spectre_v1_select_mitigation();
@@ -343,8 +343,8 @@ static void __init taa_select_mitigation(void)
 	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
 	 * update is required.
 	 */
-	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
-	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+	if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
 		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 
 	/*
@@ -434,7 +434,7 @@ static void __init mmio_select_mitigation(void)
 	 * be propagated to uncore buffers, clearing the Fill buffers on idle
 	 * is required irrespective of SMT state.
 	 */
-	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
 		static_branch_enable(&mds_idle_clear);
 
 	/*
@@ -444,10 +444,10 @@ static void __init mmio_select_mitigation(void)
 	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
 	 * affected systems.
 	 */
-	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+	if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
 	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
 	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
-	     !(ia32_cap & ARCH_CAP_MDS_NO)))
+	     !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
 		mmio_mitigation = MMIO_MITIGATION_VERW;
 	else
 		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -505,7 +505,7 @@ static void __init rfds_select_mitigation(void)
 	if (rfds_mitigation == RFDS_MITIGATION_OFF)
 		return;
 
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 	else
 		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -664,7 +664,7 @@ static void __init srbds_select_mitigation(void)
 	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
 	 * by Processor MMIO Stale Data vulnerability.
 	 */
-	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
 	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
 		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
 	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -807,7 +807,7 @@ static void __init gds_select_mitigation(void)
 	/* Will verify below that mitigation _can_ be disabled */
 
 	/* No microcode */
-	if (!(ia32_cap & ARCH_CAP_GDS_CTRL)) {
+	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
 		if (gds_mitigation == GDS_MITIGATION_FORCE) {
 			/*
 			 * This only needs to be done on the boot CPU so do it
@@ -1541,14 +1541,14 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
 /* Disable in-kernel use of non-RSB RET predictors */
 static void __init spec_ctrl_disable_kernel_rrsba(void)
 {
-	u64 ia32_cap;
+	u64 x86_arch_cap_msr;
 
 	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
 		return;
 
-	ia32_cap = x86_read_arch_cap_msr();
+	x86_arch_cap_msr = x86_read_arch_cap_msr();
 
-	if (ia32_cap & ARCH_CAP_RRSBA) {
+	if (x86_arch_cap_msr & ARCH_CAP_RRSBA) {
 		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
 		update_spec_ctrl(x86_spec_ctrl_base);
 	}
@@ -1916,7 +1916,7 @@ static void update_mds_branch_idle(void)
 	if (sched_smt_active()) {
 		static_branch_enable(&mds_idle_clear);
 	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+		   (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
 		static_branch_disable(&mds_idle_clear);
 	}
 }
@@ -2810,7 +2810,7 @@ static const char *spectre_bhi_state(void)
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
 		return "; BHI: SW loop, KVM: SW loop";
 	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-		 !(ia32_cap & ARCH_CAP_RRSBA))
+		 !(x86_arch_cap_msr & ARCH_CAP_RRSBA))
 		return "; BHI: Retpoline";
 	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
 		return "; BHI: Syscall hardening, KVM: SW loop";

arch/x86/kernel/cpu/common.c

Lines changed: 24 additions & 24 deletions

@@ -1284,33 +1284,33 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-	u64 ia32_cap = 0;
+	u64 x86_arch_cap_msr = 0;
 
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-	return ia32_cap;
+	return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-		ia32_cap & ARCH_CAP_PSDP_NO &&
-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
 	/* The "immunity" bit trumps everything else: */
-	if (ia32_cap & ARCH_CAP_RFDS_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
 		return false;
 
 	/*
 	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
 	 * indicate that mitigation is needed because guest is running on a
 	 * vulnerable hardware or may migrate to such hardware:
 	 */
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
 		return true;
 
 	/* Only consult the blacklist when there is no enumeration: */
@@ -1319,11 +1319,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
+	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1335,7 +1335,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1346,17 +1346,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
 	 * userspace indirect branch performance.
 	 */
-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
 		setup_force_cpu_bug(X86_BUG_MDS);
 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1375,9 +1375,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
 	 * update is not present or running as guest that don't get TSX_CTRL.
 	 */
-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
 	    (cpu_has(c, X86_FEATURE_RTM) ||
-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
 		setup_force_cpu_bug(X86_BUG_TAA);
 
 	/*
@@ -1403,15 +1403,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (!arch_cap_mmio_immune(ia32_cap)) {
+	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
 	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
@@ -1429,15 +1429,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
 	 * which means that AVX will be disabled.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (vulnerable_to_rfds(ia32_cap))
+	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1447,7 +1447,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		return;
 
 	/* Rogue Data Cache Load? No! */
-	if (ia32_cap & ARCH_CAP_RDCL_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
