@@ -1284,33 +1284,33 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-	u64 ia32_cap = 0;
+	u64 x86_arch_cap_msr = 0;
 
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-	return ia32_cap;
+	return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-		ia32_cap & ARCH_CAP_PSDP_NO &&
-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
 	/* The "immunity" bit trumps everything else: */
-	if (ia32_cap & ARCH_CAP_RFDS_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
 		return false;
 
 	/*
 	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
 	 * indicate that mitigation is needed because guest is running on a
 	 * vulnerable hardware or may migrate to such hardware:
 	 */
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
		return true;
 
 	/* Only consult the blacklist when there is no enumeration: */
@@ -1319,11 +1319,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
+	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1335,7 +1335,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1346,17 +1346,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
 	 * userspace indirect branch performance.
 	 */
-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
 		setup_force_cpu_bug(X86_BUG_MDS);
 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1375,9 +1375,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
 	 * update is not present or running as guest that don't get TSX_CTRL.
 	 */
-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
 	    (cpu_has(c, X86_FEATURE_RTM) ||
-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
 		setup_force_cpu_bug(X86_BUG_TAA);
 
 	/*
@@ -1403,15 +1403,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (!arch_cap_mmio_immune(ia32_cap)) {
+	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
 	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
 			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
@@ -1429,15 +1429,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
 	 * which means that AVX will be disabled.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (vulnerable_to_rfds(ia32_cap))
+	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1447,7 +1447,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		return;
 
 	/* Rogue Data Cache Load? No! */
-	if (ia32_cap & ARCH_CAP_RDCL_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
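
For reference, here is a minimal userspace sketch (not part of the patch) of the same MMIO-immunity test that arch_cap_mmio_immune() performs above, reading MSR_IA32_ARCH_CAPABILITIES (0x10a) through the msr driver's /dev/cpu/0/msr interface. The ARCH_CAP_* bit positions are my reading of arch/x86/include/asm/msr-index.h and should be verified against the tree; the program needs root and the msr module loaded.

/*
 * Sketch only: check the MMIO stale data "immunity" bits from userspace.
 * Bit positions below are assumptions taken from msr-index.h, not from
 * this patch; double-check them before relying on the output.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define ARCH_CAP_SBDR_SSDP_NO		(1ULL << 13)	/* assumed bit position */
#define ARCH_CAP_FBSDP_NO		(1ULL << 14)	/* assumed bit position */
#define ARCH_CAP_PSDP_NO		(1ULL << 15)	/* assumed bit position */

int main(void)
{
	uint64_t x86_arch_cap_msr = 0;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr device returns the 8-byte MSR at the file offset equal to its index. */
	if (fd < 0 || pread(fd, &x86_arch_cap_msr, sizeof(x86_arch_cap_msr),
			    MSR_IA32_ARCH_CAPABILITIES) != sizeof(x86_arch_cap_msr)) {
		perror("read MSR_IA32_ARCH_CAPABILITIES");
		return 1;
	}

	/* Same predicate as arch_cap_mmio_immune() in the hunk above. */
	if ((x86_arch_cap_msr & ARCH_CAP_FBSDP_NO) &&
	    (x86_arch_cap_msr & ARCH_CAP_PSDP_NO) &&
	    (x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO))
		printf("CPU enumerates immunity to MMIO stale data\n");
	else
		printf("CPU does not enumerate full MMIO immunity\n");

	close(fd);
	return 0;
}

Build with something like "cc -o arch_cap arch_cap.c" and run as root after "modprobe msr".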