@@ -34,6 +34,7 @@
 
 #include "cpu.h"
 
+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -98,17 +99,11 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_STIBP))
 		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-	/* Select the proper spectre mitigation before patching alternatives */
+	/* Select the proper CPU mitigations before patching alternatives: */
+	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
-
-	/*
-	 * Select proper mitigation for any exposure to the Speculative Store
-	 * Bypass vulnerability.
-	 */
 	ssb_select_mitigation();
-
 	l1tf_select_mitigation();
-
 	mds_select_mitigation();
 
 	arch_smt_update();
@@ -273,6 +268,108 @@ static int __init mds_cmdline(char *str)
 }
 early_param("mds", mds_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+	SPECTRE_V1_MITIGATION_NONE,
+	SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+	SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+static bool is_swapgs_serializing(void)
+{
+	/*
+	 * Technically, swapgs isn't serializing on AMD (despite it previously
+	 * being documented as such in the APM).  But according to AMD, %gs is
+	 * updated non-speculatively, and the issuing of %gs-relative memory
+	 * operands will be blocked until the %gs update completes, which is
+	 * good enough for our purposes.
+	 */
+	return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+}
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SMAP))
+		return false;
+
+	/*
+	 * On CPUs which are vulnerable to Meltdown, SMAP does not
+	 * prevent speculative access to user data in the L1 cache.
+	 * Consider SMAP to be non-functional as a mitigation on these
+	 * CPUs.
+	 */
+	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+		return false;
+
+	return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+		return;
+	}
+
+	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+		/*
+		 * With Spectre v1, a user can speculatively control either
+		 * path of a conditional swapgs with a user-controlled GS
+		 * value.  The mitigation is to add lfences to both code paths.
+		 *
+		 * If FSGSBASE is enabled, the user can put a kernel address in
+		 * GS, in which case SMAP provides no protection.
+		 *
+		 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+		 *   FSGSBASE enablement patches have been merged. ]
+		 *
+		 * If FSGSBASE is disabled, the user can only put a user space
+		 * address in GS.  That makes an attack harder, but still
+		 * possible if there's no SMAP protection.
+		 */
+		if (!smap_works_speculatively()) {
+			/*
+			 * Mitigation can be provided from SWAPGS itself or
+			 * PTI as the CR3 write in the Meltdown mitigation
+			 * is serializing.
+			 *
+			 * If neither is there, mitigate with an LFENCE.
+			 */
+			if (!is_swapgs_serializing() && !boot_cpu_has(X86_FEATURE_PTI))
+				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+			/*
+			 * Enable lfences in the kernel entry (non-swapgs)
+			 * paths, to prevent user entry from speculatively
+			 * skipping swapgs.
+			 */
+			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+		}
+	}
+
+	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+	return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt) "Spectre V2 : " fmt
 
@@ -1290,7 +1387,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		break;
 
 	case X86_BUG_SPECTRE_V1:
-		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
 		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
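
Note: the patch above only selects the mitigation mode and forces the two
feature bits; the LFENCEs themselves are emitted in the assembly entry paths
by a companion patch in the same series, via the alternatives mechanism. As a
C-level sketch only (the real fences live in asm entry code, and
fence_swapgs_user_entry()/fence_swapgs_kernel_entry() are hypothetical names
invented here for illustration), the pattern amounts to:

#include <asm/alternative.h>	/* alternative() */
#include <asm/cpufeatures.h>	/* X86_FEATURE_FENCE_SWAPGS_* */

/*
 * Hypothetical C rendering of the entry-path fences.  alternative()
 * emits a NOP-padded slot that the alternatives patcher rewrites to
 * LFENCE only when the corresponding feature bit was forced on by
 * spectre_v1_select_mitigation().
 */
static __always_inline void fence_swapgs_user_entry(void)
{
	/* Serialize the entry-from-user path, after SWAPGS has run. */
	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_USER);
}

static __always_inline void fence_swapgs_kernel_entry(void)
{
	/* Serialize the entry-from-kernel (non-SWAPGS) path as well. */
	alternative("", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL);
}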
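
The "Vulnerable" status string still credits the pre-existing Spectre v1
defenses: "__user pointer sanitization" and usercopy barriers. For context, a
minimal sketch of that sanitization technique using the kernel's
array_index_nospec() helper (hypothetical_lookup() and its parameters are
invented here for illustration, not part of this commit):

#include <linux/errno.h>
#include <linux/nospec.h>	/* array_index_nospec() */

/*
 * Clamp a user-controlled index before using it, so that even a
 * mis-speculated bounds check cannot be used to read out of bounds.
 */
static long hypothetical_lookup(const long *table, unsigned long size,
				unsigned long idx)
{
	if (idx >= size)
		return -EINVAL;

	/* Forces idx into [0, size) under speculation as well. */
	idx = array_index_nospec(idx, size);
	return table[idx];
}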
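
After boot, the selected state is what userspace sees: reading
/sys/devices/system/cpu/vulnerabilities/spectre_v1 now reports one of the
spectre_v1_strings entries via cpu_show_common() above, and booting with
nospectre_v1 (or mitigations=off, which cpu_mitigations_off() checks) pins
the state to SPECTRE_V1_MITIGATION_NONE.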