Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 1994 Linus Torvalds
4 : *
5 : * Cyrix stuff, June 1998 by:
6 : * - Rafael R. Reilova (moved everything from head.S),
7 : * <rreilova@ececs.uc.edu>
8 : * - Channing Corn (tests & fixes),
9 : * - Andrew D. Balsa (code cleanup).
10 : */
11 : #include <linux/init.h>
12 : #include <linux/utsname.h>
13 : #include <linux/cpu.h>
14 : #include <linux/module.h>
15 : #include <linux/nospec.h>
16 : #include <linux/prctl.h>
17 : #include <linux/sched/smt.h>
18 : #include <linux/pgtable.h>
19 :
20 : #include <asm/spec-ctrl.h>
21 : #include <asm/cmdline.h>
22 : #include <asm/bugs.h>
23 : #include <asm/processor.h>
24 : #include <asm/processor-flags.h>
25 : #include <asm/fpu/internal.h>
26 : #include <asm/msr.h>
27 : #include <asm/vmx.h>
28 : #include <asm/paravirt.h>
29 : #include <asm/alternative.h>
30 : #include <asm/set_memory.h>
31 : #include <asm/intel-family.h>
32 : #include <asm/e820/api.h>
33 : #include <asm/hypervisor.h>
34 : #include <asm/tlbflush.h>
35 :
36 : #include "cpu.h"
37 :
38 : static void __init spectre_v1_select_mitigation(void);
39 : static void __init spectre_v2_select_mitigation(void);
40 : static void __init ssb_select_mitigation(void);
41 : static void __init l1tf_select_mitigation(void);
42 : static void __init mds_select_mitigation(void);
43 : static void __init mds_print_mitigation(void);
44 : static void __init taa_select_mitigation(void);
45 : static void __init srbds_select_mitigation(void);
46 :
47 : /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
48 : u64 x86_spec_ctrl_base;
49 : EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
50 : static DEFINE_MUTEX(spec_ctrl_mutex);
51 :
52 : /*
53 : * The vendor and possibly platform specific bits which can be modified in
54 : * x86_spec_ctrl_base.
55 : */
56 : static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
57 :
58 : /*
59 : * AMD specific MSR info for Speculative Store Bypass control.
60 : * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
61 : */
62 : u64 __ro_after_init x86_amd_ls_cfg_base;
63 : u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
64 :
65 : /* Control conditional STIBP in switch_to() */
66 : DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
67 : /* Control conditional IBPB in switch_mm() */
68 : DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
69 : /* Control unconditional IBPB in switch_mm() */
70 : DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
71 :
72 : /* Control MDS CPU buffer clear before returning to user space */
73 : DEFINE_STATIC_KEY_FALSE(mds_user_clear);
74 : EXPORT_SYMBOL_GPL(mds_user_clear);
75 : /* Control MDS CPU buffer clear before idling (halt, mwait) */
76 : DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
77 : EXPORT_SYMBOL_GPL(mds_idle_clear);
78 :
79 1 : void __init check_bugs(void)
80 : {
81 1 : identify_boot_cpu();
82 :
83 : /*
84 : * identify_boot_cpu() initialized SMT support information; let the
85 : * core code know.
86 : */
87 1 : cpu_smt_check_topology();
88 :
89 1 : if (!IS_ENABLED(CONFIG_SMP)) {
90 : pr_info("CPU: ");
91 : print_cpu_info(&boot_cpu_data);
92 : }
93 :
94 : /*
95 : * Read the SPEC_CTRL MSR to account for reserved bits which may
96 : * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
97 : * init code as it is not enumerated and depends on the family.
98 : */
99 1 : if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
100 1 : rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
101 :
102 : /* Allow STIBP in MSR_SPEC_CTRL if supported */
103 1 : if (boot_cpu_has(X86_FEATURE_STIBP))
104 1 : x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
105 :
106 : /* Select the proper CPU mitigations before patching alternatives: */
107 1 : spectre_v1_select_mitigation();
108 1 : spectre_v2_select_mitigation();
109 1 : ssb_select_mitigation();
110 1 : l1tf_select_mitigation();
111 1 : mds_select_mitigation();
112 1 : taa_select_mitigation();
113 1 : srbds_select_mitigation();
114 :
115 : /*
116 : * As the MDS and TAA mitigations are inter-related, defer printing the
117 : * MDS mitigation until after TAA mitigation selection is done.
118 : */
119 1 : mds_print_mitigation();
120 :
121 1 : arch_smt_update();
122 :
123 : #ifdef CONFIG_X86_32
124 : /*
125 : * Check whether we are able to run this kernel safely on SMP.
126 : *
127 : * - i386 is no longer supported.
128 : * - In order to run on anything without a TSC, we need to be
129 : * compiled for an i486.
130 : */
131 : if (boot_cpu_data.x86 < 4)
132 : panic("Kernel requires i486+ for 'invlpg' and other features");
133 :
134 : init_utsname()->machine[1] =
135 : '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
136 : alternative_instructions();
137 :
138 : fpu__init_check_bugs();
139 : #else /* CONFIG_X86_64 */
140 1 : alternative_instructions();
141 :
142 : /*
143 : * Make sure the first 2MB area is not mapped by huge pages.
144 : * There are typically fixed-size MTRRs in there and overlapping
145 : * MTRRs into large pages causes slowdowns.
146 : *
147 : * Right now we don't do that with gbpages because there seems to be
148 : * very little benefit for that case.
149 : */
150 1 : if (!direct_gbpages)
151 0 : set_memory_4k((unsigned long)__va(0), 1);
152 : #endif
153 1 : }
154 :
155 : void
156 0 : x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
157 : {
158 0 : u64 msrval, guestval, hostval = x86_spec_ctrl_base;
159 0 : struct thread_info *ti = current_thread_info();
160 :
161 : /* Is MSR_SPEC_CTRL implemented? */
162 0 : if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
163 : /*
164 : * Restrict guest_spec_ctrl to supported values. Clear the
165 : * modifiable bits in the host base value and OR in the
166 : * modifiable bits from the guest value.
167 : */
168 0 : guestval = hostval & ~x86_spec_ctrl_mask;
169 0 : guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
170 :
171 : /* SSBD controlled in MSR_SPEC_CTRL */
172 0 : if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
173 0 : static_cpu_has(X86_FEATURE_AMD_SSBD))
174 0 : hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
175 :
176 : /* Conditional STIBP enabled? */
177 0 : if (static_branch_unlikely(&switch_to_cond_stibp))
178 0 : hostval |= stibp_tif_to_spec_ctrl(ti->flags);
179 :
180 0 : if (hostval != guestval) {
181 0 : msrval = setguest ? guestval : hostval;
182 0 : wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
183 : }
184 : }
185 :
186 : /*
187 : * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
188 : * MSR_AMD64_LS_CFG or MSR_AMD64_VIRT_SPEC_CTRL if supported.
189 : */
190 0 : if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
191 0 : !static_cpu_has(X86_FEATURE_VIRT_SSBD))
192 : return;
193 :
194 : /*
195 : * If the host has SSBD mitigation enabled, force it in the host's
196 : * virtual MSR value. If it's not permanently enabled, evaluate
197 : * current's TIF_SSBD thread flag.
198 : */
199 0 : if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
200 : hostval = SPEC_CTRL_SSBD;
201 : else
202 0 : hostval = ssbd_tif_to_spec_ctrl(ti->flags);
203 :
204 : /* Sanitize the guest value */
205 0 : guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
206 :
207 0 : if (hostval != guestval) {
208 0 : unsigned long tif;
209 :
210 0 : tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
211 0 : ssbd_spec_ctrl_to_tif(hostval);
212 :
213 0 : speculation_ctrl_update(tif);
214 : }
215 : }
216 : EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
217 :
218 0 : static void x86_amd_ssb_disable(void)
219 : {
220 0 : u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
221 :
222 0 : if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
223 0 : wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
224 0 : else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
225 0 : wrmsrl(MSR_AMD64_LS_CFG, msrval);
226 0 : }
227 :
228 : #undef pr_fmt
229 : #define pr_fmt(fmt) "MDS: " fmt
230 :
231 : /* Default mitigation for MDS-affected CPUs */
232 : static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
233 : static bool mds_nosmt __ro_after_init = false;
234 :
235 : static const char * const mds_strings[] = {
236 : [MDS_MITIGATION_OFF] = "Vulnerable",
237 : [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
238 : [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
239 : };
240 :
241 1 : static void __init mds_select_mitigation(void)
242 : {
243 1 : if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
244 0 : mds_mitigation = MDS_MITIGATION_OFF;
245 0 : return;
246 : }
247 :
248 1 : if (mds_mitigation == MDS_MITIGATION_FULL) {
249 1 : if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
250 1 : mds_mitigation = MDS_MITIGATION_VMWERV;
251 :
252 1 : static_branch_enable(&mds_user_clear);
253 :
254 1 : if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
255 1 : (mds_nosmt || cpu_mitigations_auto_nosmt()))
256 0 : cpu_smt_disable(false);
257 : }
258 : }
259 :
260 1 : static void __init mds_print_mitigation(void)
261 : {
262 1 : if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
263 0 : return;
264 :
265 1 : pr_info("%s\n", mds_strings[mds_mitigation]);
266 : }
267 :
268 0 : static int __init mds_cmdline(char *str)
269 : {
270 0 : if (!boot_cpu_has_bug(X86_BUG_MDS))
271 : return 0;
272 :
273 0 : if (!str)
274 : return -EINVAL;
275 :
276 0 : if (!strcmp(str, "off"))
277 0 : mds_mitigation = MDS_MITIGATION_OFF;
278 0 : else if (!strcmp(str, "full"))
279 0 : mds_mitigation = MDS_MITIGATION_FULL;
280 0 : else if (!strcmp(str, "full,nosmt")) {
281 0 : mds_mitigation = MDS_MITIGATION_FULL;
282 0 : mds_nosmt = true;
283 : }
284 :
285 : return 0;
286 : }
287 : early_param("mds", mds_cmdline);
288 :
289 : #undef pr_fmt
290 : #define pr_fmt(fmt) "TAA: " fmt
291 :
292 : enum taa_mitigations {
293 : TAA_MITIGATION_OFF,
294 : TAA_MITIGATION_UCODE_NEEDED,
295 : TAA_MITIGATION_VERW,
296 : TAA_MITIGATION_TSX_DISABLED,
297 : };
298 :
299 : /* Default mitigation for TAA-affected CPUs */
300 : static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
301 : static bool taa_nosmt __ro_after_init;
302 :
303 : static const char * const taa_strings[] = {
304 : [TAA_MITIGATION_OFF] = "Vulnerable",
305 : [TAA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
306 : [TAA_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
307 : [TAA_MITIGATION_TSX_DISABLED] = "Mitigation: TSX disabled",
308 : };
309 :
310 1 : static void __init taa_select_mitigation(void)
311 : {
312 1 : u64 ia32_cap;
313 :
314 1 : if (!boot_cpu_has_bug(X86_BUG_TAA)) {
315 1 : taa_mitigation = TAA_MITIGATION_OFF;
316 1 : return;
317 : }
318 :
319 : /* TSX previously disabled by tsx=off */
320 0 : if (!boot_cpu_has(X86_FEATURE_RTM)) {
321 0 : taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
322 0 : goto out;
323 : }
324 :
325 0 : if (cpu_mitigations_off()) {
326 0 : taa_mitigation = TAA_MITIGATION_OFF;
327 0 : return;
328 : }
329 :
330 : /*
331 : * TAA mitigation via VERW is turned off if both
332 : * tsx_async_abort=off and mds=off are specified.
333 : */
334 0 : if (taa_mitigation == TAA_MITIGATION_OFF &&
335 0 : mds_mitigation == MDS_MITIGATION_OFF)
336 0 : goto out;
337 :
338 0 : if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
339 0 : taa_mitigation = TAA_MITIGATION_VERW;
340 : else
341 0 : taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
342 :
343 : /*
344 : * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
345 : * A microcode update fixes this behavior to clear CPU buffers. It also
346 : * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
347 : * ARCH_CAP_TSX_CTRL_MSR bit.
348 : *
349 : * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
350 : * update is required.
351 : */
352 0 : ia32_cap = x86_read_arch_cap_msr();
353 0 : if ((ia32_cap & ARCH_CAP_MDS_NO) &&
354 : !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
355 0 : taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
356 :
357 : /*
358 : * TSX is enabled, so select the alternate mitigation for TAA, which is
359 : * the same as for MDS. Enable the MDS static branch to clear CPU buffers.
360 : *
361 : * For guests that can't determine whether the correct microcode is
362 : * present on the host, enable the mitigation for UCODE_NEEDED as well.
363 : */
364 0 : static_branch_enable(&mds_user_clear);
365 :
366 0 : if (taa_nosmt || cpu_mitigations_auto_nosmt())
367 0 : cpu_smt_disable(false);
368 :
369 : /*
370 : * Update the MDS mitigation, if necessary, as mds_user_clear is
371 : * now enabled for TAA mitigation.
372 : */
373 0 : if (mds_mitigation == MDS_MITIGATION_OFF &&
374 0 : boot_cpu_has_bug(X86_BUG_MDS)) {
375 0 : mds_mitigation = MDS_MITIGATION_FULL;
376 0 : mds_select_mitigation();
377 : }
378 0 : out:
379 0 : pr_info("%s\n", taa_strings[taa_mitigation]);
380 : }
381 :
382 0 : static int __init tsx_async_abort_parse_cmdline(char *str)
383 : {
384 0 : if (!boot_cpu_has_bug(X86_BUG_TAA))
385 : return 0;
386 :
387 0 : if (!str)
388 : return -EINVAL;
389 :
390 0 : if (!strcmp(str, "off")) {
391 0 : taa_mitigation = TAA_MITIGATION_OFF;
392 0 : } else if (!strcmp(str, "full")) {
393 0 : taa_mitigation = TAA_MITIGATION_VERW;
394 0 : } else if (!strcmp(str, "full,nosmt")) {
395 0 : taa_mitigation = TAA_MITIGATION_VERW;
396 0 : taa_nosmt = true;
397 : }
398 :
399 : return 0;
400 : }
401 : early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
402 :
403 : #undef pr_fmt
404 : #define pr_fmt(fmt) "SRBDS: " fmt
405 :
406 : enum srbds_mitigations {
407 : SRBDS_MITIGATION_OFF,
408 : SRBDS_MITIGATION_UCODE_NEEDED,
409 : SRBDS_MITIGATION_FULL,
410 : SRBDS_MITIGATION_TSX_OFF,
411 : SRBDS_MITIGATION_HYPERVISOR,
412 : };
413 :
414 : static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
415 :
416 : static const char * const srbds_strings[] = {
417 : [SRBDS_MITIGATION_OFF] = "Vulnerable",
418 : [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
419 : [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
420 : [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
421 : [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
422 : };
423 :
424 : static bool srbds_off;
425 :
426 4 : void update_srbds_msr(void)
427 : {
428 4 : u64 mcu_ctrl;
429 :
430 4 : if (!boot_cpu_has_bug(X86_BUG_SRBDS))
431 : return;
432 :
433 4 : if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
434 : return;
435 :
436 0 : if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
437 : return;
438 :
439 0 : rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
440 :
441 0 : switch (srbds_mitigation) {
442 0 : case SRBDS_MITIGATION_OFF:
443 : case SRBDS_MITIGATION_TSX_OFF:
444 0 : mcu_ctrl |= RNGDS_MITG_DIS;
445 0 : break;
446 0 : case SRBDS_MITIGATION_FULL:
447 0 : mcu_ctrl &= ~RNGDS_MITG_DIS;
448 0 : break;
449 : default:
450 : break;
451 : }
452 :
453 0 : wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
454 : }
455 :
456 1 : static void __init srbds_select_mitigation(void)
457 : {
458 1 : u64 ia32_cap;
459 :
460 1 : if (!boot_cpu_has_bug(X86_BUG_SRBDS))
461 : return;
462 :
463 : /*
464 : * Check to see if this is one of the MDS_NO systems supporting
465 : * TSX that are only exposed to SRBDS when TSX is enabled.
466 : */
467 1 : ia32_cap = x86_read_arch_cap_msr();
468 1 : if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
469 0 : srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
470 1 : else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
471 1 : srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
472 0 : else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
473 0 : srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
474 0 : else if (cpu_mitigations_off() || srbds_off)
475 0 : srbds_mitigation = SRBDS_MITIGATION_OFF;
476 :
477 1 : update_srbds_msr();
478 1 : pr_info("%s\n", srbds_strings[srbds_mitigation]);
479 : }
480 :
481 0 : static int __init srbds_parse_cmdline(char *str)
482 : {
483 0 : if (!str)
484 : return -EINVAL;
485 :
486 0 : if (!boot_cpu_has_bug(X86_BUG_SRBDS))
487 : return 0;
488 :
489 0 : srbds_off = !strcmp(str, "off");
490 0 : return 0;
491 : }
492 : early_param("srbds", srbds_parse_cmdline);
493 :
494 : #undef pr_fmt
495 : #define pr_fmt(fmt) "Spectre V1 : " fmt
496 :
497 : enum spectre_v1_mitigation {
498 : SPECTRE_V1_MITIGATION_NONE,
499 : SPECTRE_V1_MITIGATION_AUTO,
500 : };
501 :
502 : static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
503 : SPECTRE_V1_MITIGATION_AUTO;
504 :
505 : static const char * const spectre_v1_strings[] = {
506 : [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
507 : [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
508 : };
509 :
510 : /*
511 : * Does SMAP provide full mitigation against speculative kernel access to
512 : * userspace?
513 : */
514 0 : static bool smap_works_speculatively(void)
515 : {
516 0 : if (!boot_cpu_has(X86_FEATURE_SMAP))
517 : return false;
518 :
519 : /*
520 : * On CPUs which are vulnerable to Meltdown, SMAP does not
521 : * prevent speculative access to user data in the L1 cache.
522 : * Consider SMAP to be non-functional as a mitigation on these
523 : * CPUs.
524 : */
525 0 : if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
526 0 : return false;
527 :
528 : return true;
529 : }
530 :
531 1 : static void __init spectre_v1_select_mitigation(void)
532 : {
533 1 : if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
534 0 : spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
535 0 : return;
536 : }
537 :
538 1 : if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
539 : /*
540 : * With Spectre v1, a user can speculatively control either
541 : * path of a conditional swapgs with a user-controlled GS
542 : * value. The mitigation is to add lfences to both code paths.
543 : *
544 : * If FSGSBASE is enabled, the user can put a kernel address in
545 : * GS, in which case SMAP provides no protection.
546 : *
547 : * If FSGSBASE is disabled, the user can only put a user space
548 : * address in GS. That makes an attack harder, but still
549 : * possible if there's no SMAP protection.
550 : */
551 1 : if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
552 0 : !smap_works_speculatively()) {
553 : /*
554 : * Mitigation can be provided from SWAPGS itself or
555 : * PTI as the CR3 write in the Meltdown mitigation
556 : * is serializing.
557 : *
558 : * If neither is there, mitigate with an LFENCE to
559 : * stop speculation through swapgs.
560 : */
561 1 : if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
562 1 : !boot_cpu_has(X86_FEATURE_PTI))
563 1 : setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
564 :
565 : /*
566 : * Enable lfences in the kernel entry (non-swapgs)
567 : * paths, to prevent user entry from speculatively
568 : * skipping swapgs.
569 : */
570 1 : setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
571 : }
572 : }
573 :
574 1 : pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
575 : }
576 :
577 0 : static int __init nospectre_v1_cmdline(char *str)
578 : {
579 0 : spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
580 0 : return 0;
581 : }
582 : early_param("nospectre_v1", nospectre_v1_cmdline);
583 :
584 : #undef pr_fmt
585 : #define pr_fmt(fmt) "Spectre V2 : " fmt
586 :
587 : static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
588 : SPECTRE_V2_NONE;
589 :
590 : static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
591 : SPECTRE_V2_USER_NONE;
592 : static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
593 : SPECTRE_V2_USER_NONE;
594 :
595 : #ifdef CONFIG_RETPOLINE
596 : static bool spectre_v2_bad_module;
597 :
598 : bool retpoline_module_ok(bool has_retpoline)
599 : {
600 : if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
601 : return true;
602 :
603 : pr_err("System may be vulnerable to spectre v2\n");
604 : spectre_v2_bad_module = true;
605 : return false;
606 : }
607 :
608 : static inline const char *spectre_v2_module_string(void)
609 : {
610 : return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
611 : }
612 : #else
613 0 : static inline const char *spectre_v2_module_string(void) { return ""; }
614 : #endif
615 :
616 0 : static inline bool match_option(const char *arg, int arglen, const char *opt)
617 : {
618 0 : int len = strlen(opt);
619 :
620 0 : return len == arglen && !strncmp(arg, opt, len);
621 : }
622 :
623 : /* The kernel command line selection for spectre v2 */
624 : enum spectre_v2_mitigation_cmd {
625 : SPECTRE_V2_CMD_NONE,
626 : SPECTRE_V2_CMD_AUTO,
627 : SPECTRE_V2_CMD_FORCE,
628 : SPECTRE_V2_CMD_RETPOLINE,
629 : SPECTRE_V2_CMD_RETPOLINE_GENERIC,
630 : SPECTRE_V2_CMD_RETPOLINE_AMD,
631 : };
632 :
633 : enum spectre_v2_user_cmd {
634 : SPECTRE_V2_USER_CMD_NONE,
635 : SPECTRE_V2_USER_CMD_AUTO,
636 : SPECTRE_V2_USER_CMD_FORCE,
637 : SPECTRE_V2_USER_CMD_PRCTL,
638 : SPECTRE_V2_USER_CMD_PRCTL_IBPB,
639 : SPECTRE_V2_USER_CMD_SECCOMP,
640 : SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
641 : };
642 :
643 : static const char * const spectre_v2_user_strings[] = {
644 : [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
645 : [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
646 : [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
647 : [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
648 : [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
649 : };
650 :
651 : static const struct {
652 : const char *option;
653 : enum spectre_v2_user_cmd cmd;
654 : bool secure;
655 : } v2_user_options[] __initconst = {
656 : { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
657 : { "off", SPECTRE_V2_USER_CMD_NONE, false },
658 : { "on", SPECTRE_V2_USER_CMD_FORCE, true },
659 : { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
660 : { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
661 : { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
662 : { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
663 : };
664 :
665 0 : static void __init spec_v2_user_print_cond(const char *reason, bool secure)
666 : {
667 0 : if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
668 0 : pr_info("spectre_v2_user=%s forced on command line.\n", reason);
669 0 : }
670 :
671 : static enum spectre_v2_user_cmd __init
672 0 : spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
673 : {
674 0 : char arg[20];
675 0 : int ret, i;
676 :
677 0 : switch (v2_cmd) {
678 : case SPECTRE_V2_CMD_NONE:
679 : return SPECTRE_V2_USER_CMD_NONE;
680 0 : case SPECTRE_V2_CMD_FORCE:
681 0 : return SPECTRE_V2_USER_CMD_FORCE;
682 : default:
683 0 : break;
684 : }
685 :
686 0 : ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
687 : arg, sizeof(arg));
688 0 : if (ret < 0)
689 : return SPECTRE_V2_USER_CMD_AUTO;
690 :
691 0 : for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
692 0 : if (match_option(arg, ret, v2_user_options[i].option)) {
693 0 : spec_v2_user_print_cond(v2_user_options[i].option,
694 0 : v2_user_options[i].secure);
695 0 : return v2_user_options[i].cmd;
696 : }
697 : }
698 :
699 0 : pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
700 0 : return SPECTRE_V2_USER_CMD_AUTO;
701 : }
702 :
703 : static void __init
704 0 : spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
705 : {
706 0 : enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
707 0 : bool smt_possible = IS_ENABLED(CONFIG_SMP);
708 0 : enum spectre_v2_user_cmd cmd;
709 :
710 0 : if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
711 : return;
712 :
713 0 : if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
714 : cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
715 0 : smt_possible = false;
716 :
717 0 : cmd = spectre_v2_parse_user_cmdline(v2_cmd);
718 0 : switch (cmd) {
719 0 : case SPECTRE_V2_USER_CMD_NONE:
720 0 : goto set_mode;
721 0 : case SPECTRE_V2_USER_CMD_FORCE:
722 0 : mode = SPECTRE_V2_USER_STRICT;
723 0 : break;
724 0 : case SPECTRE_V2_USER_CMD_PRCTL:
725 : case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
726 0 : mode = SPECTRE_V2_USER_PRCTL;
727 0 : break;
728 : case SPECTRE_V2_USER_CMD_AUTO:
729 : case SPECTRE_V2_USER_CMD_SECCOMP:
730 : case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
731 0 : if (IS_ENABLED(CONFIG_SECCOMP))
732 : mode = SPECTRE_V2_USER_SECCOMP;
733 : else
734 0 : mode = SPECTRE_V2_USER_PRCTL;
735 0 : break;
736 : }
737 :
738 : /* Initialize Indirect Branch Prediction Barrier */
739 0 : if (boot_cpu_has(X86_FEATURE_IBPB)) {
740 0 : setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
741 :
742 0 : spectre_v2_user_ibpb = mode;
743 0 : switch (cmd) {
744 0 : case SPECTRE_V2_USER_CMD_FORCE:
745 : case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
746 : case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
747 0 : static_branch_enable(&switch_mm_always_ibpb);
748 0 : spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
749 0 : break;
750 0 : case SPECTRE_V2_USER_CMD_PRCTL:
751 : case SPECTRE_V2_USER_CMD_AUTO:
752 : case SPECTRE_V2_USER_CMD_SECCOMP:
753 0 : static_branch_enable(&switch_mm_cond_ibpb);
754 0 : break;
755 : default:
756 : break;
757 : }
758 :
759 0 : pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
760 : static_key_enabled(&switch_mm_always_ibpb) ?
761 : "always-on" : "conditional");
762 : }
763 :
764 : /*
765 : * If STIBP is not available, enhanced IBRS is enabled, or SMT is not
766 : * possible, then STIBP is not required.
767 : */
768 0 : if (!boot_cpu_has(X86_FEATURE_STIBP) ||
769 0 : !smt_possible ||
770 0 : spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
771 : return;
772 :
773 : /*
774 : * At this point, an STIBP mode other than "off" has been set.
775 : * If STIBP support is not being forced, check if STIBP always-on
776 : * is preferred.
777 : */
778 0 : if (mode != SPECTRE_V2_USER_STRICT &&
779 0 : boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
780 0 : mode = SPECTRE_V2_USER_STRICT_PREFERRED;
781 :
782 0 : spectre_v2_user_stibp = mode;
783 :
784 0 : set_mode:
785 0 : pr_info("%s\n", spectre_v2_user_strings[mode]);
786 : }
787 :
788 : static const char * const spectre_v2_strings[] = {
789 : [SPECTRE_V2_NONE] = "Vulnerable",
790 : [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
791 : [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
792 : [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
793 : };
794 :
795 : static const struct {
796 : const char *option;
797 : enum spectre_v2_mitigation_cmd cmd;
798 : bool secure;
799 : } mitigation_options[] __initconst = {
800 : { "off", SPECTRE_V2_CMD_NONE, false },
801 : { "on", SPECTRE_V2_CMD_FORCE, true },
802 : { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
803 : { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
804 : { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
805 : { "auto", SPECTRE_V2_CMD_AUTO, false },
806 : };
807 :
808 0 : static void __init spec_v2_print_cond(const char *reason, bool secure)
809 : {
810 0 : if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
811 0 : pr_info("%s selected on command line.\n", reason);
812 0 : }
813 :
814 1 : static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
815 : {
816 1 : enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
817 1 : char arg[20];
818 1 : int ret, i;
819 :
820 2 : if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
821 1 : cpu_mitigations_off())
822 0 : return SPECTRE_V2_CMD_NONE;
823 :
824 1 : ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
825 1 : if (ret < 0)
826 : return SPECTRE_V2_CMD_AUTO;
827 :
828 0 : for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
829 0 : if (!match_option(arg, ret, mitigation_options[i].option))
830 0 : continue;
831 0 : cmd = mitigation_options[i].cmd;
832 0 : break;
833 : }
834 :
835 0 : if (i >= ARRAY_SIZE(mitigation_options)) {
836 0 : pr_err("unknown option (%s). Switching to AUTO select\n", arg);
837 0 : return SPECTRE_V2_CMD_AUTO;
838 : }
839 :
840 0 : if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
841 0 : cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
842 0 : cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
843 : !IS_ENABLED(CONFIG_RETPOLINE)) {
844 0 : pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
845 0 : return SPECTRE_V2_CMD_AUTO;
846 : }
847 :
848 0 : if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
849 0 : boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
850 : boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
851 0 : pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
852 0 : return SPECTRE_V2_CMD_AUTO;
853 : }
854 :
855 0 : spec_v2_print_cond(mitigation_options[i].option,
856 0 : mitigation_options[i].secure);
857 0 : return cmd;
858 : }
859 :
860 1 : static void __init spectre_v2_select_mitigation(void)
861 : {
862 1 : enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
863 1 : enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
864 :
865 : /*
866 : * If the CPU is not affected and the command line mode is NONE or AUTO
867 : * then there is nothing to do.
868 : */
869 1 : if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
870 : (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
871 : return;
872 :
873 1 : switch (cmd) {
874 : case SPECTRE_V2_CMD_NONE:
875 : return;
876 :
877 : case SPECTRE_V2_CMD_FORCE:
878 : case SPECTRE_V2_CMD_AUTO:
879 1 : if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
880 0 : mode = SPECTRE_V2_IBRS_ENHANCED;
881 : /* Force it so VMEXIT will restore correctly */
882 0 : x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
883 0 : wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
884 0 : goto specv2_set_mode;
885 : }
886 : if (IS_ENABLED(CONFIG_RETPOLINE))
887 : goto retpoline_auto;
888 : break;
889 : case SPECTRE_V2_CMD_RETPOLINE_AMD:
890 : if (IS_ENABLED(CONFIG_RETPOLINE))
891 : goto retpoline_amd;
892 : break;
893 : case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
894 : if (IS_ENABLED(CONFIG_RETPOLINE))
895 : goto retpoline_generic;
896 : break;
897 : case SPECTRE_V2_CMD_RETPOLINE:
898 : if (IS_ENABLED(CONFIG_RETPOLINE))
899 : goto retpoline_auto;
900 : break;
901 : }
902 1 : pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
903 1 : return;
904 :
905 : retpoline_auto:
906 : if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
907 : boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
908 : retpoline_amd:
909 : if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
910 : pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
911 : goto retpoline_generic;
912 : }
913 : mode = SPECTRE_V2_RETPOLINE_AMD;
914 : setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
915 : setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
916 : } else {
917 : retpoline_generic:
918 : mode = SPECTRE_V2_RETPOLINE_GENERIC;
919 0 : setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
920 : }
921 :
922 0 : specv2_set_mode:
923 0 : spectre_v2_enabled = mode;
924 0 : pr_info("%s\n", spectre_v2_strings[mode]);
925 :
926 : /*
927 : * If spectre v2 protection has been enabled, unconditionally fill
928 : * RSB during a context switch; this protects against two independent
929 : * issues:
930 : *
931 : * - RSB underflow (and switch to BTB) on Skylake+
932 : * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
933 : */
934 0 : setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
935 0 : pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
936 :
937 : /*
938 : * Retpoline means the kernel is safe because it has no indirect
939 : * branches. Enhanced IBRS protects firmware too, so enable restricted
940 : * speculation around firmware calls only when Enhanced IBRS isn't
941 : * supported.
942 : *
943 : * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
944 : * the user might select retpoline on the kernel command line, and if
945 : * the CPU supports Enhanced IBRS, the kernel might unintentionally not
946 : * enable IBRS around firmware calls.
947 : */
948 0 : if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
949 : setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
950 : pr_info("Enabling Restricted Speculation for firmware calls\n");
951 : }
952 :
953 : /* Set up IBPB and STIBP depending on the general spectre V2 command */
954 0 : spectre_v2_user_select_mitigation(cmd);
955 : }
956 :
957 0 : static void update_stibp_msr(void * __unused)
958 : {
959 0 : wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
960 0 : }
961 :
962 : /* Update x86_spec_ctrl_base in case SMT state changed. */
963 0 : static void update_stibp_strict(void)
964 : {
965 0 : u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
966 :
967 0 : if (sched_smt_active())
968 0 : mask |= SPEC_CTRL_STIBP;
969 :
970 0 : if (mask == x86_spec_ctrl_base)
971 : return;
972 :
973 0 : pr_info("Update user space SMT mitigation: STIBP %s\n",
974 : mask & SPEC_CTRL_STIBP ? "always-on" : "off");
975 0 : x86_spec_ctrl_base = mask;
976 0 : on_each_cpu(update_stibp_msr, NULL, 1);
977 : }
978 :
979 : /* Update the static key controlling the evaluation of TIF_SPEC_IB */
980 0 : static void update_indir_branch_cond(void)
981 : {
982 0 : if (sched_smt_active())
983 0 : static_branch_enable(&switch_to_cond_stibp);
984 : else
985 0 : static_branch_disable(&switch_to_cond_stibp);
986 0 : }
987 :
988 : #undef pr_fmt
989 : #define pr_fmt(fmt) fmt
990 :
991 : /* Update the static key controlling the MDS CPU buffer clear in idle */
992 4 : static void update_mds_branch_idle(void)
993 : {
994 : /*
995 : * Enable the idle clearing if SMT is active on CPUs which are
996 : * affected only by MSBDS and not any other MDS variant.
997 : *
998 : * The other variants cannot be mitigated when SMT is enabled, so
999 : * clearing the buffers on idle just to prevent the Store Buffer
1000 : * repartitioning leak would be a window dressing exercise.
1001 : */
1002 4 : if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1003 : return;
1004 :
1005 0 : if (sched_smt_active())
1006 0 : static_branch_enable(&mds_idle_clear);
1007 : else
1008 0 : static_branch_disable(&mds_idle_clear);
1009 : }
1010 :
1011 : #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1012 : #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1013 :
1014 4 : void cpu_bugs_smt_update(void)
1015 : {
1016 4 : mutex_lock(&spec_ctrl_mutex);
1017 :
1018 4 : switch (spectre_v2_user_stibp) {
1019 : case SPECTRE_V2_USER_NONE:
1020 : break;
1021 0 : case SPECTRE_V2_USER_STRICT:
1022 : case SPECTRE_V2_USER_STRICT_PREFERRED:
1023 0 : update_stibp_strict();
1024 0 : break;
1025 0 : case SPECTRE_V2_USER_PRCTL:
1026 : case SPECTRE_V2_USER_SECCOMP:
1027 0 : update_indir_branch_cond();
1028 0 : break;
1029 : }
1030 :
1031 4 : switch (mds_mitigation) {
1032 : case MDS_MITIGATION_FULL:
1033 : case MDS_MITIGATION_VMWERV:
1034 4 : if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1035 0 : pr_warn_once(MDS_MSG_SMT);
1036 4 : update_mds_branch_idle();
1037 4 : break;
1038 : case MDS_MITIGATION_OFF:
1039 : break;
1040 : }
1041 :
1042 4 : switch (taa_mitigation) {
1043 : case TAA_MITIGATION_VERW:
1044 : case TAA_MITIGATION_UCODE_NEEDED:
1045 0 : if (sched_smt_active())
1046 0 : pr_warn_once(TAA_MSG_SMT);
1047 : break;
1048 : case TAA_MITIGATION_TSX_DISABLED:
1049 : case TAA_MITIGATION_OFF:
1050 : break;
1051 : }
1052 :
1053 4 : mutex_unlock(&spec_ctrl_mutex);
1054 4 : }
1055 :
1056 : #undef pr_fmt
1057 : #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1058 :
1059 : static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1060 :
1061 : /* The kernel command line selection */
1062 : enum ssb_mitigation_cmd {
1063 : SPEC_STORE_BYPASS_CMD_NONE,
1064 : SPEC_STORE_BYPASS_CMD_AUTO,
1065 : SPEC_STORE_BYPASS_CMD_ON,
1066 : SPEC_STORE_BYPASS_CMD_PRCTL,
1067 : SPEC_STORE_BYPASS_CMD_SECCOMP,
1068 : };
1069 :
1070 : static const char * const ssb_strings[] = {
1071 : [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
1072 : [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
1073 : [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
1074 : [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1075 : };
1076 :
1077 : static const struct {
1078 : const char *option;
1079 : enum ssb_mitigation_cmd cmd;
1080 : } ssb_mitigation_options[] __initconst = {
1081 : { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
1082 : { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
1083 : { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
1084 : { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
1085 : { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1086 : };
1087 :
1088 1 : static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1089 : {
1090 1 : enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1091 1 : char arg[20];
1092 1 : int ret, i;
1093 :
1094 2 : if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1095 1 : cpu_mitigations_off()) {
1096 0 : return SPEC_STORE_BYPASS_CMD_NONE;
1097 : } else {
1098 1 : ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1099 : arg, sizeof(arg));
1100 1 : if (ret < 0)
1101 : return SPEC_STORE_BYPASS_CMD_AUTO;
1102 :
1103 0 : for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1104 0 : if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1105 0 : continue;
1106 :
1107 0 : cmd = ssb_mitigation_options[i].cmd;
1108 0 : break;
1109 : }
1110 :
1111 0 : if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1112 0 : pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1113 0 : return SPEC_STORE_BYPASS_CMD_AUTO;
1114 : }
1115 : }
1116 :
1117 : return cmd;
1118 : }
1119 :
1120 1 : static enum ssb_mitigation __init __ssb_select_mitigation(void)
1121 : {
1122 1 : enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1123 1 : enum ssb_mitigation_cmd cmd;
1124 :
1125 1 : if (!boot_cpu_has(X86_FEATURE_SSBD))
1126 : return mode;
1127 :
1128 1 : cmd = ssb_parse_cmdline();
1129 1 : if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1130 : (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1131 : cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1132 : return mode;
1133 :
1134 1 : switch (cmd) {
1135 : case SPEC_STORE_BYPASS_CMD_AUTO:
1136 : case SPEC_STORE_BYPASS_CMD_SECCOMP:
1137 : /*
1138 : * Choose prctl+seccomp as the default mode if seccomp is
1139 : * enabled.
1140 : */
1141 : if (IS_ENABLED(CONFIG_SECCOMP))
1142 : mode = SPEC_STORE_BYPASS_SECCOMP;
1143 : else
1144 : mode = SPEC_STORE_BYPASS_PRCTL;
1145 : break;
1146 : case SPEC_STORE_BYPASS_CMD_ON:
1147 : mode = SPEC_STORE_BYPASS_DISABLE;
1148 : break;
1149 : case SPEC_STORE_BYPASS_CMD_PRCTL:
1150 : mode = SPEC_STORE_BYPASS_PRCTL;
1151 : break;
1152 : case SPEC_STORE_BYPASS_CMD_NONE:
1153 : break;
1154 : }
1155 :
1156 : /*
1157 : * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
1158 : * bit in the mask to allow guests to use the mitigation even in the
1159 : * case where the host does not enable it.
1160 : */
1161 1 : if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
1162 0 : static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1163 1 : x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
1164 : }
1165 :
1166 : /*
1167 : * We have three CPU feature flags that are in play here:
1168 : * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1169 : * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1170 : * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1171 : */
1172 1 : if (mode == SPEC_STORE_BYPASS_DISABLE) {
1173 0 : setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1174 : /*
1175 : * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1176 : * use a completely different MSR and bit dependent on family.
1177 : */
1178 0 : if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1179 0 : !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1180 0 : x86_amd_ssb_disable();
1181 : } else {
1182 0 : x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1183 0 : wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1184 : }
1185 : }
1186 :
1187 : return mode;
1188 : }
1189 :
1190 1 : static void ssb_select_mitigation(void)
1191 : {
1192 1 : ssb_mode = __ssb_select_mitigation();
1193 :
1194 1 : if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1195 1 : pr_info("%s\n", ssb_strings[ssb_mode]);
1196 1 : }
1197 :
1198 : #undef pr_fmt
1199 : #define pr_fmt(fmt) "Speculation prctl: " fmt
1200 :
1201 0 : static void task_update_spec_tif(struct task_struct *tsk)
1202 : {
1203 : /* Force the update of the real TIF bits */
1204 0 : set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1205 :
1206 : /*
1207 : * Immediately update the speculation control MSRs for the current
1208 : * task, but for a non-current task delay setting the CPU
1209 : * mitigation until it is scheduled next.
1210 : *
1211 : * This can only happen for SECCOMP mitigation. For PRCTL it's
1212 : * always the current task.
1213 : */
1214 0 : if (tsk == current)
1215 0 : speculation_ctrl_update_current();
1216 0 : }
1217 :
1218 0 : static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1219 : {
1220 0 : if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1221 : ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1222 : return -ENXIO;
1223 :
1224 0 : switch (ctrl) {
1225 : case PR_SPEC_ENABLE:
1226 : /* If speculation is force disabled, enable is not allowed */
1227 0 : if (task_spec_ssb_force_disable(task))
1228 : return -EPERM;
1229 0 : task_clear_spec_ssb_disable(task);
1230 0 : task_clear_spec_ssb_noexec(task);
1231 0 : task_update_spec_tif(task);
1232 0 : break;
1233 : case PR_SPEC_DISABLE:
1234 0 : task_set_spec_ssb_disable(task);
1235 0 : task_clear_spec_ssb_noexec(task);
1236 0 : task_update_spec_tif(task);
1237 0 : break;
1238 : case PR_SPEC_FORCE_DISABLE:
1239 0 : task_set_spec_ssb_disable(task);
1240 0 : task_set_spec_ssb_force_disable(task);
1241 0 : task_clear_spec_ssb_noexec(task);
1242 0 : task_update_spec_tif(task);
1243 0 : break;
1244 : case PR_SPEC_DISABLE_NOEXEC:
1245 0 : if (task_spec_ssb_force_disable(task))
1246 : return -EPERM;
1247 0 : task_set_spec_ssb_disable(task);
1248 0 : task_set_spec_ssb_noexec(task);
1249 0 : task_update_spec_tif(task);
1250 0 : break;
1251 : default:
1252 : return -ERANGE;
1253 : }
1254 : return 0;
1255 : }
1256 :
1257 0 : static bool is_spec_ib_user_controlled(void)
1258 : {
1259 0 : return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1260 0 : spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1261 0 : spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1262 : spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
1263 : }
1264 :
1265 0 : static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1266 : {
1267 0 : switch (ctrl) {
1268 0 : case PR_SPEC_ENABLE:
1269 0 : if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1270 0 : spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1271 : return 0;
1272 :
1273 : /*
1274 : * With strict mode for both IBPB and STIBP, the instruction
1275 : * code paths avoid checking this task flag and instead
1276 : * unconditionally run the instruction. However, STIBP and IBPB
1277 : * are independent and either can be set to conditionally
1278 : * enabled regardless of the mode of the other.
1279 : *
1280 : * If either is set to conditional, allow the task flag to be
1281 : * updated, unless it was force-disabled by a previous prctl
1282 : * call. Currently, this is possible on an AMD CPU which has the
1283 : * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
1284 : * kernel is booted with 'spectre_v2_user=seccomp', then
1285 : * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
1286 : * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
1287 : */
1288 0 : if (!is_spec_ib_user_controlled() ||
1289 0 : task_spec_ib_force_disable(task))
1290 0 : return -EPERM;
1291 :
1292 0 : task_clear_spec_ib_disable(task);
1293 0 : task_update_spec_tif(task);
1294 0 : break;
1295 0 : case PR_SPEC_DISABLE:
1296 : case PR_SPEC_FORCE_DISABLE:
1297 : /*
1298 : * Indirect branch speculation is always allowed when
1299 : * mitigation is force disabled.
1300 : */
1301 0 : if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1302 0 : spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1303 : return -EPERM;
1304 :
1305 0 : if (!is_spec_ib_user_controlled())
1306 : return 0;
1307 :
1308 0 : task_set_spec_ib_disable(task);
1309 0 : if (ctrl == PR_SPEC_FORCE_DISABLE)
1310 0 : task_set_spec_ib_force_disable(task);
1311 0 : task_update_spec_tif(task);
1312 0 : break;
1313 : default:
1314 : return -ERANGE;
1315 : }
1316 : return 0;
1317 : }
1318 :
1319 0 : int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1320 : unsigned long ctrl)
1321 : {
1322 0 : switch (which) {
1323 0 : case PR_SPEC_STORE_BYPASS:
1324 0 : return ssb_prctl_set(task, ctrl);
1325 0 : case PR_SPEC_INDIRECT_BRANCH:
1326 0 : return ib_prctl_set(task, ctrl);
1327 : default:
1328 : return -ENODEV;
1329 : }
1330 : }
1331 :
1332 : #ifdef CONFIG_SECCOMP
1333 : void arch_seccomp_spec_mitigate(struct task_struct *task)
1334 : {
1335 : if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1336 : ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1337 : if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1338 : spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1339 : ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1340 : }
1341 : #endif
1342 :
1343 56 : static int ssb_prctl_get(struct task_struct *task)
1344 : {
1345 56 : switch (ssb_mode) {
1346 : case SPEC_STORE_BYPASS_DISABLE:
1347 : return PR_SPEC_DISABLE;
1348 : case SPEC_STORE_BYPASS_SECCOMP:
1349 : case SPEC_STORE_BYPASS_PRCTL:
1350 56 : if (task_spec_ssb_force_disable(task))
1351 : return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1352 56 : if (task_spec_ssb_noexec(task))
1353 : return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
1354 56 : if (task_spec_ssb_disable(task))
1355 0 : return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1356 : return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1357 : default:
1358 0 : if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1359 0 : return PR_SPEC_ENABLE;
1360 : return PR_SPEC_NOT_AFFECTED;
1361 : }
1362 : }
1363 :
1364 56 : static int ib_prctl_get(struct task_struct *task)
1365 : {
1366 56 : if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1367 : return PR_SPEC_NOT_AFFECTED;
1368 :
1369 56 : if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1370 56 : spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1371 : return PR_SPEC_ENABLE;
1372 0 : else if (is_spec_ib_user_controlled()) {
1373 0 : if (task_spec_ib_force_disable(task))
1374 : return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1375 0 : if (task_spec_ib_disable(task))
1376 : return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1377 0 : return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1378 0 : } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1379 0 : spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1380 : spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1381 : return PR_SPEC_DISABLE;
1382 : else
1383 0 : return PR_SPEC_NOT_AFFECTED;
1384 : }
1385 :
1386 112 : int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1387 : {
1388 112 : switch (which) {
1389 56 : case PR_SPEC_STORE_BYPASS:
1390 56 : return ssb_prctl_get(task);
1391 56 : case PR_SPEC_INDIRECT_BRANCH:
1392 56 : return ib_prctl_get(task);
1393 : default:
1394 : return -ENODEV;
1395 : }
1396 : }
1397 :
1398 3 : void x86_spec_ctrl_setup_ap(void)
1399 : {
1400 3 : if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1401 3 : wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
1402 :
1403 3 : if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1404 0 : x86_amd_ssb_disable();
1405 3 : }
1406 :
1407 : bool itlb_multihit_kvm_mitigation;
1408 : EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1409 :
1410 : #undef pr_fmt
1411 : #define pr_fmt(fmt) "L1TF: " fmt
1412 :
1413 : /* Default mitigation for L1TF-affected CPUs */
1414 : enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1415 : #if IS_ENABLED(CONFIG_KVM_INTEL)
1416 : EXPORT_SYMBOL_GPL(l1tf_mitigation);
1417 : #endif
1418 : enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1419 : EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1420 :
1421 : /*
1422 : * These CPUs all support a 44-bit physical address space internally in the
1423 : * cache, but CPUID can report a smaller number of physical address bits.
1424 : *
1425 : * The L1TF mitigation uses the topmost address bit for the inversion of
1426 : * non-present PTEs. When the installed memory reaches into the topmost
1427 : * address bit due to memory holes, which has been observed on machines
1428 : * which report 36 physical address bits and have 32G RAM installed,
1429 : * the mitigation range check in l1tf_select_mitigation() triggers.
1430 : * This is a false positive because the mitigation is still possible due to
1431 : * the fact that the cache uses 44 bits internally. Use the cache bits
1432 : * instead of the reported physical bits and adjust them on the affected
1433 : * machines to 44 bits if the reported bits are less than 44.
1434 : */
1435 1 : static void override_cache_bits(struct cpuinfo_x86 *c)
1436 : {
1437 1 : if (c->x86 != 6)
1438 : return;
1439 :
1440 1 : switch (c->x86_model) {
1441 1 : case INTEL_FAM6_NEHALEM:
1442 : case INTEL_FAM6_WESTMERE:
1443 : case INTEL_FAM6_SANDYBRIDGE:
1444 : case INTEL_FAM6_IVYBRIDGE:
1445 : case INTEL_FAM6_HASWELL:
1446 : case INTEL_FAM6_HASWELL_L:
1447 : case INTEL_FAM6_HASWELL_G:
1448 : case INTEL_FAM6_BROADWELL:
1449 : case INTEL_FAM6_BROADWELL_G:
1450 : case INTEL_FAM6_SKYLAKE_L:
1451 : case INTEL_FAM6_SKYLAKE:
1452 : case INTEL_FAM6_KABYLAKE_L:
1453 : case INTEL_FAM6_KABYLAKE:
1454 1 : if (c->x86_cache_bits < 44)
1455 1 : c->x86_cache_bits = 44;
1456 : break;
1457 : }
1458 1 : }
1459 :
1460 1 : static void __init l1tf_select_mitigation(void)
1461 : {
1462 1 : u64 half_pa;
1463 :
1464 1 : if (!boot_cpu_has_bug(X86_BUG_L1TF))
1465 : return;
1466 :
1467 1 : if (cpu_mitigations_off())
1468 0 : l1tf_mitigation = L1TF_MITIGATION_OFF;
1469 1 : else if (cpu_mitigations_auto_nosmt())
1470 0 : l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1471 :
1472 1 : override_cache_bits(&boot_cpu_data);
1473 :
1474 1 : switch (l1tf_mitigation) {
1475 : case L1TF_MITIGATION_OFF:
1476 : case L1TF_MITIGATION_FLUSH_NOWARN:
1477 : case L1TF_MITIGATION_FLUSH:
1478 : break;
1479 0 : case L1TF_MITIGATION_FLUSH_NOSMT:
1480 : case L1TF_MITIGATION_FULL:
1481 0 : cpu_smt_disable(false);
1482 0 : break;
1483 0 : case L1TF_MITIGATION_FULL_FORCE:
1484 0 : cpu_smt_disable(true);
1485 0 : break;
1486 : }
1487 :
1488 : #if CONFIG_PGTABLE_LEVELS == 2
1489 : pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1490 : return;
1491 : #endif
1492 :
1493 1 : half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1494 2 : if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1495 1 : e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1496 0 : pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1497 0 : pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1498 : half_pa);
1499 0 : pr_info("However, doing so will make a part of your RAM unusable.\n");
1500 0 : pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1501 0 : return;
1502 : }
1503 :
1504 1 : setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1505 : }
1506 :
1507 0 : static int __init l1tf_cmdline(char *str)
1508 : {
1509 0 : if (!boot_cpu_has_bug(X86_BUG_L1TF))
1510 : return 0;
1511 :
1512 0 : if (!str)
1513 : return -EINVAL;
1514 :
1515 0 : if (!strcmp(str, "off"))
1516 0 : l1tf_mitigation = L1TF_MITIGATION_OFF;
1517 0 : else if (!strcmp(str, "flush,nowarn"))
1518 0 : l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1519 0 : else if (!strcmp(str, "flush"))
1520 0 : l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1521 0 : else if (!strcmp(str, "flush,nosmt"))
1522 0 : l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1523 0 : else if (!strcmp(str, "full"))
1524 0 : l1tf_mitigation = L1TF_MITIGATION_FULL;
1525 0 : else if (!strcmp(str, "full,force"))
1526 0 : l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1527 :
1528 : return 0;
1529 : }
1530 : early_param("l1tf", l1tf_cmdline);
1531 :
1532 : #undef pr_fmt
1533 : #define pr_fmt(fmt) fmt
1534 :
1535 : #ifdef CONFIG_SYSFS
1536 :
1537 : #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
1538 :
1539 : #if IS_ENABLED(CONFIG_KVM_INTEL)
1540 : static const char * const l1tf_vmx_states[] = {
1541 : [VMENTER_L1D_FLUSH_AUTO] = "auto",
1542 : [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
1543 : [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
1544 : [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
1545 : [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
1546 : [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
1547 : };
1548 :
1549 : static ssize_t l1tf_show_state(char *buf)
1550 : {
1551 : if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
1552 : return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1553 :
1554 : if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
1555 : (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
1556 : sched_smt_active())) {
1557 : return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
1558 : l1tf_vmx_states[l1tf_vmx_mitigation]);
1559 : }
1560 :
1561 : return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
1562 : l1tf_vmx_states[l1tf_vmx_mitigation],
1563 : sched_smt_active() ? "vulnerable" : "disabled");
1564 : }
1565 :
1566 : static ssize_t itlb_multihit_show_state(char *buf)
1567 : {
1568 : if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
1569 : !boot_cpu_has(X86_FEATURE_VMX))
1570 : return sprintf(buf, "KVM: Mitigation: VMX unsupported\n");
1571 : else if (!(cr4_read_shadow() & X86_CR4_VMXE))
1572 : return sprintf(buf, "KVM: Mitigation: VMX disabled\n");
1573 : else if (itlb_multihit_kvm_mitigation)
1574 : return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
1575 : else
1576 : return sprintf(buf, "KVM: Vulnerable\n");
1577 : }
1578 : #else
1579 0 : static ssize_t l1tf_show_state(char *buf)
1580 : {
1581 0 : return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
1582 : }
1583 :
1584 0 : static ssize_t itlb_multihit_show_state(char *buf)
1585 : {
1586 0 : return sprintf(buf, "Processor vulnerable\n");
1587 : }
1588 : #endif
1589 :
1590 0 : static ssize_t mds_show_state(char *buf)
1591 : {
1592 0 : if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1593 0 : return sprintf(buf, "%s; SMT Host state unknown\n",
1594 : mds_strings[mds_mitigation]);
1595 : }
1596 :
1597 0 : if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
1598 0 : return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1599 0 : (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
1600 0 : sched_smt_active() ? "mitigated" : "disabled"));
1601 : }
1602 :
1603 0 : return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
1604 : sched_smt_active() ? "vulnerable" : "disabled");
1605 : }
1606 :
1607 0 : static ssize_t tsx_async_abort_show_state(char *buf)
1608 : {
1609 0 : if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
1610 : (taa_mitigation == TAA_MITIGATION_OFF))
1611 0 : return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
1612 :
1613 0 : if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
1614 0 : return sprintf(buf, "%s; SMT Host state unknown\n",
1615 : taa_strings[taa_mitigation]);
1616 : }
1617 :
1618 0 : return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
1619 : sched_smt_active() ? "vulnerable" : "disabled");
1620 : }
1621 :
1622 0 : static char *stibp_state(void)
1623 : {
1624 0 : if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
1625 : return "";
1626 :
1627 0 : switch (spectre_v2_user_stibp) {
1628 : case SPECTRE_V2_USER_NONE:
1629 : return ", STIBP: disabled";
1630 0 : case SPECTRE_V2_USER_STRICT:
1631 0 : return ", STIBP: forced";
1632 0 : case SPECTRE_V2_USER_STRICT_PREFERRED:
1633 0 : return ", STIBP: always-on";
1634 : case SPECTRE_V2_USER_PRCTL:
1635 : case SPECTRE_V2_USER_SECCOMP:
1636 0 : if (static_key_enabled(&switch_to_cond_stibp))
1637 0 : return ", STIBP: conditional";
1638 : }
1639 : return "";
1640 : }
1641 :
1642 0 : static char *ibpb_state(void)
1643 : {
1644 0 : if (boot_cpu_has(X86_FEATURE_IBPB)) {
1645 0 : if (static_key_enabled(&switch_mm_always_ibpb))
1646 : return ", IBPB: always-on";
1647 0 : if (static_key_enabled(&switch_mm_cond_ibpb))
1648 : return ", IBPB: conditional";
1649 0 : return ", IBPB: disabled";
1650 : }
1651 : return "";
1652 : }
1653 :
1654 0 : static ssize_t srbds_show_state(char *buf)
1655 : {
1656 0 : return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
1657 : }
1658 :
1659 0 : static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
1660 : char *buf, unsigned int bug)
1661 : {
1662 0 : if (!boot_cpu_has_bug(bug))
1663 0 : return sprintf(buf, "Not affected\n");
1664 :
1665 0 : switch (bug) {
1666 : case X86_BUG_CPU_MELTDOWN:
1667 0 : if (boot_cpu_has(X86_FEATURE_PTI))
1668 0 : return sprintf(buf, "Mitigation: PTI\n");
1669 :
1670 0 : if (hypervisor_is_type(X86_HYPER_XEN_PV))
1671 0 : return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
1672 :
1673 : break;
1674 :
1675 0 : case X86_BUG_SPECTRE_V1:
1676 0 : return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
1677 :
1678 : case X86_BUG_SPECTRE_V2:
1679 0 : return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
1680 : ibpb_state(),
1681 0 : boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
1682 : stibp_state(),
1683 0 : boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
1684 : spectre_v2_module_string());
1685 :
1686 0 : case X86_BUG_SPEC_STORE_BYPASS:
1687 0 : return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1688 :
1689 : case X86_BUG_L1TF:
1690 0 : if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
1691 0 : return l1tf_show_state(buf);
1692 : break;
1693 :
1694 0 : case X86_BUG_MDS:
1695 0 : return mds_show_state(buf);
1696 :
1697 0 : case X86_BUG_TAA:
1698 0 : return tsx_async_abort_show_state(buf);
1699 :
1700 : case X86_BUG_ITLB_MULTIHIT:
1701 0 : return itlb_multihit_show_state(buf);
1702 :
1703 0 : case X86_BUG_SRBDS:
1704 0 : return srbds_show_state(buf);
1705 :
1706 : default:
1707 : break;
1708 : }
1709 :
1710 0 : return sprintf(buf, "Vulnerable\n");
1711 : }
1712 :
1713 0 : ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1714 : {
1715 0 : return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1716 : }
1717 :
1718 0 : ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
1719 : {
1720 0 : return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
1721 : }
1722 :
1723 0 : ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
1724 : {
1725 0 : return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
1726 : }
1727 :
1728 0 : ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1729 : {
1730 0 : return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1731 : }
1732 :
1733 0 : ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1734 : {
1735 0 : return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1736 : }
1737 :
1738 0 : ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
1739 : {
1740 0 : return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
1741 : }
1742 :
1743 0 : ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
1744 : {
1745 0 : return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
1746 : }
1747 :
1748 0 : ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
1749 : {
1750 0 : return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
1751 : }
1752 :
1753 0 : ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
1754 : {
1755 0 : return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
1756 : }
1757 : #endif