Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : #include <linux/export.h>
3 : #include <linux/bitops.h>
4 : #include <linux/elf.h>
5 : #include <linux/mm.h>
6 :
7 : #include <linux/io.h>
8 : #include <linux/sched.h>
9 : #include <linux/sched/clock.h>
10 : #include <linux/random.h>
11 : #include <linux/topology.h>
12 : #include <asm/processor.h>
13 : #include <asm/apic.h>
14 : #include <asm/cacheinfo.h>
15 : #include <asm/cpu.h>
16 : #include <asm/spec-ctrl.h>
17 : #include <asm/smp.h>
18 : #include <asm/numa.h>
19 : #include <asm/pci-direct.h>
20 : #include <asm/delay.h>
21 : #include <asm/debugreg.h>
22 : #include <asm/resctrl.h>
23 :
24 : #ifdef CONFIG_X86_64
25 : # include <asm/mmconfig.h>
26 : #endif
27 :
28 : #include "cpu.h"
29 :
30 : static const int amd_erratum_383[];
31 : static const int amd_erratum_400[];
32 : static const int amd_erratum_1054[];
33 : static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
34 :
35 : /*
36 : * nodes_per_socket: Stores the number of nodes per socket.
37 : * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
38 : * Node Identifiers[10:8]
39 : */
40 : static u32 nodes_per_socket = 1;
41 :
42 0 : static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
43 : {
44 0 : u32 gprs[8] = { 0 };
45 0 : int err;
46 :
47 0 : WARN_ONCE((boot_cpu_data.x86 != 0xf),
48 : "%s should only be used on K8!\n", __func__);
49 :
50 0 : gprs[1] = msr;
51 0 : gprs[7] = 0x9c5a203a;
52 :
53 0 : err = rdmsr_safe_regs(gprs);
54 :
55 0 : *p = gprs[0] | ((u64)gprs[2] << 32);
56 :
57 0 : return err;
58 : }
59 :
60 0 : static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
61 : {
62 0 : u32 gprs[8] = { 0 };
63 :
64 0 : WARN_ONCE((boot_cpu_data.x86 != 0xf),
65 : "%s should only be used on K8!\n", __func__);
66 :
67 0 : gprs[0] = (u32)val;
68 0 : gprs[1] = msr;
69 0 : gprs[2] = val >> 32;
70 0 : gprs[7] = 0x9c5a203a;
71 :
72 0 : return wrmsr_safe_regs(gprs);
73 : }
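/*
 * Note on the calling convention assumed above: rdmsr_safe_regs() and
 * wrmsr_safe_regs() take a u32 gprs[8] array laid out as
 * { eax, ecx, edx, ebx, <spare>, ebp, esi, edi }. ECX (gprs[1]) selects
 * the MSR, EDI (gprs[7]) must carry the K8 access passcode 0x9c5a203a,
 * and the value travels in EDX:EAX (gprs[2]:gprs[0]).
 */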
74 :
75 : /*
76 : * B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
77 : * misexecution of code under Linux. Owners of such processors should
78 : * contact AMD for precise details and a CPU swap.
79 : *
80 : * See http://www.multimania.com/poulot/k6bug.html
81 : * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
82 : * (Publication # 21266 Issue Date: August 1998)
83 : *
84 : * The following test is somewhat unusual: AMD neglected to bump the
85 : * reported stepping when fixing the bug, but they also tweaked some
86 : * performance at the same time.
87 : */
88 :
89 : #ifdef CONFIG_X86_32
90 : extern __visible void vide(void);
91 : __asm__(".text\n"
92 : ".globl vide\n"
93 : ".type vide, @function\n"
94 : ".align 4\n"
95 : "vide: ret\n");
96 : #endif
97 :
98 : static void init_amd_k5(struct cpuinfo_x86 *c)
99 : {
100 : #ifdef CONFIG_X86_32
101 : /*
102 : * General Systems BIOSen alias the cpu frequency registers
103 : * of the Elan at 0x000df000. Unfortunately, one of the Linux
104 : * drivers subsequently pokes it, and changes the CPU speed.
105 : * Workaround : Remove the unneeded alias.
106 : */
107 : #define CBAR (0xfffc) /* Configuration Base Address (32-bit) */
108 : #define CBAR_ENB (0x80000000)
109 : #define CBAR_KEY (0X000000CB)
110 : if (c->x86_model == 9 || c->x86_model == 10) {
111 : if (inl(CBAR) & CBAR_ENB)
112 : outl(0 | CBAR_KEY, CBAR);
113 : }
114 : #endif
115 : }
116 :
117 : static void init_amd_k6(struct cpuinfo_x86 *c)
118 : {
119 : #ifdef CONFIG_X86_32
120 : u32 l, h;
121 : int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
122 :
123 : if (c->x86_model < 6) {
124 : /* Based on AMD doc 20734R - June 2000 */
125 : if (c->x86_model == 0) {
126 : clear_cpu_cap(c, X86_FEATURE_APIC);
127 : set_cpu_cap(c, X86_FEATURE_PGE);
128 : }
129 : return;
130 : }
131 :
132 : if (c->x86_model == 6 && c->x86_stepping == 1) {
133 : const int K6_BUG_LOOP = 1000000;
134 : int n;
135 : void (*f_vide)(void);
136 : u64 d, d2;
137 :
138 : pr_info("AMD K6 stepping B detected - ");
139 :
140 : /*
141 : * It looks like AMD fixed the 2.6.2 bug and improved indirect
142 : * calls at the same time.
143 : */
144 :
145 : n = K6_BUG_LOOP;
146 : f_vide = vide;
147 : OPTIMIZER_HIDE_VAR(f_vide);
148 : d = rdtsc();
149 : while (n--)
150 : f_vide();
151 : d2 = rdtsc();
152 : d = d2-d;
153 :
154 : if (d > 20*K6_BUG_LOOP)
155 : pr_cont("system stability may be impaired when more than 32 MB are used.\n");
156 : else
157 : pr_cont("probably OK (after B9730xxxx).\n");
158 : }
159 :
160 : /* K6 with old style WHCR */
161 : if (c->x86_model < 8 ||
162 : (c->x86_model == 8 && c->x86_stepping < 8)) {
163 : /* We can only write allocate on the low 508Mb */
164 : if (mbytes > 508)
165 : mbytes = 508;
166 :
167 : rdmsr(MSR_K6_WHCR, l, h);
168 : if ((l&0x0000FFFF) == 0) {
169 : unsigned long flags;
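/*
 * Assumed old-style WHCR layout: bit 0 enables write allocation and
 * bits 7:1 hold the limit in 4 MB units, hence the 508 MB cap above.
 */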
170 : l = (1<<0)|((mbytes/4)<<1);
171 : local_irq_save(flags);
172 : wbinvd();
173 : wrmsr(MSR_K6_WHCR, l, h);
174 : local_irq_restore(flags);
175 : pr_info("Enabling old style K6 write allocation for %d Mb\n",
176 : mbytes);
177 : }
178 : return;
179 : }
180 :
181 : if ((c->x86_model == 8 && c->x86_stepping > 7) ||
182 : c->x86_model == 9 || c->x86_model == 13) {
183 : /* The more serious chips .. */
184 :
185 : if (mbytes > 4092)
186 : mbytes = 4092;
187 :
188 : rdmsr(MSR_K6_WHCR, l, h);
189 : if ((l&0xFFFF0000) == 0) {
190 : unsigned long flags;
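/*
 * Assumed new-style WHCR layout: bits 31:22 hold the limit in 4 MB units
 * and bit 16 enables write allocation, hence the 4092 MB cap above.
 */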
191 : l = ((mbytes>>2)<<22)|(1<<16);
192 : local_irq_save(flags);
193 : wbinvd();
194 : wrmsr(MSR_K6_WHCR, l, h);
195 : local_irq_restore(flags);
196 : pr_info("Enabling new style K6 write allocation for %d Mb\n",
197 : mbytes);
198 : }
199 :
200 : return;
201 : }
202 :
203 : if (c->x86_model == 10) {
204 : /* AMD Geode LX is model 10 */
205 : /* placeholder for any needed mods */
206 : return;
207 : }
208 : #endif
209 : }
210 :
211 : static void init_amd_k7(struct cpuinfo_x86 *c)
212 : {
213 : #ifdef CONFIG_X86_32
214 : u32 l, h;
215 :
216 : /*
217 : * Bit 15 of the Athlon-specific MSR 15 needs to be 0
218 : * to enable SSE on Palomino/Morgan/Barton CPUs.
219 : * If the BIOS didn't enable it already, enable it here.
220 : */
221 : if (c->x86_model >= 6 && c->x86_model <= 10) {
222 : if (!cpu_has(c, X86_FEATURE_XMM)) {
223 : pr_info("Enabling disabled K7/SSE Support.\n");
224 : msr_clear_bit(MSR_K7_HWCR, 15);
225 : set_cpu_cap(c, X86_FEATURE_XMM);
226 : }
227 : }
228 :
229 : /*
230 : * It's been determined by AMD that Athlons since model 8 stepping 1
231 : * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
232 : * as per AMD technical note 27212 0.2.
233 : */
234 : if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
235 : rdmsr(MSR_K7_CLK_CTL, l, h);
236 : if ((l & 0xfff00000) != 0x20000000) {
237 : pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
238 : l, ((l & 0x000fffff)|0x20000000));
239 : wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
240 : }
241 : }
242 :
243 : /* Is this being called from identify_secondary_cpu()? */
244 : if (!c->cpu_index)
245 : return;
246 :
247 : /*
248 : * Certain Athlons might work (for various values of 'work') in SMP
249 : * but they are not certified as MP capable.
250 : */
251 : /* Athlon 660/661 is valid. */
252 : if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
253 : (c->x86_stepping == 1)))
254 : return;
255 :
256 : /* Duron 670 is valid */
257 : if ((c->x86_model == 7) && (c->x86_stepping == 0))
258 : return;
259 :
260 : /*
261 : * Athlon 662, Duron 671, and Athlons above model 7 have the MP
262 : * capability bit. It's worth noting that the A5 stepping (662) of some
263 : * Athlon XPs has the MP bit set.
264 : * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
265 : * more.
266 : */
267 : if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
268 : ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
269 : (c->x86_model > 7))
270 : if (cpu_has(c, X86_FEATURE_MP))
271 : return;
272 :
273 : /* If we get here, not a certified SMP capable AMD system. */
274 :
275 : /*
276 : * Don't taint if we are running SMP kernel on a single non-MP
277 : * approved Athlon
278 : */
279 : WARN_ONCE(1, "WARNING: This combination of AMD"
280 : " processors is not suitable for SMP.\n");
281 : add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
282 : #endif
283 : }
284 :
285 : #ifdef CONFIG_NUMA
286 : /*
287 : * To work around a broken NUMA config. Read the comment in
288 : * srat_detect_node().
289 : */
290 0 : static int nearby_node(int apicid)
291 : {
292 0 : int i, node;
293 :
294 0 : for (i = apicid - 1; i >= 0; i--) {
295 0 : node = __apicid_to_node[i];
296 0 : if (node != NUMA_NO_NODE && node_online(node))
297 0 : return node;
298 : }
299 0 : for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
300 0 : node = __apicid_to_node[i];
301 0 : if (node != NUMA_NO_NODE && node_online(node))
302 0 : return node;
303 : }
304 0 : return first_node(node_online_map); /* Shouldn't happen */
305 : }
306 : #endif
307 :
308 : /*
309 : * Fix up cpu_core_id for pre-F17h systems to be in the
310 : * [0 .. cores_per_node - 1] range. Not really needed but
311 : * kept so as not to break existing setups.
312 : */
313 0 : static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
314 : {
315 0 : u32 cus_per_node;
316 :
317 0 : if (c->x86 >= 0x17)
318 : return;
319 :
320 0 : cus_per_node = c->x86_max_cores / nodes_per_socket;
321 0 : c->cpu_core_id %= cus_per_node;
322 : }
323 :
324 : /*
325 : * Fixup core topology information for
326 : * (1) AMD multi-node processors
327 : * Assumption: Number of cores in each internal node is the same.
328 : * (2) AMD processors supporting compute units
329 : */
330 0 : static void amd_get_topology(struct cpuinfo_x86 *c)
331 : {
332 0 : int cpu = smp_processor_id();
333 :
334 : /* get information required for multi-node processors */
335 0 : if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
336 0 : int err;
337 0 : u32 eax, ebx, ecx, edx;
338 :
339 0 : cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
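/*
 * Assumed CPUID Fn8000_001E layout: ECX[7:0] is the node ID and
 * EBX[7:0] the compute-unit (Fam15h) or core (Fam17h+) ID, which is
 * how the fields below are parsed.
 */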
340 :
341 0 : c->cpu_die_id = ecx & 0xff;
342 :
343 0 : if (c->x86 == 0x15)
344 0 : c->cu_id = ebx & 0xff;
345 :
346 0 : if (c->x86 >= 0x17) {
347 0 : c->cpu_core_id = ebx & 0xff;
348 :
349 0 : if (smp_num_siblings > 1)
350 0 : c->x86_max_cores /= smp_num_siblings;
351 : }
352 :
353 : /*
354 : * If leaf 0xB is available, use it to derive
355 : * topology information.
356 : */
357 0 : err = detect_extended_topology(c);
358 0 : if (!err)
359 0 : c->x86_coreid_bits = get_count_order(c->x86_max_cores);
360 :
361 0 : cacheinfo_amd_init_llc_id(c, cpu);
362 :
363 0 : } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
364 0 : u64 value;
365 :
366 0 : rdmsrl(MSR_FAM10H_NODE_ID, value);
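/* Assumed MSR_FAM10H_NODE_ID layout: bits 2:0 = node ID, bits 5:3 = nodes per processor - 1 */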
367 0 : c->cpu_die_id = value & 7;
368 :
369 0 : per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
370 : } else
371 : return;
372 :
373 0 : if (nodes_per_socket > 1) {
374 0 : set_cpu_cap(c, X86_FEATURE_AMD_DCM);
375 0 : legacy_fixup_core_id(c);
376 : }
377 : }
378 :
379 : /*
380 : * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
381 : * Assumes number of cores is a power of two.
382 : */
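/*
 * For example (illustrative): with x86_coreid_bits == 3, an initial
 * APIC ID of 0x1a yields cpu_core_id 2 and phys_proc_id 3.
 */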
383 0 : static void amd_detect_cmp(struct cpuinfo_x86 *c)
384 : {
385 0 : unsigned bits;
386 0 : int cpu = smp_processor_id();
387 :
388 0 : bits = c->x86_coreid_bits;
389 : /* Low order bits define the core id (index of core in socket) */
390 0 : c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
391 : /* Convert the initial APIC ID into the socket ID */
392 0 : c->phys_proc_id = c->initial_apicid >> bits;
393 : /* use socket ID also for last level cache */
394 0 : per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
395 0 : }
396 :
397 0 : static void amd_detect_ppin(struct cpuinfo_x86 *c)
398 : {
399 0 : unsigned long long val;
400 :
401 0 : if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
402 0 : return;
403 :
404 : /* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
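/* Assumed PPIN_CTL layout: bit 0 = LockOut, bit 1 = PPIN_EN */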
405 0 : if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
406 0 : goto clear_ppin;
407 :
408 : /* PPIN is locked in disabled mode, clear feature bit */
409 0 : if ((val & 3UL) == 1UL)
410 0 : goto clear_ppin;
411 :
412 : /* If PPIN is disabled, try to enable it */
413 0 : if (!(val & 2UL)) {
414 0 : wrmsrl_safe(MSR_AMD_PPIN_CTL, val | 2UL);
415 0 : rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
416 : }
417 :
418 : /* If PPIN_EN bit is 1, return from here; otherwise fall through */
419 0 : if (val & 2UL)
420 : return;
421 :
422 0 : clear_ppin:
423 0 : clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
424 : }
425 :
426 0 : u32 amd_get_nodes_per_socket(void)
427 : {
428 0 : return nodes_per_socket;
429 : }
430 : EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
431 :
432 0 : static void srat_detect_node(struct cpuinfo_x86 *c)
433 : {
434 : #ifdef CONFIG_NUMA
435 0 : int cpu = smp_processor_id();
436 0 : int node;
437 0 : unsigned apicid = c->apicid;
438 :
439 0 : node = numa_cpu_node(cpu);
440 0 : if (node == NUMA_NO_NODE)
441 0 : node = per_cpu(cpu_llc_id, cpu);
442 :
443 : /*
444 : * On a multi-fabric platform (e.g. Numascale NumaChip), a
445 : * platform-specific handler needs to be called to fix up some
446 : * IDs of the CPU.
447 : */
448 0 : if (x86_cpuinit.fixup_cpu_id)
449 0 : x86_cpuinit.fixup_cpu_id(c, node);
450 :
451 0 : if (!node_online(node)) {
452 : /*
453 : * Two possibilities here:
454 : *
455 : * - The CPU is missing memory and no node was created. In
456 : * that case try picking one from a nearby CPU.
457 : *
458 : * - The APIC IDs differ from the HyperTransport node IDs
459 : * which the K8 northbridge parsing fills in. Assume
460 : * they are all increased by a constant offset, but in
461 : * the same order as the HT nodeids. If that doesn't
462 : * result in a usable node fall back to the path for the
463 : * previous case.
464 : *
465 : * This workaround operates directly on the mapping between
466 : * APIC ID and NUMA node, assuming certain relationship
467 : * between APIC ID, HT node ID and NUMA topology. As going
468 : * through CPU mapping may alter the outcome, directly
469 : * access __apicid_to_node[].
470 : */
471 0 : int ht_nodeid = c->initial_apicid;
472 :
473 0 : if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
474 0 : node = __apicid_to_node[ht_nodeid];
475 : /* Pick a nearby node */
476 0 : if (!node_online(node))
477 0 : node = nearby_node(apicid);
478 : }
479 0 : numa_set_node(cpu, node);
480 : #endif
481 0 : }
482 :
483 0 : static void early_init_amd_mc(struct cpuinfo_x86 *c)
484 : {
485 : #ifdef CONFIG_SMP
486 0 : unsigned bits, ecx;
487 :
488 : /* Multi core CPU? */
489 0 : if (c->extended_cpuid_level < 0x80000008)
490 : return;
491 :
492 0 : ecx = cpuid_ecx(0x80000008);
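/* Assumed CPUID Fn8000_0008 ECX layout: bits 7:0 = core count - 1, bits 15:12 = APIC ID core-ID size */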
493 :
494 0 : c->x86_max_cores = (ecx & 0xff) + 1;
495 :
496 : /* Is the CPU telling us the core ID bit shift? */
497 0 : bits = (ecx >> 12) & 0xF;
498 :
499 : /* Otherwise recompute */
500 0 : if (bits == 0) {
501 0 : while ((1 << bits) < c->x86_max_cores)
502 0 : bits++;
503 : }
504 :
505 0 : c->x86_coreid_bits = bits;
506 : #endif
507 : }
508 :
509 0 : static void bsp_init_amd(struct cpuinfo_x86 *c)
510 : {
511 0 : if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
512 :
513 0 : if (c->x86 > 0x10 ||
514 0 : (c->x86 == 0x10 && c->x86_model >= 0x2)) {
515 0 : u64 val;
516 :
517 0 : rdmsrl(MSR_K7_HWCR, val);
518 0 : if (!(val & BIT(24)))
519 0 : pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
520 : }
521 : }
522 :
523 0 : if (c->x86 == 0x15) {
524 0 : unsigned long upperbit;
525 0 : u32 cpuid, assoc;
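/*
 * The calculation below assumes CPUID Fn8000_0005 EDX describes the
 * L1 instruction cache (size in KB in bits 31:24, associativity in
 * bits 23:16). upperbit is then the way size in bytes, and randomizing
 * VA bits [12:upper_bit) once per boot avoids cache-aliasing penalties
 * between the two cores sharing an L1I in a Fam15h compute unit.
 */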
526 :
527 0 : cpuid = cpuid_edx(0x80000005);
528 0 : assoc = cpuid >> 16 & 0xff;
529 0 : upperbit = ((cpuid >> 24) << 10) / assoc;
530 :
531 0 : va_align.mask = (upperbit - 1) & PAGE_MASK;
532 0 : va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
533 :
534 : /* A random value per boot for bit slice [12:upper_bit) */
535 0 : va_align.bits = get_random_int() & va_align.mask;
536 : }
537 :
538 0 : if (cpu_has(c, X86_FEATURE_MWAITX))
539 0 : use_mwaitx_delay();
540 :
541 0 : if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
542 0 : u32 ecx;
543 :
544 0 : ecx = cpuid_ecx(0x8000001e);
545 0 : __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
546 0 : } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
547 0 : u64 value;
548 :
549 0 : rdmsrl(MSR_FAM10H_NODE_ID, value);
550 0 : __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
551 : }
552 :
553 0 : if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
554 0 : !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
555 0 : c->x86 >= 0x15 && c->x86 <= 0x17) {
556 0 : unsigned int bit;
557 :
558 0 : switch (c->x86) {
559 : case 0x15: bit = 54; break;
560 0 : case 0x16: bit = 33; break;
561 0 : case 0x17: bit = 10; break;
562 : default: return;
563 : }
564 : /*
565 : * Try to cache the base value so further operations can
566 : * avoid RMW. If that faults, do not enable SSBD.
567 : */
568 0 : if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
569 0 : setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
570 0 : setup_force_cpu_cap(X86_FEATURE_SSBD);
571 0 : x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
572 : }
573 : }
574 :
575 0 : resctrl_cpu_detect(c);
576 : }
577 :
578 0 : static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
579 : {
580 0 : u64 msr;
581 :
582 : /*
583 : * BIOS support is required for SME and SEV.
584 : * For SME: If BIOS has enabled SME then adjust x86_phys_bits by
585 : * the SME physical address space reduction value.
586 : * If BIOS has not enabled SME then don't advertise the
587 : * SME feature (set in scattered.c).
588 : * For SEV: If BIOS has not enabled SEV then don't advertise the
589 : * SEV and SEV_ES feature (set in scattered.c).
590 : *
591 : * In all cases, since support for SME and SEV requires long mode,
592 : * don't advertise the feature under CONFIG_X86_32.
593 : */
594 0 : if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
595 : /* Check if memory encryption is enabled */
596 0 : rdmsrl(MSR_K8_SYSCFG, msr);
597 0 : if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
598 0 : goto clear_all;
599 :
600 : /*
601 : * Always adjust physical address bits. Even though this
602 : * will be a value above 32-bits this is still done for
603 : * CONFIG_X86_32 so that accurate values are reported.
604 : */
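/*
 * The reduction value is assumed to come from CPUID Fn8000_001F
 * EBX[11:6] (EBX[5:0] being the C-bit position).
 */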
605 0 : c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
606 :
607 0 : if (IS_ENABLED(CONFIG_X86_32))
608 : goto clear_all;
609 :
610 0 : rdmsrl(MSR_K7_HWCR, msr);
611 0 : if (!(msr & MSR_K7_HWCR_SMMLOCK))
612 0 : goto clear_sev;
613 :
614 : return;
615 :
616 0 : clear_all:
617 0 : setup_clear_cpu_cap(X86_FEATURE_SME);
618 0 : clear_sev:
619 0 : setup_clear_cpu_cap(X86_FEATURE_SEV);
620 0 : setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
621 : }
622 : }
623 :
624 0 : static void early_init_amd(struct cpuinfo_x86 *c)
625 : {
626 0 : u64 value;
627 0 : u32 dummy;
628 :
629 0 : early_init_amd_mc(c);
630 :
631 : #ifdef CONFIG_X86_32
632 : if (c->x86 == 6)
633 : set_cpu_cap(c, X86_FEATURE_K7);
634 : #endif
635 :
636 0 : if (c->x86 >= 0xf)
637 0 : set_cpu_cap(c, X86_FEATURE_K8);
638 :
639 0 : rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
640 :
641 : /*
642 : * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a constant
643 : * rate across P/T state changes and does not stop in deep C-states.
644 : */
645 0 : if (c->x86_power & (1 << 8)) {
646 0 : set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
647 0 : set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
648 : }
649 :
650 : /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
651 0 : if (c->x86_power & BIT(12))
652 0 : set_cpu_cap(c, X86_FEATURE_ACC_POWER);
653 :
654 : #ifdef CONFIG_X86_64
655 0 : set_cpu_cap(c, X86_FEATURE_SYSCALL32);
656 : #else
657 : /* Set MTRR capability flag if appropriate */
658 : if (c->x86 == 5)
659 : if (c->x86_model == 13 || c->x86_model == 9 ||
660 : (c->x86_model == 8 && c->x86_stepping >= 8))
661 : set_cpu_cap(c, X86_FEATURE_K6_MTRR);
662 : #endif
663 : #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
664 : /*
665 : * ApicID can always be treated as an 8-bit value for AMD APIC versions
666 : * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
667 : * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
668 : * after 16h.
669 : */
670 : if (boot_cpu_has(X86_FEATURE_APIC)) {
671 : if (c->x86 > 0x16)
672 : set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
673 : else if (c->x86 >= 0xf) {
674 : /* check CPU config space for extended APIC ID */
675 : unsigned int val;
676 :
677 : val = read_pci_config(0, 24, 0, 0x68);
678 : if ((val >> 17 & 0x3) == 0x3)
679 : set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
680 : }
681 : }
682 : #endif
683 :
684 : /*
685 : * This is only needed to tell the kernel whether to use VMCALL
686 : * and VMMCALL. VMMCALL is never executed except under virt, so
687 : * we can set it unconditionally.
688 : */
689 0 : set_cpu_cap(c, X86_FEATURE_VMMCALL);
690 :
691 : /* F16h erratum 793, CVE-2013-6885 */
692 0 : if (c->x86 == 0x16 && c->x86_model <= 0xf)
693 0 : msr_set_bit(MSR_AMD64_LS_CFG, 15);
694 :
695 : /*
696 : * Check whether the machine is affected by erratum 400. This is
697 : * used to select the proper idle routine and to enable the check
698 : * whether the machine is affected in arch_post_acpi_init(), which
699 : * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
700 : */
701 0 : if (cpu_has_amd_erratum(c, amd_erratum_400))
702 0 : set_cpu_bug(c, X86_BUG_AMD_E400);
703 :
704 0 : early_detect_mem_encrypt(c);
705 :
706 : /* Re-enable TopologyExtensions if switched off by BIOS */
707 0 : if (c->x86 == 0x15 &&
708 0 : (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
709 0 : !cpu_has(c, X86_FEATURE_TOPOEXT)) {
710 :
711 0 : if (msr_set_bit(0xc0011005, 54) > 0) {
712 0 : rdmsrl(0xc0011005, value);
713 0 : if (value & BIT_64(54)) {
714 0 : set_cpu_cap(c, X86_FEATURE_TOPOEXT);
715 0 : pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
716 : }
717 : }
718 : }
719 :
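/* CPUID Fn8000_001E EBX[15:8] is assumed to report threads per core/compute unit - 1 */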
720 0 : if (cpu_has(c, X86_FEATURE_TOPOEXT))
721 0 : smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
722 0 : }
723 :
724 0 : static void init_amd_k8(struct cpuinfo_x86 *c)
725 : {
726 0 : u32 level;
727 0 : u64 value;
728 :
729 : /* On C+ stepping K8 rep microcode works well for copy/memset */
730 0 : level = cpuid_eax(1);
731 0 : if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
732 0 : set_cpu_cap(c, X86_FEATURE_REP_GOOD);
733 :
734 : /*
735 : * Some BIOSes incorrectly force this feature, but only K8 revision D
736 : * (model = 0x14) and later actually support it.
737 : * (AMD Erratum #110, docId: 25759).
738 : */
739 0 : if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
740 0 : clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
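/*
 * MSR 0xc001100d is assumed to carry CPUID Fn8000_0001 ECX in its upper
 * 32 bits, so clearing bit 32 hides the LahfSahf (ECX[0]) feature flag.
 */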
741 0 : if (!rdmsrl_amd_safe(0xc001100d, &value)) {
742 0 : value &= ~BIT_64(32);
743 0 : wrmsrl_amd_safe(0xc001100d, value);
744 : }
745 : }
746 :
747 0 : if (!c->x86_model_id[0])
748 0 : strcpy(c->x86_model_id, "Hammer");
749 :
750 : #ifdef CONFIG_SMP
751 : /*
752 : * Disable TLB flush filter by setting HWCR.FFDIS on K8
753 : * bit 6 of msr C001_0015
754 : *
755 : * Errata 63 for SH-B3 steppings
756 : * Errata 122 for all steppings (F+ have it disabled by default)
757 : */
758 0 : msr_set_bit(MSR_K7_HWCR, 6);
759 : #endif
760 0 : set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
761 0 : }
762 :
763 0 : static void init_amd_gh(struct cpuinfo_x86 *c)
764 : {
765 : #ifdef CONFIG_MMCONF_FAM10H
766 : /* do this for boot cpu */
767 : if (c == &boot_cpu_data)
768 : check_enable_amd_mmconf_dmi();
769 :
770 : fam10h_check_enable_mmcfg();
771 : #endif
772 :
773 : /*
774 : * Disable GART TLB Walk Errors on Fam10h. We do this here because this
775 : * is always needed when GART is enabled, even in a kernel which has no
776 : * MCE support built in. BIOS should disable GartTlbWlk Errors already.
777 : * If it doesn't, we do it here as suggested by the BKDG.
778 : *
779 : * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
780 : */
781 0 : msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
782 :
783 : /*
784 : * On family 10h the BIOS may not have properly enabled WC+ support, causing
785 : * it to be converted to CD memtype. This may result in performance
786 : * degradation for certain nested-paging guests. Prevent this conversion
787 : * by clearing bit 24 in MSR_AMD64_BU_CFG2.
788 : *
789 : * NOTE: we want to use the _safe accessors so as not to #GP kvm
790 : * guests on older kvm hosts.
791 : */
792 0 : msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
793 :
794 0 : if (cpu_has_amd_erratum(c, amd_erratum_383))
795 0 : set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
796 0 : }
797 :
798 : #define MSR_AMD64_DE_CFG 0xC0011029
799 :
800 0 : static void init_amd_ln(struct cpuinfo_x86 *c)
801 : {
802 : /*
803 : * Apply erratum 665 fix unconditionally so machines without a BIOS
804 : * fix work.
805 : */
806 0 : msr_set_bit(MSR_AMD64_DE_CFG, 31);
807 0 : }
808 :
809 : static bool rdrand_force;
810 :
811 0 : static int __init rdrand_cmdline(char *str)
812 : {
813 0 : if (!str)
814 : return -EINVAL;
815 :
816 0 : if (!strcmp(str, "force"))
817 0 : rdrand_force = true;
818 : else
819 : return -EINVAL;
820 :
821 0 : return 0;
822 : }
823 : early_param("rdrand", rdrand_cmdline);
824 :
825 0 : static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
826 : {
827 : /*
828 : * Saving of the MSR used to hide the RDRAND support during
829 : * suspend/resume is done by arch/x86/power/cpu.c, which is
830 : * dependent on CONFIG_PM_SLEEP.
831 : */
832 0 : if (!IS_ENABLED(CONFIG_PM_SLEEP))
833 0 : return;
834 :
835 : /*
836 : * The nordrand option can clear X86_FEATURE_RDRAND, so check for
837 : * RDRAND support using the CPUID function directly.
838 : */
839 : if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
840 : return;
841 :
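/*
 * MSR_AMD64_CPUID_FN_1 is assumed to carry CPUID Fn0000_0001 ECX in its
 * upper 32 bits, so bit 62 corresponds to ECX[30] (RDRAND).
 */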
842 : msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
843 :
844 : /*
845 : * Verify that the CPUID change has occurred in case the kernel is
846 : * running virtualized and the hypervisor doesn't support the MSR.
847 : */
848 : if (cpuid_ecx(1) & BIT(30)) {
849 : pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
850 : return;
851 : }
852 :
853 : clear_cpu_cap(c, X86_FEATURE_RDRAND);
854 : pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
855 : }
856 :
857 : static void init_amd_jg(struct cpuinfo_x86 *c)
858 : {
859 : /*
860 : * Some BIOS implementations do not restore proper RDRAND support
861 : * across suspend and resume. Check whether to hide the RDRAND
862 : * instruction support via CPUID.
863 : */
864 0 : clear_rdrand_cpuid_bit(c);
865 : }
866 :
867 0 : static void init_amd_bd(struct cpuinfo_x86 *c)
868 : {
869 0 : u64 value;
870 :
871 : /*
872 : * The way access filter has a performance penalty on some workloads.
873 : * Disable it on the affected CPUs.
874 : */
875 0 : if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
876 0 : if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
877 0 : value |= 0x1E;
878 0 : wrmsrl_safe(MSR_F15H_IC_CFG, value);
879 : }
880 : }
881 :
882 : /*
883 : * Some BIOS implementations do not restore proper RDRAND support
884 : * across suspend and resume. Check whether to hide the RDRAND
885 : * instruction support via CPUID.
886 : */
887 0 : clear_rdrand_cpuid_bit(c);
888 0 : }
889 :
890 0 : static void init_amd_zn(struct cpuinfo_x86 *c)
891 : {
892 0 : set_cpu_cap(c, X86_FEATURE_ZEN);
893 :
894 : #ifdef CONFIG_NUMA
895 0 : node_reclaim_distance = 32;
896 : #endif
897 :
898 : /*
899 : * Fix erratum 1076: CPB feature bit not being set in CPUID.
900 : * Always set it, except when running under a hypervisor.
901 : */
902 0 : if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
903 0 : set_cpu_cap(c, X86_FEATURE_CPB);
904 0 : }
905 :
906 0 : static void init_amd(struct cpuinfo_x86 *c)
907 : {
908 0 : early_init_amd(c);
909 :
910 : /*
911 : * Bit 31 in the standard CPUID leaf was used for a nonstandard 3DNow ID;
912 : * 3DNow is identified by bit 31 in the extended CPUID leaf (1*32+31) anyway.
913 : */
914 0 : clear_cpu_cap(c, 0*32+31);
915 :
916 0 : if (c->x86 >= 0x10)
917 0 : set_cpu_cap(c, X86_FEATURE_REP_GOOD);
918 :
919 : /* get apicid instead of initial apic id from cpuid */
920 0 : c->apicid = hard_smp_processor_id();
921 :
922 : /* K6s report MCEs but don't actually have all the MSRs */
923 0 : if (c->x86 < 6)
924 0 : clear_cpu_cap(c, X86_FEATURE_MCE);
925 :
926 0 : switch (c->x86) {
927 0 : case 4: init_amd_k5(c); break;
928 0 : case 5: init_amd_k6(c); break;
929 0 : case 6: init_amd_k7(c); break;
930 0 : case 0xf: init_amd_k8(c); break;
931 0 : case 0x10: init_amd_gh(c); break;
932 0 : case 0x12: init_amd_ln(c); break;
933 0 : case 0x15: init_amd_bd(c); break;
934 0 : case 0x16: init_amd_jg(c); break;
935 0 : case 0x17: fallthrough;
936 0 : case 0x19: init_amd_zn(c); break;
937 : }
938 :
939 : /*
940 : * Enable the workaround for the FXSAVE leak on CPUs
941 : * without the XSaveErPtr feature.
942 : */
943 0 : if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
944 0 : set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
945 :
946 0 : cpu_detect_cache_sizes(c);
947 :
948 0 : amd_detect_cmp(c);
949 0 : amd_get_topology(c);
950 0 : srat_detect_node(c);
951 0 : amd_detect_ppin(c);
952 :
953 0 : init_amd_cacheinfo(c);
954 :
955 0 : if (cpu_has(c, X86_FEATURE_XMM2)) {
956 : /*
957 : * Use LFENCE for execution serialization. On families which
958 : * don't have that MSR, LFENCE is already serializing.
959 : * msr_set_bit() uses the safe accessors, too, even if the MSR
960 : * is not present.
961 : */
962 0 : msr_set_bit(MSR_F10H_DECFG,
963 : MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
964 :
965 : /* A serializing LFENCE stops RDTSC speculation */
966 0 : set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
967 : }
968 :
969 : /*
970 : * Family 0x12 and above processors have the APIC timer
971 : * running in deep C-states.
972 : */
973 0 : if (c->x86 > 0x11)
974 0 : set_cpu_cap(c, X86_FEATURE_ARAT);
975 :
976 : /* 3DNow or LM implies PREFETCHW */
977 0 : if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
978 0 : if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
979 0 : set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
980 :
981 : /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
982 0 : if (!cpu_has(c, X86_FEATURE_XENPV))
983 0 : set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
984 :
985 : /*
986 : * Turn on the Instructions Retired free counter on machines not
987 : * susceptible to erratum #1054 "Instructions Retired Performance
988 : * Counter May Be Inaccurate".
989 : */
990 0 : if (cpu_has(c, X86_FEATURE_IRPERF) &&
991 0 : !cpu_has_amd_erratum(c, amd_erratum_1054))
992 0 : msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
993 0 : }
994 :
995 : #ifdef CONFIG_X86_32
996 : static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
997 : {
998 : /* AMD errata T13 (order #21922) */
999 : if (c->x86 == 6) {
1000 : /* Duron Rev A0 */
1001 : if (c->x86_model == 3 && c->x86_stepping == 0)
1002 : size = 64;
1003 : /* Tbird rev A1/A2 */
1004 : if (c->x86_model == 4 &&
1005 : (c->x86_stepping == 0 || c->x86_stepping == 1))
1006 : size = 256;
1007 : }
1008 : return size;
1009 : }
1010 : #endif
1011 :
1012 0 : static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1013 : {
1014 0 : u32 ebx, eax, ecx, edx;
1015 0 : u16 mask = 0xfff;
1016 :
1017 0 : if (c->x86 < 0xf)
1018 0 : return;
1019 :
1020 0 : if (c->extended_cpuid_level < 0x80000006)
1021 : return;
1022 :
1023 0 : cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
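/*
 * Assumed CPUID Fn8000_0006 layout: EBX[27:16]/EBX[11:0] give the L2
 * DTLB/ITLB 4K entry counts, EAX holds the same split for 2M/4M pages.
 */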
1024 :
1025 0 : tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1026 0 : tlb_lli_4k[ENTRIES] = ebx & mask;
1027 :
1028 : /*
1029 : * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1030 : * characteristics from the CPUID function 0x80000005 instead.
1031 : */
1032 0 : if (c->x86 == 0xf) {
1033 0 : cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1034 0 : mask = 0xff;
1035 : }
1036 :
1037 : /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1038 0 : if (!((eax >> 16) & mask))
1039 0 : tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1040 : else
1041 0 : tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1042 :
1043 : /* a 4M entry uses two 2M entries */
1044 0 : tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1045 :
1046 : /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1047 0 : if (!(eax & mask)) {
1048 : /* Erratum 658 */
1049 0 : if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1050 0 : tlb_lli_2m[ENTRIES] = 1024;
1051 : } else {
1052 0 : cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1053 0 : tlb_lli_2m[ENTRIES] = eax & 0xff;
1054 : }
1055 : } else
1056 0 : tlb_lli_2m[ENTRIES] = eax & mask;
1057 :
1058 0 : tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1059 : }
1060 :
1061 : static const struct cpu_dev amd_cpu_dev = {
1062 : .c_vendor = "AMD",
1063 : .c_ident = { "AuthenticAMD" },
1064 : #ifdef CONFIG_X86_32
1065 : .legacy_models = {
1066 : { .family = 4, .model_names =
1067 : {
1068 : [3] = "486 DX/2",
1069 : [7] = "486 DX/2-WB",
1070 : [8] = "486 DX/4",
1071 : [9] = "486 DX/4-WB",
1072 : [14] = "Am5x86-WT",
1073 : [15] = "Am5x86-WB"
1074 : }
1075 : },
1076 : },
1077 : .legacy_cache_size = amd_size_cache,
1078 : #endif
1079 : .c_early_init = early_init_amd,
1080 : .c_detect_tlb = cpu_detect_tlb_amd,
1081 : .c_bsp_init = bsp_init_amd,
1082 : .c_init = init_amd,
1083 : .c_x86_vendor = X86_VENDOR_AMD,
1084 : };
1085 :
1086 : cpu_dev_register(amd_cpu_dev);
1087 :
1088 : /*
1089 : * AMD errata checking
1090 : *
1091 : * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1092 : * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1093 : * have an OSVW id assigned, which it takes as first argument. Both take a
1094 : * variable number of family-specific model-stepping ranges created by
1095 : * AMD_MODEL_RANGE().
1096 : *
1097 : * Example:
1098 : *
1099 : * const int amd_erratum_319[] =
1100 : * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1101 : * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1102 : * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1103 : */
1104 :
1105 : #define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
1106 : #define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
1107 : #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1108 : ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1109 : #define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
1110 : #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
1111 : #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
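/*
 * For illustration: AMD_MODEL_RANGE(0x17, 0x0, 0x0, 0x2f, 0xf) packs to
 * 0x170002ff: family 0x17 in bits 31:24, first model/stepping (0x0, 0x0)
 * in bits 23:12, last model/stepping (0x2f, 0xf) in bits 11:0.
 * cpu_has_amd_erratum() compares ms = (model << 4) | stepping against
 * those two 12-bit endpoints.
 */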
1112 :
1113 : static const int amd_erratum_400[] =
1114 : AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1115 : AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1116 :
1117 : static const int amd_erratum_383[] =
1118 : AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1119 :
1120 : /* #1054: Instructions Retired Performance Counter May Be Inaccurate */
1121 : static const int amd_erratum_1054[] =
1122 : AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
1123 :
1124 0 : static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1125 : {
1126 0 : int osvw_id = *erratum++;
1127 0 : u32 range;
1128 0 : u32 ms;
1129 :
1130 0 : if (osvw_id >= 0 && osvw_id < 65536 &&
1131 0 : cpu_has(cpu, X86_FEATURE_OSVW)) {
1132 0 : u64 osvw_len;
1133 :
1134 0 : rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1135 0 : if (osvw_id < osvw_len) {
1136 0 : u64 osvw_bits;
1137 :
1138 0 : rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1139 : osvw_bits);
1140 0 : return osvw_bits & (1ULL << (osvw_id & 0x3f));
1141 : }
1142 : }
1143 :
1144 : /* OSVW unavailable or ID unknown, match family-model-stepping range */
1145 0 : ms = (cpu->x86_model << 4) | cpu->x86_stepping;
1146 0 : while ((range = *erratum++))
1147 0 : if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1148 0 : (ms >= AMD_MODEL_RANGE_START(range)) &&
1149 0 : (ms <= AMD_MODEL_RANGE_END(range)))
1150 : return true;
1151 :
1152 : return false;
1153 : }
1154 :
1155 0 : void set_dr_addr_mask(unsigned long mask, int dr)
1156 : {
1157 0 : if (!boot_cpu_has(X86_FEATURE_BPEXT))
1158 : return;
1159 :
1160 0 : switch (dr) {
1161 0 : case 0:
1162 0 : wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1163 : break;
1164 0 : case 1:
1165 : case 2:
1166 : case 3:
1167 0 : wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1168 : break;
1169 : default:
1170 : break;
1171 : }
1172 : }