LCOV - code coverage report
Current view: top level - arch/x86/kernel/cpu/mtrr - generic.c (source / functions) Hit Total Coverage
Test: landlock.info Lines: 95 386 24.6 %
Date: 2021-04-22 12:43:58 Functions: 9 29 31.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
       4             :  * because MTRRs can span up to 40 bits (36bits on most modern x86)
       5             :  */
       6             : 
       7             : #include <linux/export.h>
       8             : #include <linux/init.h>
       9             : #include <linux/io.h>
      10             : #include <linux/mm.h>
      11             : 
      12             : #include <asm/processor-flags.h>
      13             : #include <asm/cpufeature.h>
      14             : #include <asm/tlbflush.h>
      15             : #include <asm/mtrr.h>
      16             : #include <asm/msr.h>
      17             : #include <asm/memtype.h>
      18             : 
      19             : #include "mtrr.h"
      20             : 
      21             : struct fixed_range_block {
      22             :         int base_msr;           /* start address of an MTRR block */
      23             :         int ranges;             /* number of MTRRs in this block  */
      24             : };
      25             : 
      26             : static struct fixed_range_block fixed_range_blocks[] = {
      27             :         { MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
      28             :         { MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
      29             :         { MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
      30             :         {}
      31             : };
      32             : 
      33             : static unsigned long smp_changes_mask;
      34             : static int mtrr_state_set;
      35             : u64 mtrr_tom2;
      36             : 
      37             : struct mtrr_state_type mtrr_state;
      38             : EXPORT_SYMBOL_GPL(mtrr_state);
      39             : 
      40             : /*
      41             :  * BIOS is expected to clear MtrrFixDramModEn bit, see for example
      42             :  * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
      43             :  * Opteron Processors" (26094 Rev. 3.30 February 2006), section
      44             :  * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
      45             :  * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
      46             :  * 0 for operation."
      47             :  */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	/* Only relevant on AMD family 0x0f and later (K8 SYSCFG MSR). */
	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		/*
		 * Firmware left MtrrFixDramModEn set; per the AMD BKDG the
		 * bit must be 0 during normal operation, so warn (FW_WARN
		 * tags it as a firmware issue) and clear it ourselves.
		 */
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
      65             : 
/*
 * Get the size of a contiguous MTRR range from its mask.
 *
 * The variable-MTRR mask has 1s in every address bit that must match the
 * base, so the run of 0s at the bottom encodes the range size.
 */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;	/* work in page-frame units */
	mask |= size_or_mask;	/* force bits above the CPU's phys-addr width to 1 */
	size = -mask;		/* two's complement turns the low zero-run into the size */
	size <<= PAGE_SHIFT;	/* back to bytes */
	return size;
}
      77             : 
      78             : /*
      79             :  * Check and return the effective type for MTRR-MTRR type overlap.
      80             :  * Returns 1 if the effective type is UNCACHEABLE, else returns 0
      81             :  */
      82           0 : static int check_type_overlap(u8 *prev, u8 *curr)
      83             : {
      84           0 :         if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
      85           0 :                 *prev = MTRR_TYPE_UNCACHABLE;
      86           0 :                 *curr = MTRR_TYPE_UNCACHABLE;
      87           0 :                 return 1;
      88             :         }
      89             : 
      90           0 :         if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
      91           0 :             (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
      92           0 :                 *prev = MTRR_TYPE_WRTHROUGH;
      93           0 :                 *curr = MTRR_TYPE_WRTHROUGH;
      94             :         }
      95             : 
      96           0 :         if (*prev != *curr) {
      97           0 :                 *prev = MTRR_TYPE_UNCACHABLE;
      98           0 :                 *curr = MTRR_TYPE_UNCACHABLE;
      99           0 :                 return 1;
     100             :         }
     101             : 
     102             :         return 0;
     103             : }
     104             : 
     105             : /**
     106             :  * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
     107             :  *
     108             :  * Return the MTRR fixed memory type of 'start'.
     109             :  *
     110             :  * MTRR fixed entries are divided into the following ways:
     111             :  *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
     112             :  *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
     113             :  *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
     114             :  *
     115             :  * Return Values:
     116             :  * MTRR_TYPE_(type)  - Matched memory type
     117             :  * MTRR_TYPE_INVALID - Unmatched
     118             :  */
     119           0 : static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
     120             : {
     121           0 :         int idx;
     122             : 
     123           0 :         if (start >= 0x100000)
     124             :                 return MTRR_TYPE_INVALID;
     125             : 
     126             :         /* 0x0 - 0x7FFFF */
     127           0 :         if (start < 0x80000) {
     128           0 :                 idx = 0;
     129           0 :                 idx += (start >> 16);
     130           0 :                 return mtrr_state.fixed_ranges[idx];
     131             :         /* 0x80000 - 0xBFFFF */
     132           0 :         } else if (start < 0xC0000) {
     133           0 :                 idx = 1 * 8;
     134           0 :                 idx += ((start - 0x80000) >> 14);
     135           0 :                 return mtrr_state.fixed_ranges[idx];
     136             :         }
     137             : 
     138             :         /* 0xC0000 - 0xFFFFF */
     139           0 :         idx = 3 * 8;
     140           0 :         idx += ((start - 0xC0000) >> 12);
     141           0 :         return mtrr_state.fixed_ranges[idx];
     142             : }
     143             : 
/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Scans all variable-range MTRRs that overlap [start:end] (end inclusive)
 * and combines their types via check_type_overlap().  Falls back to the
 * default type when no entry matches @start.
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across MTRR range and type
 *          returned corresponds only to [start:*partial_end].  Caller has
 *          to lookup again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *           region is fully covered by a single MTRR entry or the default
 *           type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		/* Bit 11 of MTRRphysMask is the valid ("V") bit; skip disabled entries. */
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		/* Reassemble the 64-bit page-aligned base and mask from the MSR halves. */
		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		/* Does each endpoint of the region fall inside this MTRR? */
		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		/* Region strictly surrounds the MTRR's base. */
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 * (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 * (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			/* Should be impossible; guarantee forward progress anyway. */
			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		/* This entry does not cover @start; it cannot contribute a type. */
		if ((start & mask) != (base & mask))
			continue;

		/* Low byte of MTRRphysBase holds the memory type. */
		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		/* Multiple entries match: resolve via MTRR precedence rules. */
		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	/* No variable MTRR matched @start: the default type applies. */
	return mtrr_state.def_type;
}
     241             : 
     242             : /**
     243             :  * mtrr_type_lookup - look up memory type in MTRR
     244             :  *
     245             :  * Return Values:
     246             :  * MTRR_TYPE_(type)  - The effective MTRR type for the region
     247             :  * MTRR_TYPE_INVALID - MTRR is disabled
     248             :  *
     249             :  * Output Argument:
     250             :  * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
     251             :  *           region is fully covered by a single MTRR entry or the default
     252             :  *           type.
     253             :  */
     254          68 : u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
     255             : {
     256          68 :         u8 type, prev_type, is_uniform = 1, dummy;
     257          68 :         int repeat;
     258          68 :         u64 partial_end;
     259             : 
     260             :         /* Make end inclusive instead of exclusive */
     261          68 :         end--;
     262             : 
     263          68 :         if (!mtrr_state_set)
     264             :                 return MTRR_TYPE_INVALID;
     265             : 
     266          68 :         if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
     267             :                 return MTRR_TYPE_INVALID;
     268             : 
     269             :         /*
     270             :          * Look up the fixed ranges first, which take priority over
     271             :          * the variable ranges.
     272             :          */
     273           0 :         if ((start < 0x100000) &&
     274           0 :             (mtrr_state.have_fixed) &&
     275             :             (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
     276           0 :                 is_uniform = 0;
     277           0 :                 type = mtrr_type_lookup_fixed(start, end);
     278           0 :                 goto out;
     279             :         }
     280             : 
     281             :         /*
     282             :          * Look up the variable ranges.  Look of multiple ranges matching
     283             :          * this address and pick type as per MTRR precedence.
     284             :          */
     285           0 :         type = mtrr_type_lookup_variable(start, end, &partial_end,
     286             :                                          &repeat, &is_uniform);
     287             : 
     288             :         /*
     289             :          * Common path is with repeat = 0.
     290             :          * However, we can have cases where [start:end] spans across some
     291             :          * MTRR ranges and/or the default type.  Do repeated lookups for
     292             :          * that case here.
     293             :          */
     294           0 :         while (repeat) {
     295           0 :                 prev_type = type;
     296           0 :                 start = partial_end;
     297           0 :                 is_uniform = 0;
     298           0 :                 type = mtrr_type_lookup_variable(start, end, &partial_end,
     299             :                                                  &repeat, &dummy);
     300             : 
     301           0 :                 if (check_type_overlap(&prev_type, &type))
     302           0 :                         goto out;
     303             :         }
     304             : 
     305           0 :         if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
     306           0 :                 type = MTRR_TYPE_WRBACK;
     307             : 
     308           0 : out:
     309           0 :         *uniform = is_uniform;
     310           0 :         return type;
     311             : }
     312             : 
/*
 * Get the MSR pair relating to a var range.
 * Reads MTRRphysBase and MTRRphysMask for @index into @vr.
 */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
     320             : 
     321             : /* Fill the MSR pair relating to a var range */
     322           0 : void fill_mtrr_var_range(unsigned int index,
     323             :                 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
     324             : {
     325           0 :         struct mtrr_var_range *vr;
     326             : 
     327           0 :         vr = mtrr_state.var_ranges;
     328             : 
     329           0 :         vr[index].base_lo = base_lo;
     330           0 :         vr[index].base_hi = base_hi;
     331           0 :         vr[index].mask_lo = mask_lo;
     332           0 :         vr[index].mask_hi = mask_hi;
     333           0 : }
     334             : 
/*
 * Read all eleven fixed-range MTRR MSRs into @frs.
 *
 * The mtrr_type array is viewed as an array of 32-bit words (each MSR
 * contributes a lo/hi pair), so the layout is:
 *   p[0..1]  = MTRRfix64K_00000
 *   p[2..5]  = MTRRfix16K_80000 .. MTRRfix16K_A0000
 *   p[6..21] = MTRRfix4K_C0000 .. MTRRfix4K_F8000
 */
static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	/* Make sure firmware left SYSCFG[MtrrFixDramModEn] clear first. */
	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
     349             : 
/*
 * Snapshot the fixed-range MTRRs into mtrr_state.
 * @info is unused; the signature matches the smp_call_function() callback
 * convention so this can run on each CPU.
 */
void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
     355             : 
/*
 * Accumulator used by print_fixed()/print_fixed_last() to coalesce
 * adjacent fixed-range entries of the same type into a single debug line.
 * last_fixed_end == 0 means no segment is pending.
 */
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
     359             : 
     360           1 : static void __init print_fixed_last(void)
     361             : {
     362           1 :         if (!last_fixed_end)
     363             :                 return;
     364             : 
     365           1 :         pr_debug("  %05X-%05X %s\n", last_fixed_start,
     366             :                  last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
     367             : 
     368           1 :         last_fixed_end = 0;
     369             : }
     370             : 
     371           1 : static void __init update_fixed_last(unsigned base, unsigned end,
     372             :                                      mtrr_type type)
     373             : {
     374           1 :         last_fixed_start = base;
     375           1 :         last_fixed_end = end;
     376           1 :         last_fixed_type = type;
     377           0 : }
     378             : 
     379             : static void __init
     380          11 : print_fixed(unsigned base, unsigned step, const mtrr_type *types)
     381             : {
     382          11 :         unsigned i;
     383             : 
     384          99 :         for (i = 0; i < 8; ++i, ++types, base += step) {
     385          88 :                 if (last_fixed_end == 0) {
     386           1 :                         update_fixed_last(base, base + step, *types);
     387           1 :                         continue;
     388             :                 }
     389          87 :                 if (last_fixed_end == base && last_fixed_type == *types) {
     390          87 :                         last_fixed_end = base + step;
     391          87 :                         continue;
     392             :                 }
     393             :                 /* new segments: gap or different type */
     394           0 :                 print_fixed_last();
     395          88 :                 update_fixed_last(base, base + step, *types);
     396             :         }
     397          11 : }
     398             : 
/* Defined later in this file; needed by mtrr_bp_pat_init() below. */
static void prepare_set(void);
static void post_set(void);
     401             : 
/* Dump the cached MTRR configuration (default type, fixed and variable
 * ranges, and AMD TOM2 if present) at pr_debug level. */
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		/* 0x00000-0x7FFFF: one MSR of eight 64K entries. */
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		/* 0x80000-0xBFFFF: two MSRs of eight 16K entries each. */
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		/* 0xC0000-0xFFFFF: eight MSRs of eight 4K entries each. */
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	/* Hex digits needed to print physical addresses above bit 32. */
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		/* Bit 11 of mask_lo is the entry's valid bit. */
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
     446             : 
/*
 * PAT setup for BP. We need to go through sync steps here.
 *
 * pat_init() must run with caches disabled and MTRRs quiesced, so wrap
 * it in the prepare_set()/post_set() sequence with interrupts off.
 */
void __init mtrr_bp_pat_init(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
     460             : 
/*
 * Grab all of the MTRR state for this CPU into mtrr_state.
 *
 * Returns true if MTRRs are enabled (MTRRdefType.E set).
 * Also marks mtrr_state_set so mtrr_type_lookup() becomes usable.
 */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	/* MTRRcap bit 8 (FIX): fixed-range MTRRs supported. */
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	/* Bits 11:10 of MTRRdefType: E (enable) and FE (fixed enable). */
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		/* Mask TOM2 down to its architectural 8MB granularity. */
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}
     499             : 
     500             : /* Some BIOS's are messed up and don't set all MTRRs the same! */
     501           0 : void __init mtrr_state_warn(void)
     502             : {
     503           0 :         unsigned long mask = smp_changes_mask;
     504             : 
     505           0 :         if (!mask)
     506             :                 return;
     507           0 :         if (mask & MTRR_CHANGE_MASK_FIXED)
     508           0 :                 pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
     509           0 :         if (mask & MTRR_CHANGE_MASK_VARIABLE)
     510           0 :                 pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
     511           0 :         if (mask & MTRR_CHANGE_MASK_DEFTYPE)
     512           0 :                 pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
     513             : 
     514           0 :         pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
     515           0 :         pr_info("mtrr: corrected configuration.\n");
     516             : }
     517             : 
     518             : /*
     519             :  * Doesn't attempt to pass an error out to MTRR users
     520             :  * because it's quite complicated in some cases and probably not
     521             :  * worth it because the best error handling is to ignore it.
     522             :  */
     523           0 : void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
     524             : {
     525           0 :         if (wrmsr_safe(msr, a, b) < 0) {
     526           0 :                 pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
     527             :                         smp_processor_id(), msr, a, b);
     528             :         }
     529           0 : }
     530             : 
     531             : /**
     532             :  * set_fixed_range - checks & updates a fixed-range MTRR if it
     533             :  *                   differs from the value it should have
     534             :  * @msr: MSR address of the MTTR which should be checked and updated
     535             :  * @changed: pointer which indicates whether the MTRR needed to be changed
     536             :  * @msrwords: pointer to the MSR values which the MSR should have
     537             :  */
     538           0 : static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
     539             : {
     540           0 :         unsigned lo, hi;
     541             : 
     542           0 :         rdmsr(msr, lo, hi);
     543             : 
     544           0 :         if (lo != msrwords[0] || hi != msrwords[1]) {
     545           0 :                 mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
     546           0 :                 *changed = true;
     547             :         }
     548           0 : }
     549             : 
     550             : /**
     551             :  * generic_get_free_region - Get a free MTRR.
     552             :  * @base: The starting (base) address of the region.
     553             :  * @size: The size (in bytes) of the region.
     554             :  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
     555             :  *
     556             :  * Returns: The index of the region on success, else negative on error.
     557             :  */
     558             : int
     559           0 : generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
     560             : {
     561           0 :         unsigned long lbase, lsize;
     562           0 :         mtrr_type ltype;
     563           0 :         int i, max;
     564             : 
     565           0 :         max = num_var_ranges;
     566           0 :         if (replace_reg >= 0 && replace_reg < max)
     567             :                 return replace_reg;
     568             : 
     569           0 :         for (i = 0; i < max; ++i) {
     570           0 :                 mtrr_if->get(i, &lbase, &lsize, &ltype);
     571           0 :                 if (lsize == 0)
     572           0 :                         return i;
     573             :         }
     574             : 
     575             :         return -ENOSPC;
     576             : }
     577             : 
/*
 * generic_get_mtrr - read variable-range MTRR @reg into @base/@size/@type.
 *
 * @base and @size are reported in page units (values are shifted by
 * PAGE_SHIFT). If the valid bit (bit 11) of the mask MSR is clear the
 * register is free and all three outputs are zeroed.
 */
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	/* 0x800 is the valid bit of the mask MSR. */
	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		/*
		 * A properly formed mask already has all bits above its
		 * highest set bit equal to 1; a mismatch here means the
		 * firmware programmed a non-contiguous mask.
		 */
		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
     630             : 
     631             : /**
     632             :  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
     633             :  *                    differ from the saved set
     634             :  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
     635             :  */
     636           0 : static int set_fixed_ranges(mtrr_type *frs)
     637             : {
     638           0 :         unsigned long long *saved = (unsigned long long *)frs;
     639           0 :         bool changed = false;
     640           0 :         int block = -1, range;
     641             : 
     642           0 :         k8_check_syscfg_dram_mod_en();
     643             : 
     644           0 :         while (fixed_range_blocks[++block].ranges) {
     645           0 :                 for (range = 0; range < fixed_range_blocks[block].ranges; range++)
     646           0 :                         set_fixed_range(fixed_range_blocks[block].base_msr + range,
     647             :                                         &changed, (unsigned int *)saved++);
     648             :         }
     649             : 
     650           0 :         return changed;
     651             : }
     652             : 
/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	/*
	 * Compare only meaningful bits: 0xfffff0ffUL keeps the type field
	 * and address bits of the base MSR while skipping bits 8-11
	 * (reserved per the MTRR register layout); the high halves are
	 * clipped to the supported physical address width via
	 * size_and_mask.
	 */
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	/* 0xfffff800UL keeps the valid bit (11) plus the address bits. */
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
     681             : 
     682             : static u32 deftype_lo, deftype_hi;
     683             : 
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * Programs the variable ranges, the fixed ranges and MTRRdefType from
 * the saved mtrr_state, touching only registers that already differ.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	/* Bits 0-7 hold the default type, bits 10-11 the enable flags. */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
     717             : 
     718             : 
/* CR4 value saved by prepare_set() and restored by post_set() when PGE is set. */
static unsigned long cr4;
/* Serializes the prepare_set()/post_set() MTRR update sequence. */
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
     721             : 
/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 *
 * The statement order below follows the documented MTRR update
 * sequence (disable caching, flush, disable MTRRs) and must not be
 * rearranged.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);

	/*
	 * Cache flushing is the most time-consuming step when programming
	 * the MTRRs. Fortunately, as per the Intel Software Development
	 * Manual, we can skip it if the processor supports cache self-
	 * snooping.
	 */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	flush_tlb_local();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/*
	 * Disable MTRRs, and set the default type to uncached:
	 * ~0xcff clears the default-type field (0xff) and the MTRR
	 * enable bits (0xc00).
	 */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);

	/* Again, only flush caches if we have to. */
	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
		wbinvd();
}
     775             : 
/*
 * Undo prepare_set() in reverse order: restore MTRRdefType, re-enable
 * caching and global pages, then release the update lock.
 */
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	flush_tlb_local();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (boot_cpu_has(X86_FEATURE_PGE))
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}
     793             : 
     794           0 : static void generic_set_all(void)
     795             : {
     796           0 :         unsigned long mask, count;
     797           0 :         unsigned long flags;
     798             : 
     799           0 :         local_irq_save(flags);
     800           0 :         prepare_set();
     801             : 
     802             :         /* Actually set the state */
     803           0 :         mask = set_mtrr_state();
     804             : 
     805             :         /* also set PAT */
     806           0 :         pat_init();
     807             : 
     808           0 :         post_set();
     809           0 :         local_irq_restore(flags);
     810             : 
     811             :         /* Use the atomic bitops to update the global mask */
     812           0 :         for (count = 0; count < sizeof(mask) * 8; ++count) {
     813           0 :                 if (mask & 0x01)
     814           0 :                         set_bit(count, &smp_changes_mask);
     815           0 :                 mask >>= 1;
     816             :         }
     817             : 
     818           0 : }
     819             : 
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region, in pages.
 * @size: The size of the region, in pages. If this is 0 the region is
 *	disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		/*
		 * Pack the page-unit base/size into the MSR pair: the low
		 * word carries the type (base) and the valid bit 0x800
		 * (mask); the high words are clipped via size_and_mask.
		 */
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
     861             : 
/*
 * generic_validate_add_page - sanity-check a prospective MTRR region.
 * @base: base of the region, in pages (the pr_warn format appends "000").
 * @size: size of the region, in pages.
 * @type: memory type requested for the region.
 *
 * Returns 0 when @base/@size form a valid region, -EINVAL otherwise.
 */
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		/* 0x70000/0x7003F are the page numbers of the quirky window. */
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	/*
	 * The loop strips trailing 0 bits of base paired with trailing 1
	 * bits of last; lbase == last afterwards iff size is a power of
	 * two and base is aligned to it (i.e. a contiguous MTRR range).
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
     900             : 
     901           0 : static int generic_have_wrcomb(void)
     902             : {
     903           0 :         unsigned long config, dummy;
     904           0 :         rdmsr(MSR_MTRRcap, config, dummy);
     905           0 :         return config & (1 << 10);
     906             : }
     907             : 
     908           0 : int positive_have_wrcomb(void)
     909             : {
     910           0 :         return 1;
     911             : }
     912             : 
/*
 * Generic structure...
 *
 * mtrr_ops implementation for CPUs exposing the Intel-style (generic)
 * MTRR interface; each hook points at one of the routines above.
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};

Generated by: LCOV version 1.14