LCOV - code coverage report
Current view: top level - arch/x86/mm - init.c
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:           261      332      78.6 %
Functions:        24       31      77.4 %

          Line data    Source code
       1             : #include <linux/gfp.h>
       2             : #include <linux/initrd.h>
       3             : #include <linux/ioport.h>
       4             : #include <linux/swap.h>
       5             : #include <linux/memblock.h>
       6             : #include <linux/swapfile.h>
       7             : #include <linux/swapops.h>
       8             : #include <linux/kmemleak.h>
       9             : #include <linux/sched/task.h>
      10             : 
      11             : #include <asm/set_memory.h>
      12             : #include <asm/e820/api.h>
      13             : #include <asm/init.h>
      14             : #include <asm/page.h>
      15             : #include <asm/page_types.h>
      16             : #include <asm/sections.h>
      17             : #include <asm/setup.h>
      18             : #include <asm/tlbflush.h>
      19             : #include <asm/tlb.h>
      20             : #include <asm/proto.h>
      21             : #include <asm/dma.h>              /* for MAX_DMA_PFN */
      22             : #include <asm/microcode.h>
      23             : #include <asm/kaslr.h>
      24             : #include <asm/hypervisor.h>
      25             : #include <asm/cpufeature.h>
      26             : #include <asm/pti.h>
      27             : #include <asm/text-patching.h>
      28             : #include <asm/memtype.h>
      29             : 
      30             : /*
      31             :  * We need to define the tracepoints somewhere, and tlb.c
       32             :  * is only compiled when SMP=y.
      33             :  */
      34             : #define CREATE_TRACE_POINTS
      35             : #include <trace/events/tlb.h>
      36             : 
      37             : #include "mm_internal.h"
      38             : 
      39             : /*
      40             :  * Tables translating between page_cache_type_t and pte encoding.
      41             :  *
      42             :  * The default values are defined statically as minimal supported mode;
      43             :  * WC and WT fall back to UC-.  pat_init() updates these values to support
      44             :  * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
      45             :  * for the details.  Note, __early_ioremap() used during early boot-time
      46             :  * takes pgprot_t (pte encoding) and does not use these tables.
      47             :  *
      48             :  *   Index into __cachemode2pte_tbl[] is the cachemode.
      49             :  *
       50             :  *   The index into __pte2cachemode_tbl[] is built from the caching attribute
       51             :  *   bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
      52             :  */
      53             : static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
      54             :         [_PAGE_CACHE_MODE_WB      ]     = 0         | 0        ,
      55             :         [_PAGE_CACHE_MODE_WC      ]     = 0         | _PAGE_PCD,
      56             :         [_PAGE_CACHE_MODE_UC_MINUS]     = 0         | _PAGE_PCD,
      57             :         [_PAGE_CACHE_MODE_UC      ]     = _PAGE_PWT | _PAGE_PCD,
      58             :         [_PAGE_CACHE_MODE_WT      ]     = 0         | _PAGE_PCD,
      59             :         [_PAGE_CACHE_MODE_WP      ]     = 0         | _PAGE_PCD,
      60             : };
      61             : 
      62           5 : unsigned long cachemode2protval(enum page_cache_mode pcm)
      63             : {
      64           5 :         if (likely(pcm == 0))
      65             :                 return 0;
      66           5 :         return __cachemode2pte_tbl[pcm];
      67             : }
      68             : EXPORT_SYMBOL(cachemode2protval);
      69             : 
      70             : static uint8_t __pte2cachemode_tbl[8] = {
      71             :         [__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
      72             :         [__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
      73             :         [__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
      74             :         [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
      75             :         [__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
      76             :         [__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
      77             :         [__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
      78             :         [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
      79             : };
      80             : 
      81             : /* Check that the write-protect PAT entry is set for write-protect */
      82           0 : bool x86_has_pat_wp(void)
      83             : {
      84           0 :         return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
      85             : }
      86             : 
      87          16 : enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
      88             : {
      89          16 :         unsigned long masked;
      90             : 
      91          16 :         masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
      92          16 :         if (likely(masked == 0))
      93             :                 return 0;
      94           0 :         return __pte2cachemode_tbl[__pte2cm_idx(masked)];
      95             : }
      96             : 
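
The comment above describes how the index into __pte2cachemode_tbl[] is packed from the PTE caching attribute bits. A minimal user-space sketch of that packing, assuming the conventional x86 PTE bit positions (PWT = bit 3, PCD = bit 4, PAT = bit 7); the macro names and positions below are illustrative assumptions, not definitions copied from this file:

#include <stdio.h>

#define PTE_PWT (1u << 3)   /* assumed position of _PAGE_PWT */
#define PTE_PCD (1u << 4)   /* assumed position of _PAGE_PCD */
#define PTE_PAT (1u << 7)   /* assumed position of _PAGE_PAT */

/* Compress the scattered caching bits into index bits 0, 1, 2. */
static unsigned pte2cm_idx(unsigned prot)
{
        return ((prot & PTE_PWT) >> 3) |
               ((prot & PTE_PCD) >> 3) |
               ((prot & PTE_PAT) >> 5);
}

int main(void)
{
        printf("WB  (no bits)  -> idx %u\n", pte2cm_idx(0));
        printf("UC- (PCD)      -> idx %u\n", pte2cm_idx(PTE_PCD));
        printf("UC  (PWT|PCD)  -> idx %u\n", pte2cm_idx(PTE_PWT | PTE_PCD));
        return 0;
}
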
      97             : static unsigned long __initdata pgt_buf_start;
      98             : static unsigned long __initdata pgt_buf_end;
      99             : static unsigned long __initdata pgt_buf_top;
     100             : 
     101             : static unsigned long min_pfn_mapped;
     102             : 
     103             : static bool __initdata can_use_brk_pgt = true;
     104             : 
     105             : /*
     106             :  * Pages returned are already directly mapped.
     107             :  *
     108             :  * Changing that is likely to break Xen, see commit:
     109             :  *
     110             :  *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
     111             :  *
     112             :  * for detailed information.
     113             :  */
     114           3 : __ref void *alloc_low_pages(unsigned int num)
     115             : {
     116           3 :         unsigned long pfn;
     117           3 :         int i;
     118             : 
     119           3 :         if (after_bootmem) {
     120           0 :                 unsigned int order;
     121             : 
     122           0 :                 order = get_order((unsigned long)num << PAGE_SHIFT);
     123           0 :                 return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
     124             :         }
     125             : 
     126           3 :         if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
     127           0 :                 unsigned long ret = 0;
     128             : 
     129           0 :                 if (min_pfn_mapped < max_pfn_mapped) {
     130           0 :                         ret = memblock_find_in_range(
     131           0 :                                         min_pfn_mapped << PAGE_SHIFT,
     132           0 :                                         max_pfn_mapped << PAGE_SHIFT,
     133           0 :                                         PAGE_SIZE * num , PAGE_SIZE);
     134             :                 }
     135           0 :                 if (ret)
     136           0 :                         memblock_reserve(ret, PAGE_SIZE * num);
     137           0 :                 else if (can_use_brk_pgt)
     138           0 :                         ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
     139             : 
     140           0 :                 if (!ret)
     141           0 :                         panic("alloc_low_pages: can not alloc memory");
     142             : 
     143           0 :                 pfn = ret >> PAGE_SHIFT;
     144             :         } else {
     145           3 :                 pfn = pgt_buf_end;
     146           3 :                 pgt_buf_end += num;
     147             :         }
     148             : 
     149           6 :         for (i = 0; i < num; i++) {
     150           3 :                 void *adr;
     151             : 
     152           3 :                 adr = __va((pfn + i) << PAGE_SHIFT);
     153           3 :                 clear_page(adr);
     154             :         }
     155             : 
     156           3 :         return __va(pfn << PAGE_SHIFT);
     157             : }
     158             : 
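
alloc_low_pages() above is essentially a bump allocator over a pre-reserved buffer: pgt_buf_end advances between pgt_buf_start and pgt_buf_top, and a fallback allocator (memblock or brk extension in the real code) takes over when the buffer is exhausted or cannot be used. A stand-alone sketch of that pattern; the buffer size, page size, and helper names are made-up illustration values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ   4096u           /* assumed page size       */
#define BUF_PAGES 6u              /* made-up brk buffer size */

static unsigned char pgt_buf[BUF_PAGES * PAGE_SZ];
static unsigned pgt_buf_end;      /* index of next free page */
static int used_fallback;

static void *alloc_low_pages_sketch(unsigned num)
{
        if (pgt_buf_end + num > BUF_PAGES) {      /* buffer exhausted */
                used_fallback = 1;
                return calloc(num, PAGE_SZ);      /* fallback path    */
        }

        void *p = pgt_buf + (size_t)pgt_buf_end * PAGE_SZ;
        pgt_buf_end += num;
        memset(p, 0, (size_t)num * PAGE_SZ);      /* clear_page() analogue */
        return p;
}

int main(void)
{
        void *a = alloc_low_pages_sketch(4);      /* fits in the buffer    */
        void *b = alloc_low_pages_sketch(4);      /* overflows, falls back */

        printf("pages used from buffer: %u, fallback used: %d\n",
               pgt_buf_end, used_fallback);
        free(b);
        (void)a;
        return 0;
}
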
     159             : /*
      160             :  * By default we need to be able to allocate page tables below the PGD,
      161             :  * firstly for the 0-ISA_END_ADDRESS range and secondly for the initial
      162             :  * PMD_SIZE mapping. Depending on the machine's e820 memory and the PUD
      163             :  * alignment, twice that many pages may be needed when KASLR memory
      164             :  * randomization is enabled.
     165             :  */
     166             : 
     167             : #ifndef CONFIG_X86_5LEVEL
     168             : #define INIT_PGD_PAGE_TABLES    3
     169             : #else
     170             : #define INIT_PGD_PAGE_TABLES    4
     171             : #endif
     172             : 
     173             : #ifndef CONFIG_RANDOMIZE_MEMORY
     174             : #define INIT_PGD_PAGE_COUNT      (2 * INIT_PGD_PAGE_TABLES)
     175             : #else
     176             : #define INIT_PGD_PAGE_COUNT      (4 * INIT_PGD_PAGE_TABLES)
     177             : #endif
     178             : 
     179             : #define INIT_PGT_BUF_SIZE       (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
     180           0 : RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
     181           1 : void  __init early_alloc_pgt_buf(void)
     182             : {
     183           1 :         unsigned long tables = INIT_PGT_BUF_SIZE;
     184           1 :         phys_addr_t base;
     185             : 
     186           1 :         base = __pa(extend_brk(tables, PAGE_SIZE));
     187             : 
     188           1 :         pgt_buf_start = base >> PAGE_SHIFT;
     189           1 :         pgt_buf_end = pgt_buf_start;
     190           1 :         pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
     191           1 : }
     192             : 
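
Working through the sizing macros above for one plausible configuration (4 KiB pages, CONFIG_X86_5LEVEL unset, CONFIG_RANDOMIZE_MEMORY set): INIT_PGD_PAGE_TABLES is 3, INIT_PGD_PAGE_COUNT is 4 * 3 = 12, so early_alloc_pgt_buf() extends the brk by 12 pages, i.e. 48 KiB. A throwaway check of that arithmetic, with the configuration values treated as assumptions:

#include <stdio.h>

int main(void)
{
        const unsigned page_size  = 4096;           /* assumed PAGE_SIZE       */
        const unsigned pgd_tables = 3;              /* !CONFIG_X86_5LEVEL      */
        const unsigned pgd_count  = 4 * pgd_tables; /* CONFIG_RANDOMIZE_MEMORY */
        const unsigned buf_size   = pgd_count * page_size;

        printf("INIT_PGT_BUF_SIZE = %u bytes (%u KiB, %u pages)\n",
               buf_size, buf_size / 1024, pgd_count);
        return 0;
}
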
     193             : int after_bootmem;
     194             : 
     195           0 : early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
     196             : 
     197             : struct map_range {
     198             :         unsigned long start;
     199             :         unsigned long end;
     200             :         unsigned page_size_mask;
     201             : };
     202             : 
     203             : static int page_size_mask;
     204             : 
     205             : /*
      206             :  * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
      207             :  * enable and PPro Global page enable), so that any CPUs that boot
     208             :  * up after us can get the correct flags. Invoked on the boot CPU.
     209             :  */
     210           2 : static inline void cr4_set_bits_and_update_boot(unsigned long mask)
     211             : {
     212           2 :         mmu_cr4_features |= mask;
     213           2 :         if (trampoline_cr4_features)
     214           0 :                 *trampoline_cr4_features = mmu_cr4_features;
     215           2 :         cr4_set_bits(mask);
     216           2 : }
     217             : 
     218           1 : static void __init probe_page_size_mask(void)
     219             : {
     220             :         /*
     221             :          * For pagealloc debugging, identity mapping will use small pages.
     222             :          * This will simplify cpa(), which otherwise needs to support splitting
     223             :          * large pages into small in interrupt context, etc.
     224             :          */
     225           1 :         if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
     226           1 :                 page_size_mask |= 1 << PG_LEVEL_2M;
     227             :         else
     228             :                 direct_gbpages = 0;
     229             : 
     230             :         /* Enable PSE if available */
     231           1 :         if (boot_cpu_has(X86_FEATURE_PSE))
     232           1 :                 cr4_set_bits_and_update_boot(X86_CR4_PSE);
     233             : 
     234             :         /* Enable PGE if available */
     235           1 :         __supported_pte_mask &= ~_PAGE_GLOBAL;
     236           1 :         if (boot_cpu_has(X86_FEATURE_PGE)) {
     237           1 :                 cr4_set_bits_and_update_boot(X86_CR4_PGE);
     238           1 :                 __supported_pte_mask |= _PAGE_GLOBAL;
     239             :         }
     240             : 
      241             :         /* By default, everything is supported: */
     242           1 :         __default_kernel_pte_mask = __supported_pte_mask;
     243             :         /* Except when with PTI where the kernel is mostly non-Global: */
     244           1 :         if (cpu_feature_enabled(X86_FEATURE_PTI))
     245             :                 __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
     246             : 
     247             :         /* Enable 1 GB linear kernel mappings if available: */
     248           2 :         if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
     249           1 :                 printk(KERN_INFO "Using GB pages for direct mapping\n");
     250           1 :                 page_size_mask |= 1 << PG_LEVEL_1G;
     251             :         } else {
     252           0 :                 direct_gbpages = 0;
     253             :         }
     254           1 : }
     255             : 
     256           1 : static void setup_pcid(void)
     257             : {
     258           1 :         if (!IS_ENABLED(CONFIG_X86_64))
     259             :                 return;
     260             : 
     261           1 :         if (!boot_cpu_has(X86_FEATURE_PCID))
     262             :                 return;
     263             : 
     264           1 :         if (boot_cpu_has(X86_FEATURE_PGE)) {
     265             :                 /*
     266             :                  * This can't be cr4_set_bits_and_update_boot() -- the
     267             :                  * trampoline code can't handle CR4.PCIDE and it wouldn't
     268             :                  * do any good anyway.  Despite the name,
     269             :                  * cr4_set_bits_and_update_boot() doesn't actually cause
     270             :                  * the bits in question to remain set all the way through
     271             :                  * the secondary boot asm.
     272             :                  *
     273             :                  * Instead, we brute-force it and set CR4.PCIDE manually in
     274             :                  * start_secondary().
     275             :                  */
     276           1 :                 cr4_set_bits(X86_CR4_PCIDE);
     277             : 
     278             :                 /*
     279             :                  * INVPCID's single-context modes (2/3) only work if we set
      280             :                  * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
     281             :                  * on systems that have X86_CR4_PCIDE clear, or that have
     282             :                  * no INVPCID support at all.
     283             :                  */
     284           1 :                 if (boot_cpu_has(X86_FEATURE_INVPCID))
     285           1 :                         setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
     286             :         } else {
     287             :                 /*
     288             :                  * flush_tlb_all(), as currently implemented, won't work if
     289             :                  * PCID is on but PGE is not.  Since that combination
     290             :                  * doesn't exist on real hardware, there's no reason to try
     291             :                  * to fully support it, but it's polite to avoid corrupting
     292             :                  * data if we're on an improperly configured VM.
     293             :                  */
     294           0 :                 setup_clear_cpu_cap(X86_FEATURE_PCID);
     295             :         }
     296             : }
     297             : 
     298             : #ifdef CONFIG_X86_32
     299             : #define NR_RANGE_MR 3
     300             : #else /* CONFIG_X86_64 */
     301             : #define NR_RANGE_MR 5
     302             : #endif
     303             : 
     304           8 : static int __meminit save_mr(struct map_range *mr, int nr_range,
     305             :                              unsigned long start_pfn, unsigned long end_pfn,
     306             :                              unsigned long page_size_mask)
     307             : {
     308           8 :         if (start_pfn < end_pfn) {
     309           5 :                 if (nr_range >= NR_RANGE_MR)
     310           0 :                         panic("run out of range for init_memory_mapping\n");
     311           5 :                 mr[nr_range].start = start_pfn<<PAGE_SHIFT;
     312           5 :                 mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
     313           5 :                 mr[nr_range].page_size_mask = page_size_mask;
     314           5 :                 nr_range++;
     315             :         }
     316             : 
     317           8 :         return nr_range;
     318             : }
     319             : 
     320             : /*
      321             :  * adjust the page_size_mask for a small range so it uses a
      322             :  *      big page size instead of a small one if nearby memory is RAM too.
     323             :  */
     324           4 : static void __ref adjust_range_page_size_mask(struct map_range *mr,
     325             :                                                          int nr_range)
     326             : {
     327           4 :         int i;
     328             : 
     329           9 :         for (i = 0; i < nr_range; i++) {
     330           5 :                 if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
     331           5 :                     !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
     332           2 :                         unsigned long start = round_down(mr[i].start, PMD_SIZE);
     333           2 :                         unsigned long end = round_up(mr[i].end, PMD_SIZE);
     334             : 
     335             : #ifdef CONFIG_X86_32
     336             :                         if ((end >> PAGE_SHIFT) > max_low_pfn)
     337             :                                 continue;
     338             : #endif
     339             : 
     340           2 :                         if (memblock_is_region_memory(start, end - start))
     341           0 :                                 mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
     342             :                 }
     343           5 :                 if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
     344           5 :                     !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
     345           5 :                         unsigned long start = round_down(mr[i].start, PUD_SIZE);
     346           5 :                         unsigned long end = round_up(mr[i].end, PUD_SIZE);
     347             : 
     348           5 :                         if (memblock_is_region_memory(start, end - start))
     349           0 :                                 mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
     350             :                 }
     351             :         }
     352           4 : }
     353             : 
     354             : static const char *page_size_string(struct map_range *mr)
     355             : {
     356             :         static const char str_1g[] = "1G";
     357             :         static const char str_2m[] = "2M";
     358             :         static const char str_4m[] = "4M";
     359             :         static const char str_4k[] = "4k";
     360             : 
     361             :         if (mr->page_size_mask & (1<<PG_LEVEL_1G))
     362             :                 return str_1g;
     363             :         /*
     364             :          * 32-bit without PAE has a 4M large page size.
     365             :          * PG_LEVEL_2M is misnamed, but we can at least
     366             :          * print out the right size in the string.
     367             :          */
     368             :         if (IS_ENABLED(CONFIG_X86_32) &&
     369             :             !IS_ENABLED(CONFIG_X86_PAE) &&
     370             :             mr->page_size_mask & (1<<PG_LEVEL_2M))
     371             :                 return str_4m;
     372             : 
     373             :         if (mr->page_size_mask & (1<<PG_LEVEL_2M))
     374             :                 return str_2m;
     375             : 
     376             :         return str_4k;
     377             : }
     378             : 
     379           4 : static int __meminit split_mem_range(struct map_range *mr, int nr_range,
     380             :                                      unsigned long start,
     381             :                                      unsigned long end)
     382             : {
     383           4 :         unsigned long start_pfn, end_pfn, limit_pfn;
     384           4 :         unsigned long pfn;
     385           4 :         int i;
     386             : 
     387           4 :         limit_pfn = PFN_DOWN(end);
     388             : 
     389             :         /* head if not big page alignment ? */
     390           4 :         pfn = start_pfn = PFN_DOWN(start);
     391             : #ifdef CONFIG_X86_32
     392             :         /*
     393             :          * Don't use a large page for the first 2/4MB of memory
     394             :          * because there are often fixed size MTRRs in there
     395             :          * and overlapping MTRRs into large pages can cause
     396             :          * slowdowns.
     397             :          */
     398             :         if (pfn == 0)
     399             :                 end_pfn = PFN_DOWN(PMD_SIZE);
     400             :         else
     401             :                 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
     402             : #else /* CONFIG_X86_64 */
     403           4 :         end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
     404             : #endif
     405           4 :         if (end_pfn > limit_pfn)
     406             :                 end_pfn = limit_pfn;
     407           4 :         if (start_pfn < end_pfn) {
     408           1 :                 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
     409           1 :                 pfn = end_pfn;
     410             :         }
     411             : 
     412             :         /* big page (2M) range */
     413           4 :         start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
     414             : #ifdef CONFIG_X86_32
     415             :         end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
     416             : #else /* CONFIG_X86_64 */
     417           4 :         end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
     418           4 :         if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
     419             :                 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
     420             : #endif
     421             : 
     422           4 :         if (start_pfn < end_pfn) {
     423           3 :                 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
     424           3 :                                 page_size_mask & (1<<PG_LEVEL_2M));
     425           3 :                 pfn = end_pfn;
     426             :         }
     427             : 
     428             : #ifdef CONFIG_X86_64
     429             :         /* big page (1G) range */
     430           4 :         start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
     431           4 :         end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
     432           4 :         if (start_pfn < end_pfn) {
     433           0 :                 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
     434           0 :                                 page_size_mask &
     435             :                                  ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
     436           0 :                 pfn = end_pfn;
     437             :         }
     438             : 
      439             :         /* tail that is not big page (1G) aligned */
     440           4 :         start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
     441           4 :         end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
     442           4 :         if (start_pfn < end_pfn) {
     443           0 :                 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
     444           0 :                                 page_size_mask & (1<<PG_LEVEL_2M));
     445           0 :                 pfn = end_pfn;
     446             :         }
     447             : #endif
     448             : 
      449             :         /* tail that is not big page (2M) aligned */
     450           4 :         start_pfn = pfn;
     451           4 :         end_pfn = limit_pfn;
     452           4 :         nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
     453             : 
     454           4 :         if (!after_bootmem)
     455           4 :                 adjust_range_page_size_mask(mr, nr_range);
     456             : 
      457             :         /* try to merge contiguous ranges with the same page size */
     458           5 :         for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
     459           1 :                 unsigned long old_start;
     460           1 :                 if (mr[i].end != mr[i+1].start ||
     461           1 :                     mr[i].page_size_mask != mr[i+1].page_size_mask)
     462           1 :                         continue;
     463             :                 /* move it */
     464           0 :                 old_start = mr[i].start;
     465           0 :                 memmove(&mr[i], &mr[i+1],
     466           0 :                         (nr_range - 1 - i) * sizeof(struct map_range));
     467           0 :                 mr[i--].start = old_start;
     468           0 :                 nr_range--;
     469             :         }
     470             : 
     471           4 :         for (i = 0; i < nr_range; i++)
     472             :                 pr_debug(" [mem %#010lx-%#010lx] page %s\n",
     473             :                                 mr[i].start, mr[i].end - 1,
     474             :                                 page_size_string(&mr[i]));
     475             : 
     476           4 :         return nr_range;
     477             : }
     478             : 
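
To see what split_mem_range() produces, here is a user-space sketch of the 64-bit splitting order (4 KiB head up to 2 MiB alignment, 2 MiB pages up to 1 GiB alignment, 1 GiB pages, then 2 MiB and 4 KiB tails), assuming 4 KiB pages, 2 MiB PMDs and 1 GiB PUDs. It only prints the sub-range boundaries for an invented example range; the real function also records a page_size_mask per range and merges adjacent ranges:

#include <stdio.h>

#define KB 1024ull
#define MB (1024ull * KB)
#define GB (1024ull * MB)

static unsigned long long rdown(unsigned long long x, unsigned long long a)
{
        return x / a * a;
}

static unsigned long long rup(unsigned long long x, unsigned long long a)
{
        return rdown(x + a - 1, a);
}

int main(void)
{
        /* example range: deliberately unaligned at both ends */
        unsigned long long start = 1 * MB + 4 * KB;
        unsigned long long end   = 3 * GB + 6 * MB + 20 * KB;

        unsigned long long head_end = rup(start, 2 * MB);    /* end of 4K head */
        unsigned long long big_end  = rup(head_end, 1 * GB); /* end of 2M body */
        unsigned long long gig_end  = rdown(end, 1 * GB);    /* end of 1G body */
        unsigned long long tail2m   = rdown(end, 2 * MB);    /* end of 2M tail */

        printf("4K head : [%#llx, %#llx)\n", start, head_end);
        printf("2M body : [%#llx, %#llx)\n", head_end, big_end);
        printf("1G body : [%#llx, %#llx)\n", big_end, gig_end);
        printf("2M tail : [%#llx, %#llx)\n", gig_end, tail2m);
        printf("4K tail : [%#llx, %#llx)\n", tail2m, end);
        return 0;
}
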
     479             : struct range pfn_mapped[E820_MAX_ENTRIES];
     480             : int nr_pfn_mapped;
     481             : 
     482           4 : static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
     483             : {
     484           4 :         nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
     485             :                                              nr_pfn_mapped, start_pfn, end_pfn);
     486           4 :         nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);
     487             : 
     488           4 :         max_pfn_mapped = max(max_pfn_mapped, end_pfn);
     489             : 
     490           4 :         if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
     491           4 :                 max_low_pfn_mapped = max(max_low_pfn_mapped,
     492             :                                          min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
     493           4 : }
     494             : 
     495        1002 : bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
     496             : {
     497        1002 :         int i;
     498             : 
     499        1002 :         for (i = 0; i < nr_pfn_mapped; i++)
     500        1002 :                 if ((start_pfn >= pfn_mapped[i].start) &&
     501        1002 :                     (end_pfn <= pfn_mapped[i].end))
     502             :                         return true;
     503             : 
     504             :         return false;
     505             : }
     506             : 
     507             : /*
      508             :  * Set up the direct mapping of the physical memory at PAGE_OFFSET.
     509             :  * This runs before bootmem is initialized and gets pages directly from
     510             :  * the physical memory. To access them they are temporarily mapped.
     511             :  */
     512           4 : unsigned long __ref init_memory_mapping(unsigned long start,
     513             :                                         unsigned long end, pgprot_t prot)
     514             : {
     515           4 :         struct map_range mr[NR_RANGE_MR];
     516           4 :         unsigned long ret = 0;
     517           4 :         int nr_range, i;
     518             : 
     519           4 :         pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
     520             :                start, end - 1);
     521             : 
     522           4 :         memset(mr, 0, sizeof(mr));
     523           4 :         nr_range = split_mem_range(mr, 0, start, end);
     524             : 
     525          13 :         for (i = 0; i < nr_range; i++)
     526           5 :                 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
     527           5 :                                                    mr[i].page_size_mask,
     528             :                                                    prot);
     529             : 
     530           4 :         add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
     531             : 
     532           4 :         return ret >> PAGE_SHIFT;
     533             : }
     534             : 
     535             : /*
     536             :  * We need to iterate through the E820 memory map and create direct mappings
     537             :  * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
     538             :  * create direct mappings for all pfns from [0 to max_low_pfn) and
     539             :  * [4GB to max_pfn) because of possible memory holes in high addresses
     540             :  * that cannot be marked as UC by fixed/variable range MTRRs.
     541             :  * Depending on the alignment of E820 ranges, this may possibly result
     542             :  * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
     543             :  *
      544             :  * init_mem_mapping() calls init_range_memory_mapping() with a big range.
      545             :  * That range may have holes in the middle or at the ends, and only the RAM
      546             :  * parts will be mapped in init_range_memory_mapping().
     547             :  */
     548           3 : static unsigned long __init init_range_memory_mapping(
     549             :                                            unsigned long r_start,
     550             :                                            unsigned long r_end)
     551             : {
     552           3 :         unsigned long start_pfn, end_pfn;
     553           3 :         unsigned long mapped_ram_size = 0;
     554           3 :         int i;
     555             : 
     556           9 :         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
     557           6 :                 u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
     558           6 :                 u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
     559           6 :                 if (start >= end)
     560           3 :                         continue;
     561             : 
     562             :                 /*
     563             :                  * if it is overlapping with brk pgt, we need to
     564             :                  * alloc pgt buf from memblock instead.
     565             :                  */
     566           3 :                 can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
     567           3 :                                     min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
     568           3 :                 init_memory_mapping(start, end, PAGE_KERNEL);
     569           3 :                 mapped_ram_size += end - start;
     570           3 :                 can_use_brk_pgt = true;
     571             :         }
     572             : 
     573           3 :         return mapped_ram_size;
     574             : }
     575             : 
     576           2 : static unsigned long __init get_new_step_size(unsigned long step_size)
     577             : {
     578             :         /*
     579             :          * Initial mapped size is PMD_SIZE (2M).
      580             :          * We cannot set step_size to PUD_SIZE (1G) yet.
      581             :          * In the worst case, when we cross the 1G boundary and
      582             :          * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
      583             :          * to map a 1G range with PTEs. Hence we use one less than the
     584             :          * difference of page table level shifts.
     585             :          *
      586             :          * We don't need to worry about overflow in the top-down case: on 32-bit,
      587             :          * when step_size is 0, round_down() returns 0 for start, and that
      588             :          * turns it into 0x100000000ULL.
      589             :          * In the bottom-up case, round_up(x, 0) also returns 0, which
      590             :          * needs to be taken into account by the code below.
     591             :          */
     592           2 :         return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
     593             : }
     594             : 
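
The factor applied by get_new_step_size() is 2^(PMD_SHIFT - PAGE_SHIFT - 1). Assuming the usual 4 KiB pages (PAGE_SHIFT = 12) and 2 MiB PMDs (PMD_SHIFT = 21), that is a factor of 256, so the window mapped per iteration grows 2 MiB -> 512 MiB -> 128 GiB. A quick sketch of that progression, with the shift values treated as assumptions:

#include <stdio.h>

int main(void)
{
        const unsigned pmd_shift = 21, page_shift = 12;  /* assumed values   */
        unsigned long long step = 2ull * 1024 * 1024;    /* PMD_SIZE = 2 MiB */

        for (int i = 0; i < 3; i++) {
                printf("step %d: %llu MiB\n", i, step >> 20);
                step <<= pmd_shift - page_shift - 1;     /* get_new_step_size() */
        }
        return 0;
}
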
     595             : /**
     596             :  * memory_map_top_down - Map [map_start, map_end) top down
     597             :  * @map_start: start address of the target memory range
     598             :  * @map_end: end address of the target memory range
     599             :  *
     600             :  * This function will setup direct mapping for memory range
     601             :  * [map_start, map_end) in top-down. That said, the page tables
     602             :  * will be allocated at the end of the memory, and we map the
     603             :  * memory in top-down.
     604             :  */
     605           1 : static void __init memory_map_top_down(unsigned long map_start,
     606             :                                        unsigned long map_end)
     607             : {
     608           1 :         unsigned long real_end, last_start;
     609           1 :         unsigned long step_size;
     610           1 :         unsigned long addr;
     611           1 :         unsigned long mapped_ram_size = 0;
     612             : 
      613             :         /* Xen has a big reserved range near the end of RAM, skip it at first. */
     614           1 :         addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
     615           1 :         real_end = addr + PMD_SIZE;
     616             : 
      617             :         /* step_size needs to be small so the pgt_buf from BRK can cover it */
     618           1 :         step_size = PMD_SIZE;
     619           1 :         max_pfn_mapped = 0; /* will get exact value next */
     620           1 :         min_pfn_mapped = real_end >> PAGE_SHIFT;
     621           1 :         last_start = real_end;
     622             : 
     623             :         /*
     624             :          * We start from the top (end of memory) and go to the bottom.
     625             :          * The memblock_find_in_range() gets us a block of RAM from the
     626             :          * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
     627             :          * for page table.
     628             :          */
     629           4 :         while (last_start > map_start) {
     630           3 :                 unsigned long start;
     631             : 
     632           3 :                 if (last_start > step_size) {
     633           2 :                         start = round_down(last_start - 1, step_size);
     634           2 :                         if (start < map_start)
     635             :                                 start = map_start;
     636             :                 } else
     637             :                         start = map_start;
     638           3 :                 mapped_ram_size += init_range_memory_mapping(start,
     639             :                                                         last_start);
     640           3 :                 last_start = start;
     641           3 :                 min_pfn_mapped = last_start >> PAGE_SHIFT;
     642           3 :                 if (mapped_ram_size >= step_size)
     643           2 :                         step_size = get_new_step_size(step_size);
     644             :         }
     645             : 
     646           1 :         if (real_end < map_end)
     647           0 :                 init_range_memory_mapping(real_end, map_end);
     648           1 : }
     649             : 
     650             : /**
     651             :  * memory_map_bottom_up - Map [map_start, map_end) bottom up
     652             :  * @map_start: start address of the target memory range
     653             :  * @map_end: end address of the target memory range
     654             :  *
     655             :  * This function will setup direct mapping for memory range
     656             :  * [map_start, map_end) in bottom-up. Since we have limited the
     657             :  * bottom-up allocation above the kernel, the page tables will
     658             :  * be allocated just above the kernel and we map the memory
     659             :  * in [map_start, map_end) in bottom-up.
     660             :  */
     661           0 : static void __init memory_map_bottom_up(unsigned long map_start,
     662             :                                         unsigned long map_end)
     663             : {
     664           0 :         unsigned long next, start;
     665           0 :         unsigned long mapped_ram_size = 0;
      666             :         /* step_size needs to be small so the pgt_buf from BRK can cover it */
     667           0 :         unsigned long step_size = PMD_SIZE;
     668             : 
     669           0 :         start = map_start;
     670           0 :         min_pfn_mapped = start >> PAGE_SHIFT;
     671             : 
     672             :         /*
     673             :          * We start from the bottom (@map_start) and go to the top (@map_end).
     674             :          * The memblock_find_in_range() gets us a block of RAM from the
     675             :          * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
     676             :          * for page table.
     677             :          */
     678           0 :         while (start < map_end) {
     679           0 :                 if (step_size && map_end - start > step_size) {
     680           0 :                         next = round_up(start + 1, step_size);
     681           0 :                         if (next > map_end)
     682             :                                 next = map_end;
     683             :                 } else {
     684             :                         next = map_end;
     685             :                 }
     686             : 
     687           0 :                 mapped_ram_size += init_range_memory_mapping(start, next);
     688           0 :                 start = next;
     689             : 
     690           0 :                 if (mapped_ram_size >= step_size)
     691           0 :                         step_size = get_new_step_size(step_size);
     692             :         }
     693           0 : }
     694             : 
     695             : /*
     696             :  * The real mode trampoline, which is required for bootstrapping CPUs
     697             :  * occupies only a small area under the low 1MB.  See reserve_real_mode()
     698             :  * for details.
     699             :  *
     700             :  * If KASLR is disabled the first PGD entry of the direct mapping is copied
     701             :  * to map the real mode trampoline.
     702             :  *
     703             :  * If KASLR is enabled, copy only the PUD which covers the low 1MB
     704             :  * area. This limits the randomization granularity to 1GB for both 4-level
     705             :  * and 5-level paging.
     706             :  */
     707           1 : static void __init init_trampoline(void)
     708             : {
     709             : #ifdef CONFIG_X86_64
     710           1 :         if (!kaslr_memory_enabled())
     711           1 :                 trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
     712             :         else
     713             :                 init_trampoline_kaslr();
     714             : #endif
     715           1 : }
     716             : 
     717           1 : void __init init_mem_mapping(void)
     718             : {
     719           1 :         unsigned long end;
     720             : 
     721           1 :         pti_check_boottime_disable();
     722           1 :         probe_page_size_mask();
     723           1 :         setup_pcid();
     724             : 
     725             : #ifdef CONFIG_X86_64
     726           1 :         end = max_pfn << PAGE_SHIFT;
     727             : #else
     728             :         end = max_low_pfn << PAGE_SHIFT;
     729             : #endif
     730             : 
     731             :         /* the ISA range is always mapped regardless of memory holes */
     732           1 :         init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
     733             : 
     734             :         /* Init the trampoline, possibly with KASLR memory offset */
     735           1 :         init_trampoline();
     736             : 
     737             :         /*
     738             :          * If the allocation is in bottom-up direction, we setup direct mapping
     739             :          * in bottom-up, otherwise we setup direct mapping in top-down.
     740             :          */
     741           1 :         if (memblock_bottom_up()) {
     742           0 :                 unsigned long kernel_end = __pa_symbol(_end);
     743             : 
     744             :                 /*
      745             :                  * We need two separate calls here because we want to
      746             :                  * allocate page tables above the kernel. So we first map
      747             :                  * [kernel_end, end) so that memory above the kernel is mapped
      748             :                  * as soon as possible, and then use page tables allocated above
      749             :                  * the kernel to map [ISA_END_ADDRESS, kernel_end).
     750             :                  */
     751           0 :                 memory_map_bottom_up(kernel_end, end);
     752           0 :                 memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
     753             :         } else {
     754           1 :                 memory_map_top_down(ISA_END_ADDRESS, end);
     755             :         }
     756             : 
     757             : #ifdef CONFIG_X86_64
     758           1 :         if (max_pfn > max_low_pfn) {
      759             :                 /* can we preserve max_low_pfn? */
     760           0 :                 max_low_pfn = max_pfn;
     761             :         }
     762             : #else
     763             :         early_ioremap_page_table_range_init();
     764             : #endif
     765             : 
     766           1 :         load_cr3(swapper_pg_dir);
     767           1 :         __flush_tlb_all();
     768             : 
     769           1 :         x86_init.hyper.init_mem_mapping();
     770             : 
     771           1 :         early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
     772           1 : }
     773             : 
     774             : /*
     775             :  * Initialize an mm_struct to be used during poking and a pointer to be used
     776             :  * during patching.
     777             :  */
     778           1 : void __init poking_init(void)
     779             : {
     780           1 :         spinlock_t *ptl;
     781           1 :         pte_t *ptep;
     782             : 
     783           1 :         poking_mm = copy_init_mm();
     784           1 :         BUG_ON(!poking_mm);
     785             : 
     786             :         /*
     787             :          * Randomize the poking address, but make sure that the following page
      788             :          * will be mapped by the same PMD. We need 2 pages, so find space for 3,
     789             :          * and adjust the address if the PMD ends after the first one.
     790             :          */
     791           1 :         poking_addr = TASK_UNMAPPED_BASE;
     792           1 :         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
     793             :                 poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
     794             :                         (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
     795             : 
     796           1 :         if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
     797           0 :                 poking_addr += PAGE_SIZE;
     798             : 
     799             :         /*
     800             :          * We need to trigger the allocation of the page-tables that will be
     801             :          * needed for poking now. Later, poking may be performed in an atomic
     802             :          * section, which might cause allocation to fail.
     803             :          */
     804           1 :         ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
     805           1 :         BUG_ON(!ptep);
     806           1 :         pte_unmap_unlock(ptep, ptl);
     807           1 : }
     808             : 
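
The PMD-boundary adjustment in poking_init() can be exercised in isolation. A sketch assuming 4 KiB pages and 2 MiB PMDs: if the second of the two poking pages would start exactly on a PMD boundary, the address is bumped by one page so both pages fall within the same PMD. The sizes and helper name are illustrative:

#include <stdio.h>

#define PAGE_SZ  0x1000ull               /* assumed 4 KiB pages */
#define PMD_SZ   0x200000ull             /* assumed 2 MiB PMDs  */
#define PMD_MSK  (~(PMD_SZ - 1))

static unsigned long long fixup_poking_addr(unsigned long long addr)
{
        /* same test as in poking_init() above */
        if (((addr + PAGE_SZ) & ~PMD_MSK) == 0)
                addr += PAGE_SZ;
        return addr;
}

int main(void)
{
        /* last page of a PMD: bumped so both pages share the next PMD */
        printf("%#llx -> %#llx\n", 0x3ff000ull, fixup_poking_addr(0x3ff000ull));
        /* middle of a PMD: left alone */
        printf("%#llx -> %#llx\n", 0x210000ull, fixup_poking_addr(0x210000ull));
        return 0;
}
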
     809             : /*
     810             :  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
     811             :  * is valid. The argument is a physical page number.
     812             :  *
     813             :  * On x86, access has to be given to the first megabyte of RAM because that
     814             :  * area traditionally contains BIOS code and data regions used by X, dosemu,
     815             :  * and similar apps. Since they map the entire memory range, the whole range
     816             :  * must be allowed (for mapping), but any areas that would otherwise be
     817             :  * disallowed are flagged as being "zero filled" instead of rejected.
     818             :  * Access has to be given to non-kernel-ram areas as well, these contain the
     819             :  * PCI mmio resources as well as potential bios/acpi data regions.
     820             :  */
     821           0 : int devmem_is_allowed(unsigned long pagenr)
     822             : {
     823           0 :         if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
     824             :                                 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
     825             :                         != REGION_DISJOINT) {
     826             :                 /*
     827             :                  * For disallowed memory regions in the low 1MB range,
     828             :                  * request that the page be shown as all zeros.
     829             :                  */
     830           0 :                 if (pagenr < 256)
     831             :                         return 2;
     832             : 
     833           0 :                 return 0;
     834             :         }
     835             : 
     836             :         /*
     837             :          * This must follow RAM test, since System RAM is considered a
     838             :          * restricted resource under CONFIG_STRICT_IOMEM.
     839             :          */
     840           0 :         if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
     841             :                 /* Low 1MB bypasses iomem restrictions. */
     842           0 :                 if (pagenr < 256)
     843             :                         return 1;
     844             : 
     845           0 :                 return 0;
     846             :         }
     847             : 
     848             :         return 1;
     849             : }
     850             : 
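
devmem_is_allowed() distinguishes three outcomes: 0 (refuse the access), 1 (allow it), and 2 (allow it, but have the page read back as zeroes, as the comment above describes for disallowed low-1MB regions). A hedged sketch of how a /dev/mem-style reader could act on those values; the stub policy and helper below are purely illustrative and not the actual drivers/char/mem.c code:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in: refuse pfn 300, zero-fill pfn 100, allow the rest. */
static int devmem_policy_stub(unsigned long pagenr)
{
        if (pagenr == 300)
                return 0;
        if (pagenr == 100)
                return 2;
        return 1;
}

/* Copy len bytes of a page, honouring the policy value. */
static int read_page(unsigned long pagenr, char *dst, const char *src, size_t len)
{
        switch (devmem_policy_stub(pagenr)) {
        case 0:                         /* access refused      */
                return -1;
        case 2:                         /* shown as all zeroes */
                memset(dst, 0, len);
                return 0;
        default:                        /* normal read         */
                memcpy(dst, src, len);
                return 0;
        }
}

int main(void)
{
        char src[8] = "secret", dst[8] = "";

        printf("pfn 500 -> %d \"%s\"\n", read_page(500, dst, src, sizeof(dst)), dst);
        printf("pfn 100 -> %d \"%s\"\n", read_page(100, dst, src, sizeof(dst)), dst);
        printf("pfn 300 -> %d\n",        read_page(300, dst, src, sizeof(dst)));
        return 0;
}
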
     851           4 : void free_init_pages(const char *what, unsigned long begin, unsigned long end)
     852             : {
     853           4 :         unsigned long begin_aligned, end_aligned;
     854             : 
     855             :         /* Make sure boundaries are page aligned */
     856           4 :         begin_aligned = PAGE_ALIGN(begin);
     857           4 :         end_aligned   = end & PAGE_MASK;
     858             : 
     859           4 :         if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
     860           0 :                 begin = begin_aligned;
     861           0 :                 end   = end_aligned;
     862             :         }
     863             : 
     864           4 :         if (begin >= end)
     865             :                 return;
     866             : 
     867             :         /*
     868             :          * If debugging page accesses then do not free this memory but
     869             :          * mark them not present - any buggy init-section access will
     870             :          * create a kernel page fault:
     871             :          */
     872           4 :         if (debug_pagealloc_enabled()) {
     873             :                 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
     874             :                         begin, end - 1);
     875             :                 /*
     876             :                  * Inform kmemleak about the hole in the memory since the
     877             :                  * corresponding pages will be unmapped.
     878             :                  */
     879             :                 kmemleak_free_part((void *)begin, end - begin);
     880             :                 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
     881             :         } else {
     882             :                 /*
      883             :                  * We just marked the kernel text read-only above; now that
      884             :                  * we are going to free part of it, we need to make it
     885             :                  * writeable and non-executable first.
     886             :                  */
     887           4 :                 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
     888           4 :                 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
     889             : 
     890           4 :                 free_reserved_area((void *)begin, (void *)end,
     891             :                                    POISON_FREE_INITMEM, what);
     892             :         }
     893             : }
     894             : 
     895             : /*
     896             :  * begin/end can be in the direct map or the "high kernel mapping"
     897             :  * used for the kernel image only.  free_init_pages() will do the
     898             :  * right thing for either kind of address.
     899             :  */
     900           3 : void free_kernel_image_pages(const char *what, void *begin, void *end)
     901             : {
     902           3 :         unsigned long begin_ul = (unsigned long)begin;
     903           3 :         unsigned long end_ul = (unsigned long)end;
     904           3 :         unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
     905             : 
     906           2 :         free_init_pages(what, begin_ul, end_ul);
     907             : 
     908             :         /*
     909             :          * PTI maps some of the kernel into userspace.  For performance,
     910             :          * this includes some kernel areas that do not contain secrets.
     911             :          * Those areas might be adjacent to the parts of the kernel image
     912             :          * being freed, which may contain secrets.  Remove the "high kernel
     913             :          * image mapping" for these freed areas, ensuring they are not even
     914             :          * potentially vulnerable to Meltdown regardless of the specific
     915             :          * optimizations PTI is currently using.
     916             :          *
     917             :          * The "noalias" prevents unmapping the direct map alias which is
     918             :          * needed to access the freed pages.
     919             :          *
     920             :          * This is only valid for 64bit kernels. 32bit has only one mapping
     921             :          * which can't be treated in this way for obvious reasons.
     922             :          */
     923           3 :         if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
     924             :                 set_memory_np_noalias(begin_ul, len_pages);
     925           2 : }
     926             : 
     927           1 : void __ref free_initmem(void)
     928             : {
     929           1 :         e820__reallocate_tables();
     930             : 
     931           1 :         mem_encrypt_free_decrypted_mem();
     932             : 
     933           1 :         free_kernel_image_pages("unused kernel image (initmem)",
     934             :                                 &__init_begin, &__init_end);
     935           1 : }
     936             : 
     937             : #ifdef CONFIG_BLK_DEV_INITRD
     938           0 : void __init free_initrd_mem(unsigned long start, unsigned long end)
     939             : {
     940             :         /*
      941             :          * end may not be aligned, and we cannot align it because the
      942             :          * decompressor could be confused by an aligned initrd_end.
      943             :          * We already reserved the end partial page earlier in:
     944             :          *   - i386_start_kernel()
     945             :          *   - x86_64_start_kernel()
     946             :          *   - relocate_initrd()
      947             :          * So here we can safely do PAGE_ALIGN() to free the partial page.
     948             :          */
     949           0 :         free_init_pages("initrd", start, PAGE_ALIGN(end));
     950           0 : }
     951             : #endif
     952             : 
     953             : /*
     954             :  * Calculate the precise size of the DMA zone (first 16 MB of RAM),
     955             :  * and pass it to the MM layer - to help it set zone watermarks more
     956             :  * accurately.
     957             :  *
     958             :  * Done on 64-bit systems only for the time being, although 32-bit systems
     959             :  * might benefit from this as well.
     960             :  */
     961           1 : void __init memblock_find_dma_reserve(void)
     962             : {
     963             : #ifdef CONFIG_X86_64
     964           1 :         u64 nr_pages = 0, nr_free_pages = 0;
     965           1 :         unsigned long start_pfn, end_pfn;
     966           1 :         phys_addr_t start_addr, end_addr;
     967           1 :         int i;
     968           1 :         u64 u;
     969             : 
     970             :         /*
     971             :          * Iterate over all memory ranges (free and reserved ones alike),
     972             :          * to calculate the total number of pages in the first 16 MB of RAM:
     973             :          */
     974           1 :         nr_pages = 0;
     975           3 :         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
     976           2 :                 start_pfn = min(start_pfn, MAX_DMA_PFN);
     977           2 :                 end_pfn   = min(end_pfn,   MAX_DMA_PFN);
     978             : 
     979           2 :                 nr_pages += end_pfn - start_pfn;
     980             :         }
     981             : 
     982             :         /*
     983             :          * Iterate over free memory ranges to calculate the number of free
     984             :          * pages in the DMA zone, while not counting potential partial
     985             :          * pages at the beginning or the end of the range:
     986             :          */
     987           1 :         nr_free_pages = 0;
     988           4 :         for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
     989           3 :                 start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
     990           3 :                 end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);
     991             : 
     992           3 :                 if (start_pfn < end_pfn)
     993           2 :                         nr_free_pages += end_pfn - start_pfn;
     994             :         }
     995             : 
     996           1 :         set_dma_reserve(nr_pages - nr_free_pages);
     997             : #endif
     998           1 : }
     999             : 
    1000           1 : void __init zone_sizes_init(void)
    1001             : {
    1002           1 :         unsigned long max_zone_pfns[MAX_NR_ZONES];
    1003             : 
    1004           1 :         memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
    1005             : 
    1006             : #ifdef CONFIG_ZONE_DMA
    1007             :         max_zone_pfns[ZONE_DMA]         = min(MAX_DMA_PFN, max_low_pfn);
    1008             : #endif
    1009             : #ifdef CONFIG_ZONE_DMA32
    1010           1 :         max_zone_pfns[ZONE_DMA32]       = min(MAX_DMA32_PFN, max_low_pfn);
    1011             : #endif
    1012           1 :         max_zone_pfns[ZONE_NORMAL]      = max_low_pfn;
    1013             : #ifdef CONFIG_HIGHMEM
    1014             :         max_zone_pfns[ZONE_HIGHMEM]     = max_pfn;
    1015             : #endif
    1016             : 
    1017           1 :         free_area_init(max_zone_pfns);
    1018           1 : }
    1019             : 
    1020             : __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
    1021             :         .loaded_mm = &init_mm,
    1022             :         .next_asid = 1,
    1023             :         .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
    1024             : };
    1025             : 
    1026           8 : void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
    1027             : {
    1028             :         /* entry 0 MUST be WB (hardwired to speed up translations) */
    1029           8 :         BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
    1030             : 
    1031           8 :         __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
    1032           8 :         __pte2cachemode_tbl[entry] = cache;
    1033           8 : }
    1034             : 
    1035             : #ifdef CONFIG_SWAP
    1036             : unsigned long max_swapfile_size(void)
    1037             : {
    1038             :         unsigned long pages;
    1039             : 
    1040             :         pages = generic_max_swapfile_size();
    1041             : 
    1042             :         if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
    1043             :                 /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
    1044             :                 unsigned long long l1tf_limit = l1tf_pfn_limit();
    1045             :                 /*
    1046             :                  * We encode swap offsets also with 3 bits below those for pfn
    1047             :                  * which makes the usable limit higher.
    1048             :                  */
    1049             : #if CONFIG_PGTABLE_LEVELS > 2
    1050             :                 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
    1051             : #endif
    1052             :                 pages = min_t(unsigned long long, l1tf_limit, pages);
    1053             :         }
    1054             :         return pages;
    1055             : }
    1056             : #endif

Generated by: LCOV version 1.14