LCOV - code coverage report
Current view: top level - arch/x86/kernel - head64.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                   Hit   Total   Coverage
Lines:              75     184     40.8 %
Functions:           7      12     58.3 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *  prepare to run common code
       4             :  *
       5             :  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
       6             :  */
       7             : 
       8             : #define DISABLE_BRANCH_PROFILING
       9             : 
      10             : /* cpu_feature_enabled() cannot be used this early */
      11             : #define USE_EARLY_PGTABLE_L5
      12             : 
      13             : #include <linux/init.h>
      14             : #include <linux/linkage.h>
      15             : #include <linux/types.h>
      16             : #include <linux/kernel.h>
      17             : #include <linux/string.h>
      18             : #include <linux/percpu.h>
      19             : #include <linux/start_kernel.h>
      20             : #include <linux/io.h>
      21             : #include <linux/memblock.h>
      22             : #include <linux/mem_encrypt.h>
      23             : #include <linux/pgtable.h>
      24             : 
      25             : #include <asm/processor.h>
      26             : #include <asm/proto.h>
      27             : #include <asm/smp.h>
      28             : #include <asm/setup.h>
      29             : #include <asm/desc.h>
      30             : #include <asm/tlbflush.h>
      31             : #include <asm/sections.h>
      32             : #include <asm/kdebug.h>
      33             : #include <asm/e820/api.h>
      34             : #include <asm/bios_ebda.h>
      35             : #include <asm/bootparam_utils.h>
      36             : #include <asm/microcode.h>
      37             : #include <asm/kasan.h>
      38             : #include <asm/fixmap.h>
      39             : #include <asm/realmode.h>
      40             : #include <asm/extable.h>
      41             : #include <asm/trapnr.h>
      42             : #include <asm/sev-es.h>
      43             : 
      44             : /*
      45             :  * Manage page tables very early on.
      46             :  */
      47             : extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
      48             : static unsigned int __initdata next_early_pgt;
      49             : pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
      50             : 
      51             : #ifdef CONFIG_X86_5LEVEL
      52             : unsigned int __pgtable_l5_enabled __ro_after_init;
      53             : unsigned int pgdir_shift __ro_after_init = 39;
      54             : EXPORT_SYMBOL(pgdir_shift);
      55             : unsigned int ptrs_per_p4d __ro_after_init = 1;
      56             : EXPORT_SYMBOL(ptrs_per_p4d);
      57             : #endif
      58             : 
      59             : #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
      60             : unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
      61             : EXPORT_SYMBOL(page_offset_base);
      62             : unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
      63             : EXPORT_SYMBOL(vmalloc_base);
      64             : unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
      65             : EXPORT_SYMBOL(vmemmap_base);
      66             : #endif
      67             : 
      68             : /*
      69             :  * GDT used on the boot CPU before switching to virtual addresses.
      70             :  */
      71             : static struct desc_struct startup_gdt[GDT_ENTRIES] = {
      72             :         [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
      73             :         [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
      74             :         [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
      75             : };
      76             : 
      77             : /*
      78             :  * Address needs to be set at runtime because it references the startup_gdt
      79             :  * while the kernel still uses a direct mapping.
      80             :  */
      81             : static struct desc_ptr startup_gdt_descr = {
      82             :         .size = sizeof(startup_gdt),
      83             :         .address = 0,
      84             : };
      85             : 
      86             : #define __head  __section(".head.text")
      87             : 
      88           0 : static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
      89             : {
      90           0 :         return ptr - (void *)_text + (void *)physaddr;
      91             : }
      92             : 
      93           0 : static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
      94             : {
      95           0 :         return fixup_pointer(ptr, physaddr);
      96             : }
      97             : 
      98             : #ifdef CONFIG_X86_5LEVEL
      99             : static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
     100             : {
     101             :         return fixup_pointer(ptr, physaddr);
     102             : }
     103             : 
     104             : static bool __head check_la57_support(unsigned long physaddr)
     105             : {
     106             :         /*
      107             :          * 5-level paging is detected and enabled at kernel decompression
     108             :          * stage. Only check if it has been enabled there.
     109             :          */
     110             :         if (!(native_read_cr4() & X86_CR4_LA57))
     111             :                 return false;
     112             : 
     113             :         *fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
     114             :         *fixup_int(&pgdir_shift, physaddr) = 48;
     115             :         *fixup_int(&ptrs_per_p4d, physaddr) = 512;
     116             :         *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
     117             :         *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
     118             :         *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;
     119             : 
     120             :         return true;
     121             : }
     122             : #else
     123           0 : static bool __head check_la57_support(unsigned long physaddr)
     124             : {
     125           0 :         return false;
     126             : }
     127             : #endif
     128             : 
     129             : /* Code in __startup_64() can be relocated during execution, but the compiler
     130             :  * doesn't have to generate PC-relative relocations when accessing globals from
     131             :  * that function. Clang actually does not generate them, which leads to
     132             :  * boot-time crashes. To work around this problem, every global pointer must
     133             :  * be adjusted using fixup_pointer().
     134             :  */
     135           0 : unsigned long __head __startup_64(unsigned long physaddr,
     136             :                                   struct boot_params *bp)
     137             : {
     138           0 :         unsigned long vaddr, vaddr_end;
     139           0 :         unsigned long load_delta, *p;
     140           0 :         unsigned long pgtable_flags;
     141           0 :         pgdval_t *pgd;
     142           0 :         p4dval_t *p4d;
     143           0 :         pudval_t *pud;
     144           0 :         pmdval_t *pmd, pmd_entry;
     145           0 :         pteval_t *mask_ptr;
     146           0 :         bool la57;
     147           0 :         int i;
     148           0 :         unsigned int *next_pgt_ptr;
     149             : 
     150           0 :         la57 = check_la57_support(physaddr);
     151             : 
     152             :         /* Is the address too large? */
     153           0 :         if (physaddr >> MAX_PHYSMEM_BITS)
     154           0 :                 for (;;);
     155             : 
     156             :         /*
     157             :          * Compute the delta between the address I am compiled to run at
     158             :          * and the address I am actually running at.
     159             :          */
     160           0 :         load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
     161             : 
     162             :         /* Is the address not 2M aligned? */
     163           0 :         if (load_delta & ~PMD_PAGE_MASK)
     164           0 :                 for (;;);
     165             : 
     166             :         /* Activate Secure Memory Encryption (SME) if supported and enabled */
     167           0 :         sme_enable(bp);
     168             : 
     169             :         /* Include the SME encryption mask in the fixup value */
     170           0 :         load_delta += sme_get_me_mask();
     171             : 
     172             :         /* Fixup the physical addresses in the page table */
     173             : 
     174           0 :         pgd = fixup_pointer(&early_top_pgt, physaddr);
     175           0 :         p = pgd + pgd_index(__START_KERNEL_map);
     176           0 :         if (la57)
     177             :                 *p = (unsigned long)level4_kernel_pgt;
     178             :         else
     179           0 :                 *p = (unsigned long)level3_kernel_pgt;
     180           0 :         *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;
     181             : 
     182           0 :         if (la57) {
     183             :                 p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
     184             :                 p4d[511] += load_delta;
     185             :         }
     186             : 
     187           0 :         pud = fixup_pointer(&level3_kernel_pgt, physaddr);
     188           0 :         pud[510] += load_delta;
     189           0 :         pud[511] += load_delta;
     190             : 
     191           0 :         pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
     192           0 :         for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
     193           0 :                 pmd[i] += load_delta;
     194             : 
     195             :         /*
     196             :          * Set up the identity mapping for the switchover.  These
     197             :          * entries should *NOT* have the global bit set!  This also
     198             :          * creates a bunch of nonsense entries but that is fine --
     199             :          * it avoids problems around wraparound.
     200             :          */
     201             : 
     202           0 :         next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
     203           0 :         pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
     204           0 :         pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
     205             : 
     206           0 :         pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
     207             : 
     208           0 :         if (la57) {
     209             :                 p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
     210             :                                     physaddr);
     211             : 
     212             :                 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
     213             :                 pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
     214             :                 pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
     215             : 
     216             :                 i = physaddr >> P4D_SHIFT;
     217             :                 p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
     218             :                 p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
     219             :         } else {
     220           0 :                 i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
     221           0 :                 pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
     222           0 :                 pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
     223             :         }
     224             : 
     225           0 :         i = physaddr >> PUD_SHIFT;
     226           0 :         pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
     227           0 :         pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
     228             : 
     229           0 :         pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
     230             :         /* Filter out unsupported __PAGE_KERNEL_* bits: */
     231           0 :         mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
     232           0 :         pmd_entry &= *mask_ptr;
     233           0 :         pmd_entry += sme_get_me_mask();
      234           0 :         pmd_entry += physaddr;
     235             : 
     236           0 :         for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
     237           0 :                 int idx = i + (physaddr >> PMD_SHIFT);
     238             : 
     239           0 :                 pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
     240             :         }
     241             : 
     242             :         /*
     243             :          * Fixup the kernel text+data virtual addresses. Note that
     244             :          * we might write invalid pmds, when the kernel is relocated
     245             :          * cleanup_highmap() fixes this up along with the mappings
     246             :          * beyond _end.
     247             :          *
     248             :          * Only the region occupied by the kernel image has so far
     249             :          * been checked against the table of usable memory regions
     250             :          * provided by the firmware, so invalidate pages outside that
     251             :          * region. A page table entry that maps to a reserved area of
     252             :          * memory would allow processor speculation into that area,
     253             :          * and on some hardware (particularly the UV platform) even
     254             :          * speculative access to some reserved areas is caught as an
     255             :          * error, causing the BIOS to halt the system.
     256             :          */
     257             : 
     258           0 :         pmd = fixup_pointer(level2_kernel_pgt, physaddr);
     259             : 
     260             :         /* invalidate pages before the kernel image */
     261           0 :         for (i = 0; i < pmd_index((unsigned long)_text); i++)
     262           0 :                 pmd[i] &= ~_PAGE_PRESENT;
     263             : 
     264             :         /* fixup pages that are part of the kernel image */
     265           0 :         for (; i <= pmd_index((unsigned long)_end); i++)
     266           0 :                 if (pmd[i] & _PAGE_PRESENT)
     267           0 :                         pmd[i] += load_delta;
     268             : 
     269             :         /* invalidate pages after the kernel image */
     270           0 :         for (; i < PTRS_PER_PMD; i++)
     271           0 :                 pmd[i] &= ~_PAGE_PRESENT;
     272             : 
     273             :         /*
     274             :          * Fixup phys_base - remove the memory encryption mask to obtain
     275             :          * the true physical address.
     276             :          */
     277           0 :         *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();
     278             : 
     279             :         /* Encrypt the kernel and related (if SME is active) */
     280           0 :         sme_encrypt_kernel(bp);
     281             : 
     282             :         /*
     283             :          * Clear the memory encryption mask from the .bss..decrypted section.
     284             :          * The bss section will be memset to zero later in the initialization so
     285             :          * there is no need to zero it after changing the memory encryption
     286             :          * attribute.
     287             :          */
     288           0 :         if (mem_encrypt_active()) {
     289             :                 vaddr = (unsigned long)__start_bss_decrypted;
     290             :                 vaddr_end = (unsigned long)__end_bss_decrypted;
     291             :                 for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
     292             :                         i = pmd_index(vaddr);
     293             :                         pmd[i] -= sme_get_me_mask();
     294             :                 }
     295             :         }
     296             : 
     297             :         /*
     298             :          * Return the SME encryption mask (if SME is active) to be used as a
     299             :          * modifier for the initial pgdir entry programmed into CR3.
     300             :          */
     301           0 :         return sme_get_me_mask();
     302             : }
     303             : 
     304           3 : unsigned long __startup_secondary_64(void)
     305             : {
     306             :         /*
     307             :          * Return the SME encryption mask (if SME is active) to be used as a
     308             :          * modifier for the initial pgdir entry programmed into CR3.
     309             :          */
     310           3 :         return sme_get_me_mask();
     311             : }
     312             : 
     313             : /* Wipe all early page tables except for the kernel symbol map */
     314           0 : static void __init reset_early_page_tables(void)
     315             : {
     316           0 :         memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
     317           0 :         next_early_pgt = 0;
     318           0 :         write_cr3(__sme_pa_nodebug(early_top_pgt));
     319           0 : }
     320             : 
     321             : /* Create a new PMD entry */
     322           4 : bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
     323             : {
     324           4 :         unsigned long physaddr = address - __PAGE_OFFSET;
     325           4 :         pgdval_t pgd, *pgd_p;
     326           4 :         p4dval_t p4d, *p4d_p;
     327           4 :         pudval_t pud, *pud_p;
     328           4 :         pmdval_t *pmd_p;
     329             : 
     330             :         /* Invalid address or early pgt is done ?  */
     331          12 :         if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
     332             :                 return false;
     333             : 
     334           4 : again:
     335           4 :         pgd_p = &early_top_pgt[pgd_index(address)].pgd;
     336           4 :         pgd = *pgd_p;
     337             : 
     338             :         /*
     339             :          * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
     340             :          * critical -- __PAGE_OFFSET would point us back into the dynamic
     341             :          * range and we might end up looping forever...
     342             :          */
     343           4 :         if (!pgtable_l5_enabled())
     344           4 :                 p4d_p = pgd_p;
     345             :         else if (pgd)
     346             :                 p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
     347             :         else {
     348             :                 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
     349             :                         reset_early_page_tables();
     350             :                         goto again;
     351             :                 }
     352             : 
     353             :                 p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
     354             :                 memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
     355             :                 *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
     356             :         }
     357           4 :         p4d_p += p4d_index(address);
     358           4 :         p4d = *p4d_p;
     359             : 
     360           4 :         if (p4d)
     361           3 :                 pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
     362             :         else {
     363           1 :                 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
     364           0 :                         reset_early_page_tables();
     365           0 :                         goto again;
     366             :                 }
     367             : 
     368           1 :                 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
     369           1 :                 memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
     370           1 :                 *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
     371             :         }
     372           4 :         pud_p += pud_index(address);
     373           4 :         pud = *pud_p;
     374             : 
     375           4 :         if (pud)
     376           3 :                 pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
     377             :         else {
     378           1 :                 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
     379           0 :                         reset_early_page_tables();
     380           0 :                         goto again;
     381             :                 }
     382             : 
     383           1 :                 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
     384           1 :                 memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
     385           1 :                 *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
     386             :         }
     387           4 :         pmd_p[pmd_index(address)] = pmd;
     388             : 
     389           4 :         return true;
     390             : }
     391             : 
     392           4 : static bool __init early_make_pgtable(unsigned long address)
     393             : {
     394           4 :         unsigned long physaddr = address - __PAGE_OFFSET;
     395           4 :         pmdval_t pmd;
     396             : 
     397           4 :         pmd = (physaddr & PMD_MASK) + early_pmd_flags;
     398             : 
     399           4 :         return __early_make_pgtable(address, pmd);
     400             : }
     401             : 
     402           4 : void __init do_early_exception(struct pt_regs *regs, int trapnr)
     403             : {
     404           8 :         if (trapnr == X86_TRAP_PF &&
     405           4 :             early_make_pgtable(native_read_cr2()))
     406             :                 return;
     407             : 
     408           0 :         if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
     409             :             trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
     410             :                 return;
     411             : 
     412           0 :         early_fixup_exception(regs, trapnr);
     413             : }
     414             : 
      415             : /* Don't add a printk in there. printk relies on the PDA which is not
      416             :    initialized yet. */
     417           0 : static void __init clear_bss(void)
     418             : {
     419           0 :         memset(__bss_start, 0,
     420             :                (unsigned long) __bss_stop - (unsigned long) __bss_start);
     421           1 : }
     422             : 
     423           1 : static unsigned long get_cmd_line_ptr(void)
     424             : {
     425           1 :         unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;
     426             : 
     427           1 :         cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;
     428             : 
     429           1 :         return cmd_line_ptr;
     430             : }
     431             : 
     432           1 : static void __init copy_bootdata(char *real_mode_data)
     433             : {
     434           1 :         char * command_line;
     435           1 :         unsigned long cmd_line_ptr;
     436             : 
     437             :         /*
     438             :          * If SME is active, this will create decrypted mappings of the
     439             :          * boot data in advance of the copy operations.
     440             :          */
     441           1 :         sme_map_bootdata(real_mode_data);
     442             : 
     443           1 :         memcpy(&boot_params, real_mode_data, sizeof(boot_params));
     444           1 :         sanitize_boot_params(&boot_params);
     445           1 :         cmd_line_ptr = get_cmd_line_ptr();
     446           1 :         if (cmd_line_ptr) {
     447           1 :                 command_line = __va(cmd_line_ptr);
     448           1 :                 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
     449             :         }
     450             : 
     451             :         /*
     452             :          * The old boot data is no longer needed and won't be reserved,
     453             :          * freeing up that memory for use by the system. If SME is active,
     454             :          * we need to remove the mappings that were created so that the
     455             :          * memory doesn't remain mapped as decrypted.
     456             :          */
     457           1 :         sme_unmap_bootdata(real_mode_data);
     458           1 : }
     459             : 
     460           0 : asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
     461             : {
     462             :         /*
     463             :          * Build-time sanity checks on the kernel image and module
     464             :          * area mappings. (these are purely build-time and produce no code)
     465             :          */
     466           0 :         BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
     467           0 :         BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
     468           0 :         BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
     469           0 :         BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
     470           0 :         BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
     471           0 :         BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
     472           0 :         MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
     473             :                                 (__START_KERNEL & PGDIR_MASK)));
     474           0 :         BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
     475             : 
     476           0 :         cr4_init_shadow();
     477             : 
     478             :         /* Kill off the identity-map trampoline */
     479           0 :         reset_early_page_tables();
     480             : 
     481           0 :         clear_bss();
     482             : 
     483           1 :         clear_page(init_top_pgt);
     484             : 
     485             :         /*
     486             :          * SME support may update early_pmd_flags to include the memory
     487             :          * encryption mask, so it needs to be called before anything
     488             :          * that may generate a page fault.
     489             :          */
     490           1 :         sme_early_init();
     491             : 
     492           1 :         kasan_early_init();
     493             : 
     494           1 :         idt_setup_early_handler();
     495             : 
     496           1 :         copy_bootdata(__va(real_mode_data));
     497             : 
     498             :         /*
     499             :          * Load microcode early on BSP.
     500             :          */
     501           1 :         load_ucode_bsp();
     502             : 
      503             :         /* set init_top_pgt kernel high mapping */
     504           1 :         init_top_pgt[511] = early_top_pgt[511];
     505             : 
     506           1 :         x86_64_start_reservations(real_mode_data);
     507           0 : }
     508             : 
     509           1 : void __init x86_64_start_reservations(char *real_mode_data)
     510             : {
      511             :         /* version is always non-zero if it has been copied */
     512           1 :         if (!boot_params.hdr.version)
     513           0 :                 copy_bootdata(__va(real_mode_data));
     514             : 
     515           1 :         x86_early_init_platform_quirks();
     516             : 
     517           1 :         switch (boot_params.hdr.hardware_subarch) {
     518             :         case X86_SUBARCH_INTEL_MID:
     519             :                 x86_intel_mid_early_setup();
     520             :                 break;
     521             :         default:
     522             :                 break;
     523             :         }
     524             : 
     525           1 :         start_kernel();
     526           0 : }
     527             : 
     528             : /*
     529             :  * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
     530             :  * used until the idt_table takes over. On the boot CPU this happens in
     531             :  * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
     532             :  * this happens in the functions called from head_64.S.
     533             :  *
     534             :  * The idt_table can't be used that early because all the code modifying it is
     535             :  * in idt.c and can be instrumented by tracing or KASAN, which both don't work
     536             :  * during early CPU bringup. Also the idt_table has the runtime vectors
     537             :  * configured which require certain CPU state to be setup already (like TSS),
     538             :  * which also hasn't happened yet in early CPU bringup.
     539             :  */
     540             : static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;
     541             : 
     542             : static struct desc_ptr bringup_idt_descr = {
     543             :         .size           = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
     544             :         .address        = 0, /* Set at runtime */
     545             : };
     546             : 
     547             : static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
     548             : {
     549             : #ifdef CONFIG_AMD_MEM_ENCRYPT
     550             :         struct idt_data data;
     551             :         gate_desc desc;
     552             : 
     553             :         init_idt_data(&data, n, handler);
     554             :         idt_init_desc(&desc, &data);
     555             :         native_write_idt_entry(idt, n, &desc);
     556             : #endif
     557             : }
     558             : 
     559             : /* This runs while still in the direct mapping */
     560           0 : static void startup_64_load_idt(unsigned long physbase)
     561             : {
     562           0 :         struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
     563           0 :         gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
     564             : 
     565             : 
     566           0 :         if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
     567             :                 void *handler;
     568             : 
     569             :                 /* VMM Communication Exception */
     570             :                 handler = fixup_pointer(vc_no_ghcb, physbase);
     571             :                 set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
     572             :         }
     573             : 
     574           0 :         desc->address = (unsigned long)idt;
     575           0 :         native_load_idt(desc);
     576             : }
     577             : 
     578             : /* This is used when running on kernel addresses */
     579           3 : void early_setup_idt(void)
     580             : {
     581             :         /* VMM Communication Exception */
     582           3 :         if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
     583             :                 set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);
     584             : 
     585           3 :         bringup_idt_descr.address = (unsigned long)bringup_idt_table;
     586           3 :         native_load_idt(&bringup_idt_descr);
     587           3 : }
     588             : 
     589             : /*
     590             :  * Setup boot CPU state needed before kernel switches to virtual addresses.
     591             :  */
     592           0 : void __head startup_64_setup_env(unsigned long physbase)
     593             : {
     594             :         /* Load GDT */
     595           0 :         startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
     596           0 :         native_load_gdt(&startup_gdt_descr);
     597             : 
     598             :         /* New GDT is live - reload data segment registers */
     599           0 :         asm volatile("movl %%eax, %%ds\n"
     600             :                      "movl %%eax, %%ss\n"
     601             :                      "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");
     602             : 
     603           0 :         startup_64_load_idt(physbase);
     604           0 : }

Generated by: LCOV version 1.14