// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev-es.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem) {
		pr_info("No sub-1M memory is available for the trampoline\n");
		return;
	}

	memblock_reserve(mem, size);
	set_real_mode_mem(mem);
	crash_reserve_low_1M();
}

static void sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	if (sme_active())
		th->flags |= TH_FLAGS_SME_ACTIVE;

	if (sev_es_active()) {
		/*
		 * Skip the call to verify_cpu() in secondary_startup_64 as it
		 * will cause #VC exceptions when the AP can't handle them yet.
		 */
		th->start = (u64) secondary_startup_64_no_verify;

		if (sev_es_setup_ap_jump_table(real_mode_header))
			panic("Failed to get/update SEV-ES AP Jump Table");
	}
#endif
}

static void __init setup_real_mode(void)
{
	u16 real_mode_seg;
	const u32 *rel;
	u32 count;
	unsigned char *base;
	unsigned long phys_base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	base = (unsigned char *)real_mode_header;

	/*
	 * If SME is active, the trampoline area will need to be in
	 * decrypted memory in order to bring up other processors
	 * successfully. This is not needed for SEV.
	 */
	if (sme_active())
		set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

	memcpy(base, real_mode_blob, size);

	phys_base = __pa(base);
	real_mode_seg = phys_base >> 4;

	rel = (u32 *) real_mode_relocs;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		u16 *seg = (u16 *) (base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		u32 *ptr = (u32 *) (base + *rel++);
		*ptr += phys_base;
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
	trampoline_header->start = __pa_symbol(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = mmu_cr4_features;

	trampoline_header->flags = 0;

	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
	trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif

	sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also trampoline code will be executed by APs so we
 * need to mark it executable at do_pre_smp_initcalls() at least,
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
	if (!real_mode_header)
		panic("Real mode trampoline was not allocated");

	setup_real_mode();
	set_real_mode_permissions();

	return 0;
}
early_initcall(init_real_mode);
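
A standalone sketch (plain userspace C, not kernel code) of how the relocation table referenced by real_mode_relocs is laid out and consumed. Only the table format comes from the two loops in setup_real_mode() above: a u32 count of 16-bit segment fixups followed by their byte offsets into the blob, then a u32 count of 32-bit linear fixups followed by theirs. The buffer contents, offsets, and load address below are invented purely for illustration.

/*
 * Illustrative sketch: apply a relocation table in the same format that
 * setup_real_mode() walks.  The 16-bit entries receive the real-mode
 * segment (phys_base >> 4); the 32-bit entries have phys_base added.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void apply_relocs(uint8_t *base, uint32_t phys_base, const uint32_t *rel)
{
	uint16_t real_mode_seg = phys_base >> 4;
	uint32_t count;

	/* 16-bit segment relocations. */
	count = *rel++;
	while (count--) {
		uint16_t *seg = (uint16_t *)(base + *rel++);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = *rel++;
	while (count--) {
		uint32_t *ptr = (uint32_t *)(base + *rel++);
		*ptr += phys_base;
	}
}

int main(void)
{
	uint8_t blob[32] = { 0 };
	/* Hypothetical table: one segment fixup at offset 4, one linear fixup at offset 8. */
	uint32_t relocs[] = { 1, 4, 1, 8 };
	uint32_t phys_base = 0x99000;	/* made-up sub-1M load address */
	uint32_t lin = 0x100;		/* blob-relative value awaiting the linear fixup */

	memcpy(&blob[8], &lin, sizeof(lin));
	apply_relocs(blob, phys_base, relocs);

	printf("seg at +4: %#x\n", *(uint16_t *)&blob[4]);	/* 0x9900 */
	printf("lin at +8: %#x\n", *(uint32_t *)&blob[8]);	/* 0x99100 */
	return 0;
}

The point of the exercise: segment relocations only depend on the paragraph (16-byte unit) the blob was loaded at, which is why they store phys_base >> 4, while linear relocations turn blob-relative 32-bit values into physical addresses by adding phys_base.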