Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 :
3 : #include <linux/spinlock.h>
4 : #include <linux/percpu.h>
5 : #include <linux/kallsyms.h>
6 : #include <linux/kcore.h>
7 : #include <linux/pgtable.h>
8 :
9 : #include <asm/cpu_entry_area.h>
10 : #include <asm/fixmap.h>
11 : #include <asm/desc.h>
12 :
13 : static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
14 :
15 : #ifdef CONFIG_X86_64
16 : static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
17 : DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
18 : #endif
19 :
20 : #ifdef CONFIG_X86_32
21 : DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
22 : #endif
23 :
24 : /* Called from entry code, so it must be noinstr */
25 38 : noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
26 : {
27 38 : unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
28 38 : BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
29 :
30 38 : return (struct cpu_entry_area *) va;
31 : }
32 : EXPORT_SYMBOL(get_cpu_entry_area);
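
The address computed above is pure fixed-stride arithmetic: each CPU's entry area starts cpu * CPU_ENTRY_AREA_SIZE bytes past CPU_ENTRY_AREA_PER_CPU, and the BUILD_BUG_ON guarantees the struct tiles whole pages. A minimal userspace sketch of the same computation, with hypothetical DEMO_* constants standing in for the config-dependent kernel values:

#include <stdio.h>

/* Hypothetical stand-ins for the config-dependent kernel constants. */
#define DEMO_AREA_BASE 0xfffffe0000001000UL /* base of the per-CPU areas */
#define DEMO_AREA_SIZE 0x3b000UL            /* page-aligned bytes per CPU */

static unsigned long demo_entry_area(int cpu)
{
        /* Fixed stride: CPU n's area starts n * size past the base. */
        return DEMO_AREA_BASE + (unsigned long)cpu * DEMO_AREA_SIZE;
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu %d -> %#lx\n", cpu, demo_entry_area(cpu));
        return 0;
}

Because the mapping from CPU number to address is static, entry code can locate its own area with no locks or per-CPU pointer loads, which is why the function can be noinstr.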
33 :
34 193 : void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
35 : {
36 193 : unsigned long va = (unsigned long) cea_vaddr;
37 193 : pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);
38 :
39 : /*
40 : * The cpu_entry_area is shared between the user and kernel
41 : * page tables. All of its PTEs can safely be global.
42 : * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
43 : * non-present PTEs, so be careful not to set it in that
44 : * case to avoid confusion.
45 : */
46 193 : if (boot_cpu_has(X86_FEATURE_PGE) &&
47 193 : (pgprot_val(flags) & _PAGE_PRESENT))
48 65 : pte = pte_set_flags(pte, _PAGE_GLOBAL);
49 :
50 193 : set_pte_vaddr(va, pte);
51 193 : }
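
The present-bit test above is the subtle part of this helper: _PAGE_GLOBAL is reused to mark PROT_NONE on non-present PTEs, so the global bit may only be ORed into present mappings. A reduced sketch of that predicate, using placeholder bit masks rather than the real pgtable flag definitions:

/* Placeholder masks; the real _PAGE_PRESENT and _PAGE_GLOBAL are
 * defined in the kernel's pgtable headers. */
#define DEMO_PRESENT 0x001UL
#define DEMO_GLOBAL  0x100UL

static unsigned long demo_finalize_flags(unsigned long flags, int has_pge)
{
        /* Only a present PTE may carry the global bit: on a non-present
         * PTE the same bit position would read as a PROT_NONE marker. */
        if (has_pge && (flags & DEMO_PRESENT))
                flags |= DEMO_GLOBAL;
        return flags;
}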
52 :
53 : static void __init
54 28 : cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
55 : {
56 88 : for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
57 60 : cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
58 28 : }
59 :
60 4 : static void __init percpu_setup_debug_store(unsigned int cpu)
61 : {
62 : #ifdef CONFIG_CPU_SUP_INTEL
63 4 : unsigned int npages;
64 4 : void *cea;
65 :
66 4 : if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
67 0 : return;
68 :
69 4 : cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
70 4 : npages = sizeof(struct debug_store) / PAGE_SIZE;
71 4 : BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
72 4 : cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
73 4 : PAGE_KERNEL);
74 :
75 4 : cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
76 : /*
77 : * Force the population of PMDs for not-yet-allocated per-CPU
78 : * memory such as the debug store buffers.
79 : */
80 4 : npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
81 132 : for (; npages; npages--, cea += PAGE_SIZE)
82 128 : cea_set_pte(cea, 0, PAGE_NONE);
83 : #endif
84 : }
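
The PAGE_NONE loop above is what pre-populates the paging hierarchy: installing a PTE, even a non-present one, forces set_pte_vaddr() to allocate the intermediate page tables now, so the debug-store buffers can be wired up later without any page-table allocation. A condensed sketch of the pattern, where demo_install_pte() is a hypothetical stand-in for set_pte_vaddr():

#define DEMO_PAGE_SIZE 4096

/* Hypothetical stand-in for set_pte_vaddr(); in the kernel, writing any
 * PTE slot, present or not, allocates the covering PMD/PTE tables. */
static void demo_install_pte(void *va, unsigned long pteval)
{
        (void)va;
        (void)pteval;
}

static void demo_reserve_range(void *va, unsigned int npages)
{
        /* One non-present PTE per page: structure now, mapping later. */
        for (; npages; npages--, va += DEMO_PAGE_SIZE)
                demo_install_pte(va, 0);
}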
85 :
86 : #ifdef CONFIG_X86_64
87 :
88 : #define cea_map_stack(name) do { \
89 : npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
90 : cea_map_percpu_pages(cea->estacks.name## _stack, \
91 : estacks->name## _stack, npages, PAGE_KERNEL); \
92 : } while (0)
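
For reference, a use such as cea_map_stack(DF) in the function below pastes the stack name into both the source and destination members, expanding to roughly:

do {
        npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
        cea_map_percpu_pages(cea->estacks.DF_stack,
                             estacks->DF_stack, npages, PAGE_KERNEL);
} while (0);

That is, the real per-CPU DF_stack storage gets mapped at the DF_stack slot inside the CPU's entry area, one guarded stack at a time.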
93 :
94 4 : static void __init percpu_setup_exception_stacks(unsigned int cpu)
95 : {
96 4 : struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
97 4 : struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
98 4 : unsigned int npages;
99 :
100 4 : BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
101 :
102 4 : per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
103 :
104 : /*
104 : * The exception stack mappings in the per-CPU area are protected
106 : * by guard pages so each stack must be mapped separately. DB2 is
107 : * not mapped; it just exists to catch triple nesting of #DB.
108 : */
109 4 : cea_map_stack(DF);
110 4 : cea_map_stack(NMI);
111 4 : cea_map_stack(DB);
112 4 : cea_map_stack(MCE);
113 4 : }
114 : #else
115 : static inline void percpu_setup_exception_stacks(unsigned int cpu)
116 : {
117 : struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
118 :
119 : cea_map_percpu_pages(&cea->doublefault_stack,
120 : &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
121 : }
122 : #endif
123 :
124 : /* Set up the fixmap mappings only once per processor */
125 4 : static void __init setup_cpu_entry_area(unsigned int cpu)
126 : {
127 4 : struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
128 : #ifdef CONFIG_X86_64
129 : /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
130 4 : pgprot_t gdt_prot = PAGE_KERNEL_RO;
131 4 : pgprot_t tss_prot = PAGE_KERNEL_RO;
132 : #else
133 : /*
134 : * On native 32-bit systems, the GDT cannot be read-only because
135 : * our double fault handler uses a task gate, and entering through
136 : * a task gate needs to change an available TSS to busy. If the
137 : * GDT is read-only, that will triple fault. The TSS cannot be
138 : * read-only because the CPU writes to it on task switches.
139 : *
140 : * On Xen PV, the GDT must be read-only because the hypervisor
141 : * requires it.
142 : */
143 : pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
144 : PAGE_KERNEL_RO : PAGE_KERNEL;
145 : pgprot_t tss_prot = PAGE_KERNEL;
146 : #endif
147 :
148 4 : cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
149 :
150 4 : cea_map_percpu_pages(&cea->entry_stack_page,
151 4 : per_cpu_ptr(&entry_stack_storage, cpu), 1,
152 4 : PAGE_KERNEL);
153 :
154 : /*
155 : * The Intel SDM says (Volume 3, 7.2.1):
156 : *
157 : * Avoid placing a page boundary in the part of the TSS that the
158 : * processor reads during a task switch (the first 104 bytes). The
159 : * processor may not correctly perform address translations if a
160 : * boundary occurs in this area. During a task switch, the processor
161 : * reads and writes into the first 104 bytes of each TSS (using
162 : * contiguous physical addresses beginning with the physical address
163 : * of the first byte of the TSS). So, after TSS access begins, if
164 : * part of the 104 bytes is not physically contiguous, the processor
165 : * will access incorrect information without generating a page-fault
166 : * exception.
167 : *
168 : * There are also a lot of errata involving the TSS spanning a page
169 : * boundary. Assert that we're not doing that.
170 : */
171 4 : BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
172 : offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
173 4 : BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
174 : /*
175 : * VMX changes the host TR limit to 0x67 after a VM exit. This is
176 : * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
177 : * that this is correct.
178 : */
179 4 : BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
180 4 : BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);
181 :
182 4 : cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
183 : sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
184 :
185 : #ifdef CONFIG_X86_32
186 : per_cpu(cpu_entry_area, cpu) = cea;
187 : #endif
188 :
189 4 : percpu_setup_exception_stacks(cpu);
190 :
191 4 : percpu_setup_debug_store(cpu);
192 4 : }
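
The first TSS BUILD_BUG_ON above uses a compact same-page test worth unpacking: XORing the start and end offsets of x86_tss leaves set bits only where the two offsets differ, so if any bit survives PAGE_MASK (bits 12 and up with 4 KiB pages) the hardware TSS straddles a page boundary. A self-contained restatement, reusing the 0x68-byte size asserted above:

#include <assert.h>

#define DEMO_PAGE_MASK (~0xfffUL) /* 4 KiB pages: bits 12+ select the page */

/* start ^ end keeps only the differing bits; none may select a page. */
static int demo_same_page(unsigned long start, unsigned long end)
{
        return ((start ^ end) & DEMO_PAGE_MASK) == 0;
}

int main(void)
{
        assert(demo_same_page(0x0, 0x68));      /* x86_tss at offset 0: ok */
        assert(!demo_same_page(0xfc0, 0x1028)); /* would cross a page */
        return 0;
}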
193 :
194 1 : static __init void setup_cpu_entry_area_ptes(void)
195 : {
196 : #ifdef CONFIG_X86_32
197 : unsigned long start, end;
198 :
199 : /* The +1 is for the readonly IDT: */
200 : BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
201 : BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
202 : BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
203 :
204 : start = CPU_ENTRY_AREA_BASE;
205 : end = start + CPU_ENTRY_AREA_MAP_SIZE;
206 :
207 : /* Careful here: start + PMD_SIZE might wrap around */
208 : for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
209 : populate_extra_pte(start);
210 : #endif
211 1 : }
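
The dual loop condition above is an overflow guard that a 32-bit example makes concrete: when the mapped range ends near the top of the address space, start += PMD_SIZE can wrap past 0xffffffff to a small value that still satisfies start < end, and only start >= CPU_ENTRY_AREA_BASE stops the walk. A hypothetical illustration in 32-bit arithmetic:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PMD_SIZE 0x400000u /* hypothetical 4 MiB PMD step */

static void demo_walk(uint32_t base, uint32_t end)
{
        /* With base = 0xffc00000, the second step wraps to 0, which still
         * passes "start < end" but fails "start >= base" and exits. */
        for (uint32_t start = base; start < end && start >= base;
             start += DEMO_PMD_SIZE)
                printf("populate 0x%08x\n", (unsigned int)start);
}

int main(void)
{
        demo_walk(0xffc00000u, 0xffe00000u);
        return 0;
}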
212 :
213 1 : void __init setup_cpu_entry_areas(void)
214 : {
215 1 : unsigned int cpu;
216 :
217 1 : setup_cpu_entry_area_ptes();
218 :
219 5 : for_each_possible_cpu(cpu)
220 4 : setup_cpu_entry_area(cpu);
221 :
222 : /*
223 : * This is the last essential update to swapper_pg_dir which needs
224 : * to be synchronized to initial_page_table on 32-bit.
225 : */
226 1 : sync_initial_page_table();
227 1 : }