// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

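/*
 * Allocate @size bytes (@size-aligned) from memblock on node @nid, above
 * MAX_DMA_ADDRESS. When @should_panic is set, a failed allocation panics
 * instead of returning NULL.
 */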
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

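/*
 * Map the shadow range [@addr, @end) under @pmd with freshly allocated
 * memory: a single 2M page when PSE is available and the range spans a
 * whole, aligned PMD, otherwise individual 4K PTEs.
 */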
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

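/*
 * Like kasan_populate_pmd() one level up: try a 1G page when the CPU has
 * GBPAGES and the range covers a whole, aligned PUD, otherwise walk the
 * PMDs, skipping any that are already mapped with a large page.
 */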
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

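/*
 * Allocate a PUD table for an empty @p4d entry, then descend into every
 * PUD in [@addr, @end) that is not already mapped with a huge page.
 */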
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

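/*
 * Allocate a P4D table for an empty @pgd entry and descend into every
 * P4D in [@addr, @end). With folded p4d, pgd_none() is always false, so
 * nothing is allocated at this level.
 */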
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

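/*
 * Populate the shadow region [@addr, @end) with real pages allocated on
 * node @nid, walking the kernel page tables down from the PGD.
 */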
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

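/*
 * Populate the shadow for one range of directly mapped physical memory
 * (an entry of pfn_mapped[]), allocating from the range's own node.
 */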
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

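/*
 * Unmap the early shadow from [@start, @end): clear whole PGD entries up
 * to the last PGD boundary, then clear the non-PGD-aligned tail (present
 * only with 5-level paging) one p4d at a time, so that the rest of the
 * last PGD entry is preserved.
 */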
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

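/*
 * Early variant of p4d_offset(): with 4-level paging the p4d is folded
 * into the pgd. With 5-level paging, translate the pgd entry's physical
 * address by hand through the kernel text mapping, since this may run
 * before the direct mapping is usable.
 */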
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

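/*
 * Point every empty p4d entry in [@addr, @end) at the early shadow PUD
 * table, hooking the early shadow p4d into an empty @pgd first, so that
 * the whole range aliases the single zeroed early shadow page.
 */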
static void __init kasan_early_p4d_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

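/*
 * Cover the whole KASAN shadow region in @pgd with the early shadow, so
 * that every shadow access is valid before the real shadow is built.
 */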
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

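/*
 * Allocate page tables for empty p4d entries in [@addr, @end) without
 * populating anything below them; the lower levels are populated on
 * demand when shadow for a vmalloc region is mapped in.
 */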
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
					       unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

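/*
 * Shallowly populate the pgd entries covering the vmalloc shadow in
 * [@start, @end): only the pgd and p4d levels are allocated here, and
 * the lower levels are left to be populated on demand.
 */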
static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * We need to populate p4ds so that they get synced when
		 * running in four-level mode; see sync_global_pgds_l4().
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}

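/*
 * Set up the early shadow: fill the static early shadow page tables so
 * that every level points one step down, bottoming out at the single
 * zeroed kasan_early_shadow_page, then hook that mapping into both the
 * early and the final top-level page tables.
 */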
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

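/*
 * Replace the early shadow with the real one: tear the early mapping
 * down, back the shadow of the direct mapping, the CPU entry area and
 * the kernel image with real pages, and point everything that needs no
 * real shadow at the (now write-protected) early shadow page.
 */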
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry, where it collides
	 * with a bunch of things like kernel code, modules, the EFI mapping,
	 * etc. We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write-protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}