// SPDX-License-Identifier: GPL-2.0
/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

/*
 * Fill the PMD page with large-page entries mapping [addr, end) to
 * (addr - info->offset), skipping entries that are already present.
 */
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}

/*
 * Fill the PUD page for [addr, end): use 1GB entries directly when
 * info->direct_gbpages is set, otherwise allocate PMD pages as needed
 * and let ident_pmd_init() populate them.
 */
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (info->direct_gbpages) {
			pud_t pudval;

			if (pud_present(*pud))
				continue;

			addr &= PUD_MASK;
			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
	}

	return 0;
}

/*
 * Fill the P4D page for [addr, end), allocating PUD pages as needed and
 * populating them via ident_pud_init().
 */
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;

			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;

		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;

		set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
	}

	return 0;
}

/*
 * Build an identity (or fixed-offset, via info->offset) mapping for the
 * physical range [pstart, pend) into pgd_page, allocating intermediate
 * page tables with info->alloc_pgt_page().
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	/* Set the default pagetable flags if not supplied */
	if (!info->kernpg_flag)
		info->kernpg_flag = _KERNPG_TABLE;

	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	info->kernpg_flag &= __default_kernel_pte_mask;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (pgtable_l5_enabled()) {
			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
		}
	}

	return 0;
}
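
/*
 * Usage sketch (illustrative only, not part of this file): callers such as
 * the kexec and compressed-kernel code fill in a struct x86_mapping_info
 * and hand it to kernel_ident_mapping_init() together with a top-level
 * page-table page. alloc_identity_pgt_page(), ident_pgd, mstart and mend
 * below are hypothetical names used only to make the calling convention
 * concrete; the field values are assumptions modelled on typical callers,
 * not a definitive configuration.
 *
 *	static void *alloc_identity_pgt_page(void *context)
 *	{
 *		// Return a zeroed, page-aligned page for a new page table,
 *		// or NULL on failure (propagated as -ENOMEM above).
 *		return (void *)get_zeroed_page(GFP_KERNEL);
 *	}
 *
 *	struct x86_mapping_info info = {
 *		.alloc_pgt_page	= alloc_identity_pgt_page,
 *		.context	= NULL,
 *		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
 *		.offset		= 0,	// pure identity: virtual == physical
 *	};
 *	pgd_t *ident_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
 *	int ret = kernel_ident_mapping_init(&info, ident_pgd, mstart, mend);
 *
 * Leaving kernpg_flag at zero selects the default _KERNPG_TABLE above, and
 * direct_gbpages defaults to false, so the range is mapped with PMD-level
 * large pages.
 */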