Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Provide common bits of early_ioremap() support for architectures needing
4 : * temporary mappings during boot before ioremap() is available.
5 : *
6 : * This is mostly a direct copy of the x86 early_ioremap implementation.
7 : *
8 : * (C) Copyright 1995 1996, 2014 Linus Torvalds
9 : *
10 : */
11 : #include <linux/kernel.h>
12 : #include <linux/init.h>
13 : #include <linux/io.h>
14 : #include <linux/module.h>
15 : #include <linux/slab.h>
16 : #include <linux/mm.h>
17 : #include <linux/vmalloc.h>
18 : #include <asm/fixmap.h>
19 : #include <asm/early_ioremap.h>
20 :
21 : #ifdef CONFIG_MMU
/* Set by the "early_ioremap_debug" boot parameter; enables WARN() tracing
 * of every early map/unmap in this file. */
static int early_ioremap_debug __initdata;
23 :
/*
 * Handler for the "early_ioremap_debug" boot parameter: flips the debug
 * flag on.  The parameter value itself is ignored.  Returns 0 (success).
 */
static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
31 :
/* Nonzero once early_ioremap_reset() has run; selects the __late_*_fixmap
 * helpers instead of the early ones in the map/unmap paths below. */
static int after_paging_init __initdata;
33 :
/*
 * Weak default for the architecture hook that may adjust the protections
 * used by early_memremap()/early_memremap_ro().  The generic version
 * returns the requested protections unchanged.
 */
pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}
40 :
/* Weak hook invoked from early_ioremap_reset(); architectures that need
 * teardown work at that point override this.  Default: do nothing. */
void __init __weak early_ioremap_shutdown(void)
{
}
44 :
/*
 * Called once normal paging is up: run the architecture shutdown hook,
 * then mark the transition so subsequent early map/unmap calls use the
 * __late_*_fixmap helpers.
 */
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}
50 :
51 : /*
52 : * Generally, ioremap() is available after paging_init() has been called.
53 : * Architectures wanting to allow early_ioremap after paging_init() can
54 : * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
55 : */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	/* Fallback: mapping after paging_init() without an arch override
	 * is a hard error. */
	BUG();
}
#endif
63 :
#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	/* Fallback: unmapping after paging_init() without an arch override
	 * is a hard error. */
	BUG();
}
#endif
70 :
/* Per-slot bookkeeping: the cookie handed back to the caller, the size the
 * caller mapped (checked again at unmap time), and the fixmap virtual
 * address backing each slot. */
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
74 :
75 1 : void __init early_ioremap_setup(void)
76 : {
77 1 : int i;
78 :
79 9 : for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
80 8 : if (WARN_ON(prev_map[i]))
81 : break;
82 :
83 9 : for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
84 8 : slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
85 1 : }
86 :
87 1 : static int __init check_early_ioremap_leak(void)
88 : {
89 1 : int count = 0;
90 1 : int i;
91 :
92 9 : for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
93 8 : if (prev_map[i])
94 0 : count++;
95 :
96 1 : if (WARN(count, KERN_WARNING
97 : "Debug warning: early ioremap leak of %d areas detected.\n"
98 : "please boot with early_ioremap_debug and report the dmesg.\n",
99 : count))
100 0 : return 1;
101 : return 0;
102 : }
103 : late_initcall(check_early_ioremap_leak);
104 :
/*
 * Map @size bytes starting at physical address @phys_addr into a free
 * boot-time fixmap slot with protections @prot.  Returns the virtual
 * address of the mapping (adjusted by the sub-page offset of @phys_addr),
 * or NULL on failure (no free slot, zero size, wraparound, or the range
 * exceeds one slot).  Must be paired with early_iounmap().
 */
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* This is an early-boot facility only. */
	WARN_ON(system_state >= SYSTEM_RUNNING);

	/* Find the first free boot-time slot. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	/* Record the caller's size so early_iounmap() can cross-check it. */
	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it..
	 */
	/* Fixmap indices count downward, hence --idx per page below. */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		/* After paging_init(), the late fixmap path must be used. */
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	/* NOTE(review): phys_addr was advanced by the loop above, so this
	 * debug message reports the end of the mapped range, not its start. */
	WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

	/* Hand back the slot's virtual base plus the sub-page offset. */
	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}
167 :
/*
 * Tear down a mapping created by __early_ioremap().  @addr must be the
 * cookie a previous early_ioremap()/early_memremap() returned and @size
 * the size it was mapped with; on any mismatch the unmap is refused with
 * a warning.
 */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* Find the slot whose mapping produced @addr. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx) not found slot\n",
		 __func__, addr, size))
		return;

	/* The unmap size must match the size recorded at map time. */
	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size not consistent %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n",
	     __func__, addr, size, slot);

	/* Sanity check: the cookie must lie within the fixmap area. */
	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	/* Clear each fixmap page the mapping occupied (indices descend). */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	/* Release the slot for reuse. */
	prev_map[slot] = NULL;
}
214 :
/* Remap an IO device: early boot-time counterpart of ioremap(), using
 * device (FIXMAP_PAGE_IO) protections. */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
221 :
222 : /* Remap memory */
223 : void __init *
224 69 : early_memremap(resource_size_t phys_addr, unsigned long size)
225 : {
226 69 : pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
227 69 : FIXMAP_PAGE_NORMAL);
228 :
229 69 : return (__force void *)__early_ioremap(phys_addr, size, prot);
230 : }
#ifdef FIXMAP_PAGE_RO
/* Like early_memremap(), but the mapping is read-only. */
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot;

	/* Let the architecture adjust the protections first. */
	prot = early_memremap_pgprot_adjust(phys_addr, size, FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif
241 :
#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
/*
 * Remap memory with caller-supplied raw protection bits; no architecture
 * adjustment hook is applied on this path.
 */
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	pgprot_t prot = __pgprot(prot_val);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif
251 :
252 : #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
253 :
254 0 : void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
255 : {
256 0 : unsigned long slop, clen;
257 0 : char *p;
258 :
259 0 : while (size) {
260 0 : slop = offset_in_page(src);
261 0 : clen = size;
262 0 : if (clen > MAX_MAP_CHUNK - slop)
263 : clen = MAX_MAP_CHUNK - slop;
264 0 : p = early_memremap(src & PAGE_MASK, clen + slop);
265 0 : memcpy(dest, p + slop, clen);
266 0 : early_memunmap(p, clen + slop);
267 0 : dest += clen;
268 0 : src += clen;
269 0 : size -= clen;
270 : }
271 0 : }
272 :
#else /* CONFIG_MMU */

/*
 * Without an MMU, physical and virtual addresses coincide: the early
 * mapping helpers degenerate to casts and the unmap is a no-op.
 */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}
/* Remap memory, read-only variant (identity as well on !MMU). */
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

/* Nothing was mapped, so nothing to undo. */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */
298 :
299 :
/*
 * Counterpart to early_memremap()/early_memremap_ro(): restores the
 * __iomem qualifier and forwards to early_iounmap().
 */
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}
|