1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Procedures for maintaining information about logical memory blocks.
4 : *
5 : * Peter Bergner, IBM Corp. June 2001.
6 : * Copyright (C) 2001 Peter Bergner.
7 : */
8 :
9 : #include <linux/kernel.h>
10 : #include <linux/slab.h>
11 : #include <linux/init.h>
12 : #include <linux/bitops.h>
13 : #include <linux/poison.h>
14 : #include <linux/pfn.h>
15 : #include <linux/debugfs.h>
16 : #include <linux/kmemleak.h>
17 : #include <linux/seq_file.h>
18 : #include <linux/memblock.h>
19 :
20 : #include <asm/sections.h>
21 : #include <linux/io.h>
22 :
23 : #include "internal.h"
24 :
25 : #define INIT_MEMBLOCK_REGIONS 128
26 : #define INIT_PHYSMEM_REGIONS 4
27 :
28 : #ifndef INIT_MEMBLOCK_RESERVED_REGIONS
29 : # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
30 : #endif
31 :
32 : /**
33 : * DOC: memblock overview
34 : *
35 : * Memblock is a method of managing memory regions during the early
36 : * boot period when the usual kernel memory allocators are not up and
37 : * running.
38 : *
39 : * Memblock views the system memory as collections of contiguous
40 : * regions. There are several types of these collections:
41 : *
42 : * * ``memory`` - describes the physical memory available to the
43 : * kernel; this may differ from the actual physical memory installed
44 : * in the system, for instance when the memory is restricted with
45 : * the ``mem=`` command line parameter
46 : * * ``reserved`` - describes the regions that were allocated
47 : * * ``physmem`` - describes the actual physical memory available during
48 : * boot regardless of the possible restrictions and memory hot(un)plug;
49 : * the ``physmem`` type is only available on some architectures.
50 : *
51 : * Each region is represented by struct memblock_region that
52 : * defines the region extents, its attributes and NUMA node id on NUMA
53 : * systems. Every memory type is described by the struct memblock_type
54 : * which contains an array of memory regions along with
55 : * the allocator metadata. The "memory" and "reserved" types are nicely
56 : * wrapped with struct memblock. This structure is statically
57 : * initialized at build time. The region arrays are initially sized to
58 : * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
59 : * for "reserved". The region array for "physmem" is initially sized to
60 : * %INIT_PHYSMEM_REGIONS.
61 : * Calling memblock_allow_resize() enables automatic resizing of the region
62 : * arrays during addition of new regions. This feature should be used
63 : * with care so that memory allocated for the region array will not
64 : * overlap with areas that should be reserved, for example initrd.
65 : *
66 : * The early architecture setup should tell memblock what the physical
67 : * memory layout is by using memblock_add() or memblock_add_node()
68 : * functions. The first function does not assign the region to a NUMA
69 : * node and it is appropriate for UMA systems. Yet, it is possible to
70 : * use it on NUMA systems as well and assign the region to a NUMA node
71 : * later in the setup process using memblock_set_node(). The
72 : * memblock_add_node() performs such an assignment directly.
73 : *
74 : * Once memblock is set up, memory can be allocated using one of the
75 : * API variants:
76 : *
77 : * * memblock_phys_alloc*() - these functions return the **physical**
78 : * address of the allocated memory
79 : * * memblock_alloc*() - these functions return the **virtual** address
80 : * of the allocated memory.
81 : *
82 : * Note that both API variants use implicit assumptions about allowed
83 : * memory ranges and the fallback methods. Consult the documentation
84 : * of memblock_alloc_internal() and memblock_alloc_range_nid()
85 : * functions for a more elaborate description.
86 : *
87 : * As the system boot progresses, the architecture specific mem_init()
88 : * function frees all the memory to the buddy page allocator.
89 : *
90 : * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
91 : * memblock data structures (except "physmem") will be discarded after the
92 : * system initialization completes.
93 : */
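/*
 * Illustrative sketch (editor addition, not part of the original file):
 * a minimal early-boot sequence, assuming hypothetical firmware-provided
 * values base, size, initrd_base and initrd_size. It registers RAM,
 * protects the initrd, allows the region arrays to grow and then makes a
 * zeroed, virtually-addressed allocation:
 *
 *	memblock_add(base, size);
 *	memblock_reserve(initrd_base, initrd_size);
 *	memblock_allow_resize();
 *	ptr = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 */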
94 :
95 : #ifndef CONFIG_NEED_MULTIPLE_NODES
96 : struct pglist_data __refdata contig_page_data;
97 : EXPORT_SYMBOL(contig_page_data);
98 : #endif
99 :
100 : unsigned long max_low_pfn;
101 : unsigned long min_low_pfn;
102 : unsigned long max_pfn;
103 : unsigned long long max_possible_pfn;
104 :
105 : static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
106 : static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
107 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
108 : static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
109 : #endif
110 :
111 : struct memblock memblock __initdata_memblock = {
112 : .memory.regions = memblock_memory_init_regions,
113 : .memory.cnt = 1, /* empty dummy entry */
114 : .memory.max = INIT_MEMBLOCK_REGIONS,
115 : .memory.name = "memory",
116 :
117 : .reserved.regions = memblock_reserved_init_regions,
118 : .reserved.cnt = 1, /* empty dummy entry */
119 : .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
120 : .reserved.name = "reserved",
121 :
122 : .bottom_up = false,
123 : .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
124 : };
125 :
126 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
127 : struct memblock_type physmem = {
128 : .regions = memblock_physmem_init_regions,
129 : .cnt = 1, /* empty dummy entry */
130 : .max = INIT_PHYSMEM_REGIONS,
131 : .name = "physmem",
132 : };
133 : #endif
134 :
135 : /*
136 : * keep a pointer to &memblock.memory in the text section to use it in
137 : * __next_mem_range() and its helpers.
138 : * For architectures that do not keep memblock data after init, this
139 : * pointer will be reset to NULL at memblock_discard()
140 : */
141 : static __refdata struct memblock_type *memblock_memory = &memblock.memory;
142 :
143 : #define for_each_memblock_type(i, memblock_type, rgn) \
144 : for (i = 0, rgn = &memblock_type->regions[0]; \
145 : i < memblock_type->cnt; \
146 : i++, rgn = &memblock_type->regions[i])
147 :
148 : #define memblock_dbg(fmt, ...) \
149 : do { \
150 : if (memblock_debug) \
151 : pr_info(fmt, ##__VA_ARGS__); \
152 : } while (0)
153 :
154 : static int memblock_debug __initdata_memblock;
155 : static bool system_has_some_mirror __initdata_memblock = false;
156 : static int memblock_can_resize __initdata_memblock;
157 : static int memblock_memory_in_slab __initdata_memblock = 0;
158 : static int memblock_reserved_in_slab __initdata_memblock = 0;
159 :
160 364 : static enum memblock_flags __init_memblock choose_memblock_flags(void)
161 : {
162 364 : return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
163 : }
164 :
165 : /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
166 392 : static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
167 : {
168 392 : return *size = min(*size, PHYS_ADDR_MAX - base);
169 : }
170 :
171 : /*
172 : * Address comparison utilities
173 : */
174 1 : static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
175 : phys_addr_t base2, phys_addr_t size2)
176 : {
177 1 : return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
178 : }
179 :
180 1 : bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
181 : phys_addr_t base, phys_addr_t size)
182 : {
183 1 : unsigned long i;
184 :
185 1 : for (i = 0; i < type->cnt; i++)
186 1 : if (memblock_addrs_overlap(base, size, type->regions[i].base,
187 1 : type->regions[i].size))
188 : break;
189 1 : return i < type->cnt;
190 : }
191 :
192 : /**
193 : * __memblock_find_range_bottom_up - find free area utility in bottom-up
194 : * @start: start of candidate range
195 : * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
196 : * %MEMBLOCK_ALLOC_ACCESSIBLE
197 : * @size: size of free area to find
198 : * @align: alignment of free area to find
199 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
200 : * @flags: pick from blocks based on memory attributes
201 : *
202 : * Utility called from memblock_find_in_range_node(), find free area bottom-up.
203 : *
204 : * Return:
205 : * Found address on success, 0 on failure.
206 : */
207 : static phys_addr_t __init_memblock
208 0 : __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
209 : phys_addr_t size, phys_addr_t align, int nid,
210 : enum memblock_flags flags)
211 : {
212 0 : phys_addr_t this_start, this_end, cand;
213 0 : u64 i;
214 :
215 0 : for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
216 0 : this_start = clamp(this_start, start, end);
217 0 : this_end = clamp(this_end, start, end);
218 :
219 0 : cand = round_up(this_start, align);
220 0 : if (cand < this_end && this_end - cand >= size)
221 0 : return cand;
222 : }
223 :
224 : return 0;
225 : }
226 :
227 : /**
228 : * __memblock_find_range_top_down - find free area utility, in top-down
229 : * @start: start of candidate range
230 : * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
231 : * %MEMBLOCK_ALLOC_ACCESSIBLE
232 : * @size: size of free area to find
233 : * @align: alignment of free area to find
234 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
235 : * @flags: pick from blocks based on memory attributes
236 : *
237 : * Utility called from memblock_find_in_range_node(), find free area top-down.
238 : *
239 : * Return:
240 : * Found address on success, 0 on failure.
241 : */
242 : static phys_addr_t __init_memblock
243 364 : __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
244 : phys_addr_t size, phys_addr_t align, int nid,
245 : enum memblock_flags flags)
246 : {
247 364 : phys_addr_t this_start, this_end, cand;
248 364 : u64 i;
249 :
250 922 : for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
251 : NULL) {
252 922 : this_start = clamp(this_start, start, end);
253 922 : this_end = clamp(this_end, start, end);
254 :
255 922 : if (this_end < size)
256 0 : continue;
257 :
258 922 : cand = round_down(this_end - size, align);
259 922 : if (cand >= this_start)
260 364 : return cand;
261 : }
262 :
263 : return 0;
264 : }
265 :
266 : /**
267 : * memblock_find_in_range_node - find free area in given range and node
268 : * @size: size of free area to find
269 : * @align: alignment of free area to find
270 : * @start: start of candidate range
271 : * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
272 : * %MEMBLOCK_ALLOC_ACCESSIBLE
273 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
274 : * @flags: pick from blocks based on memory attributes
275 : *
276 : * Find @size free area aligned to @align in the specified range and node.
277 : *
278 : * Return:
279 : * Found address on success, 0 on failure.
280 : */
281 364 : static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
282 : phys_addr_t align, phys_addr_t start,
283 : phys_addr_t end, int nid,
284 : enum memblock_flags flags)
285 : {
286 : /* pump up @end */
287 364 : if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
288 : end == MEMBLOCK_ALLOC_KASAN)
289 362 : end = memblock.current_limit;
290 :
291 : /* avoid allocating the first page */
292 364 : start = max_t(phys_addr_t, start, PAGE_SIZE);
293 364 : end = max(start, end);
294 :
295 364 : if (memblock_bottom_up())
296 0 : return __memblock_find_range_bottom_up(start, end, size, align,
297 : nid, flags);
298 : else
299 364 : return __memblock_find_range_top_down(start, end, size, align,
300 : nid, flags);
301 : }
302 :
303 : /**
304 : * memblock_find_in_range - find free area in given range
305 : * @start: start of candidate range
306 : * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
307 : * %MEMBLOCK_ALLOC_ACCESSIBLE
308 : * @size: size of free area to find
309 : * @align: alignment of free area to find
310 : *
311 : * Find @size free area aligned to @align in the specified range.
312 : *
313 : * Return:
314 : * Found address on success, 0 on failure.
315 : */
316 2 : phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
317 : phys_addr_t end, phys_addr_t size,
318 : phys_addr_t align)
319 : {
320 2 : phys_addr_t ret;
321 2 : enum memblock_flags flags = choose_memblock_flags();
322 :
323 2 : again:
324 2 : ret = memblock_find_in_range_node(size, align, start, end,
325 : NUMA_NO_NODE, flags);
326 :
327 2 : if (!ret && (flags & MEMBLOCK_MIRROR)) {
328 0 : pr_warn("Could not allocate %pap bytes of mirrored memory\n",
329 : &size);
330 0 : flags &= ~MEMBLOCK_MIRROR;
331 0 : goto again;
332 : }
333 :
334 2 : return ret;
335 : }
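/*
 * Illustrative sketch (editor addition): memblock_find_in_range() only
 * locates a candidate area, it does not claim it. Callers such as
 * memblock_double_array() pair it with memblock_reserve():
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, memblock.current_limit,
 *				      size, PAGE_SIZE);
 *	if (addr)
 *		BUG_ON(memblock_reserve(addr, size));
 */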
336 :
337 6 : static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
338 : {
339 6 : type->total_size -= type->regions[r].size;
340 6 : memmove(&type->regions[r], &type->regions[r + 1],
341 6 : (type->cnt - (r + 1)) * sizeof(type->regions[r]));
342 6 : type->cnt--;
343 :
344 : /* Special case for empty arrays */
345 6 : if (type->cnt == 0) {
346 0 : WARN_ON(type->total_size != 0);
347 0 : type->cnt = 1;
348 0 : type->regions[0].base = 0;
349 0 : type->regions[0].size = 0;
350 0 : type->regions[0].flags = 0;
351 0 : memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
352 : }
353 6 : }
354 :
355 : #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
356 : /**
357 : * memblock_discard - discard memory and reserved arrays if they were allocated
358 : */
359 1 : void __init memblock_discard(void)
360 : {
361 1 : phys_addr_t addr, size;
362 :
363 1 : if (memblock.reserved.regions != memblock_reserved_init_regions) {
364 0 : addr = __pa(memblock.reserved.regions);
365 0 : size = PAGE_ALIGN(sizeof(struct memblock_region) *
366 : memblock.reserved.max);
367 0 : __memblock_free_late(addr, size);
368 : }
369 :
370 1 : if (memblock.memory.regions != memblock_memory_init_regions) {
371 0 : addr = __pa(memblock.memory.regions);
372 0 : size = PAGE_ALIGN(sizeof(struct memblock_region) *
373 : memblock.memory.max);
374 0 : __memblock_free_late(addr, size);
375 : }
376 :
377 1 : memblock_memory = NULL;
378 1 : }
379 : #endif
380 :
381 : /**
382 : * memblock_double_array - double the size of the memblock regions array
383 : * @type: memblock type of the regions array being doubled
384 : * @new_area_start: starting address of memory range to avoid overlap with
385 : * @new_area_size: size of memory range to avoid overlap with
386 : *
387 : * Double the size of the @type regions array. If memblock is being used to
388 : * allocate memory for a new reserved regions array and there is a previously
389 : * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
390 : * waiting to be reserved, ensure the memory used by the new array does
391 : * not overlap.
392 : *
393 : * Return:
394 : * 0 on success, -1 on failure.
395 : */
396 0 : static int __init_memblock memblock_double_array(struct memblock_type *type,
397 : phys_addr_t new_area_start,
398 : phys_addr_t new_area_size)
399 : {
400 0 : struct memblock_region *new_array, *old_array;
401 0 : phys_addr_t old_alloc_size, new_alloc_size;
402 0 : phys_addr_t old_size, new_size, addr, new_end;
403 0 : int use_slab = slab_is_available();
404 0 : int *in_slab;
405 :
406 : /* We don't allow resizing until we know about the reserved regions
407 : * of memory that aren't suitable for allocation
408 : */
409 0 : if (!memblock_can_resize)
410 : return -1;
411 :
412 : /* Calculate new doubled size */
413 0 : old_size = type->max * sizeof(struct memblock_region);
414 0 : new_size = old_size << 1;
415 : /*
416 : * We need to allocated new one align to PAGE_SIZE,
417 : * so we can free them completely later.
418 : */
419 0 : old_alloc_size = PAGE_ALIGN(old_size);
420 0 : new_alloc_size = PAGE_ALIGN(new_size);
421 :
422 : /* Retrieve the slab flag */
423 0 : if (type == &memblock.memory)
424 : in_slab = &memblock_memory_in_slab;
425 : else
426 0 : in_slab = &memblock_reserved_in_slab;
427 :
428 : /* Try to find some space for it */
429 0 : if (use_slab) {
430 0 : new_array = kmalloc(new_size, GFP_KERNEL);
431 0 : addr = new_array ? __pa(new_array) : 0;
432 : } else {
433 : /* only exclude range when trying to double reserved.regions */
434 0 : if (type != &memblock.reserved)
435 0 : new_area_start = new_area_size = 0;
436 :
437 0 : addr = memblock_find_in_range(new_area_start + new_area_size,
438 : memblock.current_limit,
439 : new_alloc_size, PAGE_SIZE);
440 0 : if (!addr && new_area_size)
441 0 : addr = memblock_find_in_range(0,
442 0 : min(new_area_start, memblock.current_limit),
443 : new_alloc_size, PAGE_SIZE);
444 :
445 0 : new_array = addr ? __va(addr) : NULL;
446 : }
447 0 : if (!addr) {
448 0 : pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
449 : type->name, type->max, type->max * 2);
450 0 : return -1;
451 : }
452 :
453 0 : new_end = addr + new_size - 1;
454 0 : memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
455 : type->name, type->max * 2, &addr, &new_end);
456 :
457 : /*
458 : * Found space, we now need to move the array over before we add the
459 : * reserved region since it may be our reserved array itself that is
460 : * full.
461 : */
462 0 : memcpy(new_array, type->regions, old_size);
463 0 : memset(new_array + type->max, 0, old_size);
464 0 : old_array = type->regions;
465 0 : type->regions = new_array;
466 0 : type->max <<= 1;
467 :
468 : /* Free old array. We needn't free it if the array is the static one */
469 0 : if (*in_slab)
470 0 : kfree(old_array);
471 0 : else if (old_array != memblock_memory_init_regions &&
472 : old_array != memblock_reserved_init_regions)
473 0 : memblock_free(__pa(old_array), old_alloc_size);
474 :
475 : /*
476 : * Reserve the new array if that comes from the memblock. Otherwise, we
477 : * needn't do it
478 : */
479 0 : if (!use_slab)
480 0 : BUG_ON(memblock_reserve(addr, new_alloc_size));
481 :
482 : /* Update slab flag */
483 0 : *in_slab = use_slab;
484 :
485 0 : return 0;
486 : }
487 :
488 : /**
489 : * memblock_merge_regions - merge neighboring compatible regions
490 : * @type: memblock type to scan
491 : *
492 : * Scan @type and merge neighboring compatible regions.
493 : */
494 377 : static void __init_memblock memblock_merge_regions(struct memblock_type *type)
495 : {
496 377 : int i = 0;
497 :
498 : /* cnt never goes below 1 */
499 2764 : while (i < type->cnt - 1) {
500 2387 : struct memblock_region *this = &type->regions[i];
501 2387 : struct memblock_region *next = &type->regions[i + 1];
502 :
503 2387 : if (this->base + this->size != next->base ||
504 355 : memblock_get_region_node(this) !=
505 355 : memblock_get_region_node(next) ||
506 355 : this->flags != next->flags) {
507 2032 : BUG_ON(this->base + this->size > next->base);
508 2032 : i++;
509 2032 : continue;
510 : }
511 :
512 355 : this->size += next->size;
513 : /* move forward from next + 1, index of which is i + 2 */
514 355 : memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
515 355 : type->cnt--;
516 : }
517 377 : }
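/*
 * Worked example (editor addition): with two neighbouring regions
 * [0x1000-0x2000) and [0x2000-0x3000) that share node id and flags, one
 * pass of the loop above fuses them into [0x1000-0x3000) and drops cnt
 * by one; a gap, a different nid or different flags keeps them separate.
 */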
518 :
519 : /**
520 : * memblock_insert_region - insert new memblock region
521 : * @type: memblock type to insert into
522 : * @idx: index for the insertion point
523 : * @base: base address of the new region
524 : * @size: size of the new region
525 : * @nid: node id of the new region
526 : * @flags: flags of the new region
527 : *
528 : * Insert new memblock region [@base, @base + @size) into @type at @idx.
529 : * @type must already have extra room to accommodate the new region.
530 : */
531 381 : static void __init_memblock memblock_insert_region(struct memblock_type *type,
532 : int idx, phys_addr_t base,
533 : phys_addr_t size,
534 : int nid,
535 : enum memblock_flags flags)
536 : {
537 381 : struct memblock_region *rgn = &type->regions[idx];
538 :
539 381 : BUG_ON(type->cnt >= type->max);
540 381 : memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
541 381 : rgn->base = base;
542 381 : rgn->size = size;
543 381 : rgn->flags = flags;
544 381 : memblock_set_region_node(rgn, nid);
545 381 : type->cnt++;
546 381 : type->total_size += size;
547 381 : }
548 :
549 : /**
550 : * memblock_add_range - add new memblock region
551 : * @type: memblock type to add new region into
552 : * @base: base address of the new region
553 : * @size: size of the new region
554 : * @nid: nid of the new region
555 : * @flags: flags of the new region
556 : *
557 : * Add new memblock region [@base, @base + @size) into @type. The new region
558 : * is allowed to overlap with existing ones - overlaps don't affect already
559 : * existing regions. @type is guaranteed to be minimal (all neighbouring
560 : * compatible regions are merged) after the addition.
561 : *
562 : * Return:
563 : * 0 on success, -errno on failure.
564 : */
565 372 : static int __init_memblock memblock_add_range(struct memblock_type *type,
566 : phys_addr_t base, phys_addr_t size,
567 : int nid, enum memblock_flags flags)
568 : {
569 372 : bool insert = false;
570 372 : phys_addr_t obase = base;
571 372 : phys_addr_t end = base + memblock_cap_size(base, &size);
572 372 : int idx, nr_new;
573 372 : struct memblock_region *rgn;
574 :
575 372 : if (!size)
576 : return 0;
577 :
578 : /* special case for empty array */
579 372 : if (type->regions[0].size == 0) {
580 4 : WARN_ON(type->cnt != 1 || type->total_size);
581 2 : type->regions[0].base = base;
582 2 : type->regions[0].size = size;
583 2 : type->regions[0].flags = flags;
584 2 : memblock_set_region_node(&type->regions[0], nid);
585 2 : type->total_size = size;
586 2 : return 0;
587 : }
588 370 : repeat:
589 : /*
590 : * The following is executed twice. Once with %false @insert and
591 : * then with %true. The first counts the number of regions needed
592 : * to accommodate the new area. The second actually inserts them.
593 : */
594 740 : base = obase;
595 740 : nr_new = 0;
596 :
597 3644 : for_each_memblock_type(idx, type, rgn) {
598 3638 : phys_addr_t rbase = rgn->base;
599 3638 : phys_addr_t rend = rbase + rgn->size;
600 :
601 3638 : if (rbase >= end)
602 : break;
603 2904 : if (rend <= base)
604 2900 : continue;
605 : /*
606 : * @rgn overlaps. If it separates the lower part of new
607 : * area, insert that portion.
608 : */
609 4 : if (rbase > base) {
610 : #ifdef CONFIG_NEED_MULTIPLE_NODES
611 2 : WARN_ON(nid != memblock_get_region_node(rgn));
612 : #endif
613 2 : WARN_ON(flags != rgn->flags);
614 2 : nr_new++;
615 2 : if (insert)
616 1 : memblock_insert_region(type, idx++, base,
617 : rbase - base, nid,
618 : flags);
619 : }
620 : /* area below @rend is dealt with, forget about it */
621 4 : base = min(rend, end);
622 : }
623 :
624 : /* insert the remaining portion */
625 740 : if (base < end) {
626 740 : nr_new++;
627 740 : if (insert)
628 370 : memblock_insert_region(type, idx, base, end - base,
629 : nid, flags);
630 : }
631 :
632 740 : if (!nr_new)
633 : return 0;
634 :
635 : /*
636 : * If this was the first round, resize array and repeat for actual
637 : * insertions; otherwise, merge and return.
638 : */
639 740 : if (!insert) {
640 370 : while (type->cnt + nr_new > type->max)
641 0 : if (memblock_double_array(type, obase, size) < 0)
642 : return -ENOMEM;
643 370 : insert = true;
644 370 : goto repeat;
645 : } else {
646 370 : memblock_merge_regions(type);
647 370 : return 0;
648 : }
649 : }
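/*
 * Worked trace (editor addition): with existing regions [10,20) and
 * [30,40), adding [15,35) runs the loop twice. The counting pass
 * (insert == false) finds a single uncovered piece, [20,30), so
 * nr_new == 1; the insertion pass adds [20,30), and
 * memblock_merge_regions() then fuses [10,20), [20,30) and [30,40)
 * into one region [10,40), assuming matching nid and flags.
 */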
650 :
651 : /**
652 : * memblock_add_node - add new memblock region within a NUMA node
653 : * @base: base address of the new region
654 : * @size: size of the new region
655 : * @nid: nid of the new region
656 : *
657 : * Add new memblock region [@base, @base + @size) to the "memory"
658 : * type. See memblock_add_range() description for more details.
659 : *
660 : * Return:
661 : * 0 on success, -errno on failure.
662 : */
663 0 : int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
664 : int nid)
665 : {
666 0 : return memblock_add_range(&memblock.memory, base, size, nid, 0);
667 : }
668 :
669 : /**
670 : * memblock_add - add new memblock region
671 : * @base: base address of the new region
672 : * @size: size of the new region
673 : *
674 : * Add new memblock region [@base, @base + @size) to the "memory"
675 : * type. See memblock_add_range() description for more details.
676 : *
677 : * Return:
678 : * 0 on success, -errno on failure.
679 : */
680 2 : int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
681 : {
682 2 : phys_addr_t end = base + size - 1;
683 :
684 2 : memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
685 : &base, &end, (void *)_RET_IP_);
686 :
687 2 : return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
688 : }
689 :
690 : /**
691 : * memblock_isolate_range - isolate given range into disjoint memblocks
692 : * @type: memblock type to isolate range for
693 : * @base: base of range to isolate
694 : * @size: size of range to isolate
695 : * @start_rgn: out parameter for the start of isolated region
696 : * @end_rgn: out parameter for the end of isolated region
697 : *
698 : * Walk @type and ensure that regions don't cross the boundaries defined by
699 : * [@base, @base + @size). Crossing regions are split at the boundaries,
700 : * which may create at most two more regions. The index of the first
701 : * region inside the range is returned in *@start_rgn and the index one past the last in *@end_rgn.
702 : *
703 : * Return:
704 : * 0 on success, -errno on failure.
705 : */
706 13 : static int __init_memblock memblock_isolate_range(struct memblock_type *type,
707 : phys_addr_t base, phys_addr_t size,
708 : int *start_rgn, int *end_rgn)
709 : {
710 13 : phys_addr_t end = base + memblock_cap_size(base, &size);
711 13 : int idx;
712 13 : struct memblock_region *rgn;
713 :
714 13 : *start_rgn = *end_rgn = 0;
715 :
716 13 : if (!size)
717 : return 0;
718 :
719 : /* we'll create at most two more regions */
720 13 : while (type->cnt + 2 > type->max)
721 0 : if (memblock_double_array(type, base, size) < 0)
722 : return -ENOMEM;
723 :
724 79 : for_each_memblock_type(idx, type, rgn) {
725 72 : phys_addr_t rbase = rgn->base;
726 72 : phys_addr_t rend = rbase + rgn->size;
727 :
728 72 : if (rbase >= end)
729 : break;
730 66 : if (rend <= base)
731 34 : continue;
732 :
733 32 : if (rbase < base) {
734 : /*
735 : * @rgn intersects from below. Split and continue
736 : * to process the next region - the new top half.
737 : */
738 5 : rgn->base = base;
739 5 : rgn->size -= base - rbase;
740 5 : type->total_size -= base - rbase;
741 5 : memblock_insert_region(type, idx, rbase, base - rbase,
742 : memblock_get_region_node(rgn),
743 : rgn->flags);
744 27 : } else if (rend > end) {
745 : /*
746 : * @rgn intersects from above. Split and redo the
747 : * current region - the new bottom half.
748 : */
749 5 : rgn->base = end;
750 5 : rgn->size -= end - rbase;
751 5 : type->total_size -= end - rbase;
752 5 : memblock_insert_region(type, idx--, rbase, end - rbase,
753 : memblock_get_region_node(rgn),
754 : rgn->flags);
755 : } else {
756 : /* @rgn is fully contained, record it */
757 22 : if (!*end_rgn)
758 13 : *start_rgn = idx;
759 22 : *end_rgn = idx + 1;
760 : }
761 : }
762 :
763 : return 0;
764 : }
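/*
 * Worked example (editor addition): isolating [30,70) in a type holding a
 * single region [0,100) leaves three regions [0,30), [30,70) and [70,100),
 * with *start_rgn == 1 and *end_rgn == 2. Callers then iterate
 * "for (i = start_rgn; i < end_rgn; i++)" over exactly the isolated
 * regions, as memblock_remove_range() below (in reverse) and
 * memblock_setclr_flag() do.
 */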
765 :
766 6 : static int __init_memblock memblock_remove_range(struct memblock_type *type,
767 : phys_addr_t base, phys_addr_t size)
768 : {
769 6 : int start_rgn, end_rgn;
770 6 : int i, ret;
771 :
772 6 : ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
773 6 : if (ret)
774 : return ret;
775 :
776 12 : for (i = end_rgn - 1; i >= start_rgn; i--)
777 6 : memblock_remove_region(type, i);
778 : return 0;
779 : }
780 :
781 0 : int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
782 : {
783 0 : phys_addr_t end = base + size - 1;
784 :
785 0 : memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
786 : &base, &end, (void *)_RET_IP_);
787 :
788 0 : return memblock_remove_range(&memblock.memory, base, size);
789 : }
790 :
791 : /**
792 : * memblock_free - free boot memory block
793 : * @base: phys starting address of the boot memory block
794 : * @size: size of the boot memory block in bytes
795 : *
796 : * Free boot memory block previously allocated by memblock_alloc_xx() API.
797 : * The freed memory will not be released to the buddy allocator.
798 : */
799 6 : int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
800 : {
801 6 : phys_addr_t end = base + size - 1;
802 :
803 6 : memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
804 : &base, &end, (void *)_RET_IP_);
805 :
806 6 : kmemleak_free_part_phys(base, size);
807 6 : return memblock_remove_range(&memblock.reserved, base, size);
808 : }
809 :
810 370 : int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
811 : {
812 370 : phys_addr_t end = base + size - 1;
813 :
814 370 : memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
815 : &base, &end, (void *)_RET_IP_);
816 :
817 370 : return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
818 : }
819 :
820 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
821 : int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
822 : {
823 : phys_addr_t end = base + size - 1;
824 :
825 : memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
826 : &base, &end, (void *)_RET_IP_);
827 :
828 : return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
829 : }
830 : #endif
831 :
832 : /**
833 : * memblock_setclr_flag - set or clear flag for a memory region
834 : * @base: base address of the region
835 : * @size: size of the region
836 : * @set: set or clear the flag
837 : * @flag: the flag to update
838 : *
839 : * This function isolates the region [@base, @base + @size), and sets/clears the flag
840 : *
841 : * Return: 0 on success, -errno on failure.
842 : */
843 3 : static int __init_memblock memblock_setclr_flag(phys_addr_t base,
844 : phys_addr_t size, int set, int flag)
845 : {
846 3 : struct memblock_type *type = &memblock.memory;
847 3 : int i, ret, start_rgn, end_rgn;
848 :
849 3 : ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
850 3 : if (ret)
851 : return ret;
852 :
853 9 : for (i = start_rgn; i < end_rgn; i++) {
854 6 : struct memblock_region *r = &type->regions[i];
855 :
856 6 : if (set)
857 0 : r->flags |= flag;
858 : else
859 6 : r->flags &= ~flag;
860 : }
861 :
862 3 : memblock_merge_regions(type);
863 3 : return 0;
864 : }
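/*
 * Illustrative sketch (editor addition): the flag helpers below are thin
 * wrappers around memblock_setclr_flag(). For example, platform code that
 * learns from firmware that a range is mirrored (base/size hypothetical)
 * would call:
 *
 *	memblock_mark_mirror(base, size);
 *
 * after which choose_memblock_flags() returns MEMBLOCK_MIRROR and
 * allocations are tried from mirrored regions first.
 */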
865 :
866 : /**
867 : * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
868 : * @base: the base phys addr of the region
869 : * @size: the size of the region
870 : *
871 : * Return: 0 on success, -errno on failure.
872 : */
873 0 : int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
874 : {
875 0 : return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
876 : }
877 :
878 : /**
879 : * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
880 : * @base: the base phys addr of the region
881 : * @size: the size of the region
882 : *
883 : * Return: 0 on success, -errno on failure.
884 : */
885 3 : int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
886 : {
887 3 : return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
888 : }
889 :
890 : /**
891 : * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
892 : * @base: the base phys addr of the region
893 : * @size: the size of the region
894 : *
895 : * Return: 0 on success, -errno on failure.
896 : */
897 0 : int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
898 : {
899 0 : system_has_some_mirror = true;
900 :
901 0 : return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
902 : }
903 :
904 : /**
905 : * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
906 : * @base: the base phys addr of the region
907 : * @size: the size of the region
908 : *
909 : * Return: 0 on success, -errno on failure.
910 : */
911 0 : int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
912 : {
913 0 : return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
914 : }
915 :
916 : /**
917 : * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
918 : * @base: the base phys addr of the region
919 : * @size: the size of the region
920 : *
921 : * Return: 0 on success, -errno on failure.
922 : */
923 0 : int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
924 : {
925 0 : return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
926 : }
927 :
928 968 : static bool should_skip_region(struct memblock_type *type,
929 : struct memblock_region *m,
930 : int nid, int flags)
931 : {
932 968 : int m_nid = memblock_get_region_node(m);
933 :
934 : /* we never skip regions when iterating memblock.reserved or physmem */
935 968 : if (type != memblock_memory)
936 : return false;
937 :
938 : /* only memory regions are associated with nodes, check it */
939 948 : if (nid != NUMA_NO_NODE && nid != m_nid)
940 : return true;
941 :
942 : /* skip hotpluggable memory regions if needed */
943 948 : if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
944 : return true;
945 :
946 : /* if we want mirror memory skip non-mirror memory regions */
947 948 : if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
948 : return true;
949 :
950 : /* skip nomap memory unless we were asked for it explicitly */
951 948 : if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
952 0 : return true;
953 :
954 : return false;
955 : }
956 :
957 : /**
958 : * __next_mem_range - next function for for_each_free_mem_range() etc.
959 : * @idx: pointer to u64 loop variable
960 : * @nid: node selector, %NUMA_NO_NODE for all nodes
961 : * @flags: pick from blocks based on memory attributes
962 : * @type_a: pointer to memblock_type from where the range is taken
963 : * @type_b: pointer to memblock_type which excludes memory from being taken
964 : * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
965 : * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
966 : * @out_nid: ptr to int for nid of the range, can be %NULL
967 : *
968 : * Find the first area from *@idx which matches @nid, fill the out
969 : * parameters, and update *@idx for the next iteration. The lower 32bit of
970 : * *@idx contains index into type_a and the upper 32bit indexes the
971 : * areas before each region in type_b. For example, if type_b regions
972 : * look like the following,
973 : *
974 : * 0:[0-16), 1:[32-48), 2:[128-130)
975 : *
976 : * The upper 32bit indexes the following regions.
977 : *
978 : * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
979 : *
980 : * As both region arrays are sorted, the function advances the two indices
981 : * in lockstep and returns each intersection.
982 : */
983 45 : void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
984 : struct memblock_type *type_a,
985 : struct memblock_type *type_b, phys_addr_t *out_start,
986 : phys_addr_t *out_end, int *out_nid)
987 : {
988 45 : int idx_a = *idx & 0xffffffff;
989 45 : int idx_b = *idx >> 32;
990 :
991 45 : if (WARN_ONCE(nid == MAX_NUMNODES,
992 : "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
993 0 : nid = NUMA_NO_NODE;
994 :
995 49 : for (; idx_a < type_a->cnt; idx_a++) {
996 46 : struct memblock_region *m = &type_a->regions[idx_a];
997 :
998 46 : phys_addr_t m_start = m->base;
999 46 : phys_addr_t m_end = m->base + m->size;
1000 46 : int m_nid = memblock_get_region_node(m);
1001 :
1002 46 : if (should_skip_region(type_a, m, nid, flags))
1003 0 : continue;
1004 :
1005 46 : if (!type_b) {
1006 20 : if (out_start)
1007 20 : *out_start = m_start;
1008 20 : if (out_end)
1009 20 : *out_end = m_end;
1010 20 : if (out_nid)
1011 0 : *out_nid = m_nid;
1012 20 : idx_a++;
1013 20 : *idx = (u32)idx_a | (u64)idx_b << 32;
1014 20 : return;
1015 : }
1016 :
1017 : /* scan areas before each reservation */
1018 28 : for (; idx_b < type_b->cnt + 1; idx_b++) {
1019 28 : struct memblock_region *r;
1020 28 : phys_addr_t r_start;
1021 28 : phys_addr_t r_end;
1022 :
1023 28 : r = &type_b->regions[idx_b];
1024 28 : r_start = idx_b ? r[-1].base + r[-1].size : 0;
1025 56 : r_end = idx_b < type_b->cnt ?
1026 28 : r->base : PHYS_ADDR_MAX;
1027 :
1028 : /*
1029 : * if idx_b advanced past idx_a,
1030 : * break out to advance idx_a
1031 : */
1032 28 : if (r_start >= m_end)
1033 : break;
1034 : /* if the two regions intersect, we're done */
1035 24 : if (m_start < r_end) {
1036 22 : if (out_start)
1037 22 : *out_start =
1038 22 : max(m_start, r_start);
1039 22 : if (out_end)
1040 22 : *out_end = min(m_end, r_end);
1041 22 : if (out_nid)
1042 0 : *out_nid = m_nid;
1043 : /*
1044 : * The region which ends first is
1045 : * advanced for the next iteration.
1046 : */
1047 22 : if (m_end <= r_end)
1048 0 : idx_a++;
1049 : else
1050 22 : idx_b++;
1051 22 : *idx = (u32)idx_a | (u64)idx_b << 32;
1052 22 : return;
1053 : }
1054 : }
1055 : }
1056 :
1057 : /* signal end of iteration */
1058 3 : *idx = ULLONG_MAX;
1059 : }
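/*
 * Worked example (editor addition): with the type_b regions from the
 * comment above, 0:[0-16), 1:[32-48) and 2:[128-130), and a single type_a
 * region [8-140), successive calls yield the uncovered intersections
 * [16-32), [48-128) and [130-140). Between calls the packed state in *idx
 * decodes as:
 *
 *	idx_a = *idx & 0xffffffff;	(index into type_a)
 *	idx_b = *idx >> 32;		(index of the gap before type_b[idx_b])
 */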
1060 :
1061 : /**
1062 : * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1063 : *
1064 : * @idx: pointer to u64 loop variable
1065 : * @nid: node selector, %NUMA_NO_NODE for all nodes
1066 : * @flags: pick from blocks based on memory attributes
1067 : * @type_a: pointer to memblock_type from where the range is taken
1068 : * @type_b: pointer to memblock_type which excludes memory from being taken
1069 : * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1070 : * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1071 : * @out_nid: ptr to int for nid of the range, can be %NULL
1072 : *
1073 : * Finds the next range from type_a which is not marked as unsuitable
1074 : * in type_b.
1075 : *
1076 : * Reverse of __next_mem_range().
1077 : */
1078 922 : void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1079 : enum memblock_flags flags,
1080 : struct memblock_type *type_a,
1081 : struct memblock_type *type_b,
1082 : phys_addr_t *out_start,
1083 : phys_addr_t *out_end, int *out_nid)
1084 : {
1085 922 : int idx_a = *idx & 0xffffffff;
1086 922 : int idx_b = *idx >> 32;
1087 :
1088 922 : if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1089 0 : nid = NUMA_NO_NODE;
1090 :
1091 922 : if (*idx == (u64)ULLONG_MAX) {
1092 364 : idx_a = type_a->cnt - 1;
1093 364 : if (type_b != NULL)
1094 364 : idx_b = type_b->cnt;
1095 : else
1096 : idx_b = 0;
1097 : }
1098 :
1099 922 : for (; idx_a >= 0; idx_a--) {
1100 922 : struct memblock_region *m = &type_a->regions[idx_a];
1101 :
1102 922 : phys_addr_t m_start = m->base;
1103 922 : phys_addr_t m_end = m->base + m->size;
1104 922 : int m_nid = memblock_get_region_node(m);
1105 :
1106 922 : if (should_skip_region(type_a, m, nid, flags))
1107 0 : continue;
1108 :
1109 922 : if (!type_b) {
1110 0 : if (out_start)
1111 0 : *out_start = m_start;
1112 0 : if (out_end)
1113 0 : *out_end = m_end;
1114 0 : if (out_nid)
1115 0 : *out_nid = m_nid;
1116 0 : idx_a--;
1117 0 : *idx = (u32)idx_a | (u64)idx_b << 32;
1118 0 : return;
1119 : }
1120 :
1121 : /* scan areas before each reservation */
1122 1284 : for (; idx_b >= 0; idx_b--) {
1123 1284 : struct memblock_region *r;
1124 1284 : phys_addr_t r_start;
1125 1284 : phys_addr_t r_end;
1126 :
1127 1284 : r = &type_b->regions[idx_b];
1128 1284 : r_start = idx_b ? r[-1].base + r[-1].size : 0;
1129 2568 : r_end = idx_b < type_b->cnt ?
1130 1284 : r->base : PHYS_ADDR_MAX;
1131 : /*
1132 : * if idx_b advanced past idx_a,
1133 : * break out to advance idx_a
1134 : */
1135 :
1136 1284 : if (r_end <= m_start)
1137 : break;
1138 : /* if the two regions intersect, we're done */
1139 1284 : if (m_end > r_start) {
1140 922 : if (out_start)
1141 922 : *out_start = max(m_start, r_start);
1142 922 : if (out_end)
1143 922 : *out_end = min(m_end, r_end);
1144 922 : if (out_nid)
1145 0 : *out_nid = m_nid;
1146 922 : if (m_start >= r_start)
1147 2 : idx_a--;
1148 : else
1149 920 : idx_b--;
1150 922 : *idx = (u32)idx_a | (u64)idx_b << 32;
1151 922 : return;
1152 : }
1153 : }
1154 : }
1155 : /* signal end of iteration */
1156 0 : *idx = ULLONG_MAX;
1157 : }
1158 :
1159 : /*
1160 : * Common iterator interface used to define for_each_mem_pfn_range().
1161 : */
1162 42 : void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1163 : unsigned long *out_start_pfn,
1164 : unsigned long *out_end_pfn, int *out_nid)
1165 : {
1166 42 : struct memblock_type *type = &memblock.memory;
1167 42 : struct memblock_region *r;
1168 42 : int r_nid;
1169 :
1170 42 : while (++*idx < type->cnt) {
1171 28 : r = &type->regions[*idx];
1172 28 : r_nid = memblock_get_region_node(r);
1173 :
1174 28 : if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1175 0 : continue;
1176 28 : if (nid == MAX_NUMNODES || nid == r_nid)
1177 : break;
1178 : }
1179 42 : if (*idx >= type->cnt) {
1180 14 : *idx = -1;
1181 14 : return;
1182 : }
1183 :
1184 28 : if (out_start_pfn)
1185 28 : *out_start_pfn = PFN_UP(r->base);
1186 28 : if (out_end_pfn)
1187 28 : *out_end_pfn = PFN_DOWN(r->base + r->size);
1188 28 : if (out_nid)
1189 6 : *out_nid = r_nid;
1190 : }
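/*
 * Illustrative sketch (editor addition): this helper backs
 * for_each_mem_pfn_range(). A typical walk over every memory PFN range
 * (do_something() is a hypothetical callback):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		do_something(start_pfn, end_pfn, nid);
 */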
1191 :
1192 : /**
1193 : * memblock_set_node - set node ID on memblock regions
1194 : * @base: base of area to set node ID for
1195 : * @size: size of area to set node ID for
1196 : * @type: memblock type to set node ID for
1197 : * @nid: node ID to set
1198 : *
1199 : * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
1200 : * Regions which cross the area boundaries are split as necessary.
1201 : *
1202 : * Return:
1203 : * 0 on success, -errno on failure.
1204 : */
1205 4 : int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
1206 : struct memblock_type *type, int nid)
1207 : {
1208 : #ifdef CONFIG_NEED_MULTIPLE_NODES
1209 4 : int start_rgn, end_rgn;
1210 4 : int i, ret;
1211 :
1212 4 : ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1213 4 : if (ret)
1214 : return ret;
1215 :
1216 14 : for (i = start_rgn; i < end_rgn; i++)
1217 10 : memblock_set_region_node(&type->regions[i], nid);
1218 :
1219 4 : memblock_merge_regions(type);
1220 : #endif
1221 4 : return 0;
1222 : }
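/*
 * Illustrative sketch (editor addition): the late NUMA binding described
 * in the overview, with base, size and nid as hypothetical values
 * discovered during architecture setup:
 *
 *	memblock_add(base, size);
 *	...
 *	memblock_set_node(base, size, &memblock.memory, nid);
 */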
1223 :
1224 : #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1225 : /**
1226 : * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
1227 : *
1228 : * @idx: pointer to u64 loop variable
1229 : * @zone: zone in which all of the memory blocks reside
1230 : * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
1231 : * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
1232 : *
1233 : * This function is meant to be a zone/pfn specific wrapper for the
1234 : * for_each_mem_range type iterators. Specifically, it is used in the
1235 : * deferred memory init routines, which used to duplicate much of this
1236 : * logic throughout the code. Instead of keeping that logic in multiple
1237 : * locations, it makes more sense to centralize it in this one
1238 : * iterator that does everything those callers need.
1239 : */
1240 : void __init_memblock
1241 : __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1242 : unsigned long *out_spfn, unsigned long *out_epfn)
1243 : {
1244 : int zone_nid = zone_to_nid(zone);
1245 : phys_addr_t spa, epa;
1246 : int nid;
1247 :
1248 : __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1249 : &memblock.memory, &memblock.reserved,
1250 : &spa, &epa, &nid);
1251 :
1252 : while (*idx != U64_MAX) {
1253 : unsigned long epfn = PFN_DOWN(epa);
1254 : unsigned long spfn = PFN_UP(spa);
1255 :
1256 : /*
1257 : * Verify the end is at least past the start of the zone and
1258 : * that we have at least one PFN to initialize.
1259 : */
1260 : if (zone->zone_start_pfn < epfn && spfn < epfn) {
1261 : /* if we went too far just stop searching */
1262 : if (zone_end_pfn(zone) <= spfn) {
1263 : *idx = U64_MAX;
1264 : break;
1265 : }
1266 :
1267 : if (out_spfn)
1268 : *out_spfn = max(zone->zone_start_pfn, spfn);
1269 : if (out_epfn)
1270 : *out_epfn = min(zone_end_pfn(zone), epfn);
1271 :
1272 : return;
1273 : }
1274 :
1275 : __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1276 : &memblock.memory, &memblock.reserved,
1277 : &spa, &epa, &nid);
1278 : }
1279 :
1280 : /* signal end of iteration */
1281 : if (out_spfn)
1282 : *out_spfn = ULONG_MAX;
1283 : if (out_epfn)
1284 : *out_epfn = 0;
1285 : }
1286 :
1287 : #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1288 :
1289 : /**
1290 : * memblock_alloc_range_nid - allocate boot memory block
1291 : * @size: size of memory block to be allocated in bytes
1292 : * @align: alignment of the region and block's size
1293 : * @start: the lower bound of the memory region to allocate (phys address)
1294 : * @end: the upper bound of the memory region to allocate (phys address)
1295 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1296 : * @exact_nid: control the allocation fall back to other nodes
1297 : *
1298 : * The allocation is performed from memory region limited by
1299 : * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1300 : *
1301 : * If the specified node can not hold the requested memory and @exact_nid
1302 : * is false, the allocation falls back to any node in the system.
1303 : *
1304 : * For systems with memory mirroring, the allocation is attempted first
1305 : * from the regions with mirroring enabled and then retried from any
1306 : * memory region.
1307 : *
1308 : * In addition, the function sets min_count to 0 using kmemleak_alloc_phys() for
1309 : * the allocated boot memory block, so that it is never reported as a leak.
1310 : *
1311 : * Return:
1312 : * Physical address of allocated memory block on success, %0 on failure.
1313 : */
1314 362 : phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1315 : phys_addr_t align, phys_addr_t start,
1316 : phys_addr_t end, int nid,
1317 : bool exact_nid)
1318 : {
1319 362 : enum memblock_flags flags = choose_memblock_flags();
1320 362 : phys_addr_t found;
1321 :
1322 362 : if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1323 0 : nid = NUMA_NO_NODE;
1324 :
1325 362 : if (!align) {
1326 : /* Can't use WARNs this early in boot on powerpc */
1327 0 : dump_stack();
1328 0 : align = SMP_CACHE_BYTES;
1329 : }
1330 :
1331 362 : again:
1332 362 : found = memblock_find_in_range_node(size, align, start, end, nid,
1333 : flags);
1334 362 : if (found && !memblock_reserve(found, size))
1335 362 : goto done;
1336 :
1337 0 : if (nid != NUMA_NO_NODE && !exact_nid) {
1338 0 : found = memblock_find_in_range_node(size, align, start,
1339 : end, NUMA_NO_NODE,
1340 : flags);
1341 0 : if (found && !memblock_reserve(found, size))
1342 0 : goto done;
1343 : }
1344 :
1345 0 : if (flags & MEMBLOCK_MIRROR) {
1346 0 : flags &= ~MEMBLOCK_MIRROR;
1347 0 : pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1348 : &size);
1349 0 : goto again;
1350 : }
1351 :
1352 : return 0;
1353 :
1354 362 : done:
1355 : /* Skip kmemleak for kasan_init() due to high volume. */
1356 362 : if (end != MEMBLOCK_ALLOC_KASAN)
1357 : /*
1358 : * The min_count is set to 0 so that memblock allocated
1359 : * blocks are never reported as leaks. This is because many
1360 : * of these blocks are only referred via the physical
1361 : * address which is not looked up by kmemleak.
1362 : */
1363 362 : kmemleak_alloc_phys(found, size, 0, 0);
1364 :
1365 : return found;
1366 : }
1367 :
1368 : /**
1369 : * memblock_phys_alloc_range - allocate a memory block inside specified range
1370 : * @size: size of memory block to be allocated in bytes
1371 : * @align: alignment of the region and block's size
1372 : * @start: the lower bound of the memory region to allocate (physical address)
1373 : * @end: the upper bound of the memory region to allocate (physical address)
1374 : *
1375 : * Allocate @size bytes in the range between @start and @end.
1376 : *
1377 : * Return: physical address of the allocated memory block on success,
1378 : * %0 on failure.
1379 : */
1380 0 : phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1381 : phys_addr_t align,
1382 : phys_addr_t start,
1383 : phys_addr_t end)
1384 : {
1385 0 : memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1386 : __func__, (u64)size, (u64)align, &start, &end,
1387 : (void *)_RET_IP_);
1388 0 : return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1389 : false);
1390 : }
1391 :
1392 : /**
1393 : * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1394 : * @size: size of memory block to be allocated in bytes
1395 : * @align: alignment of the region and block's size
1396 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1397 : *
1398 : * Allocates memory block from the specified NUMA node. If the node
1399 : * has no available memory, attempts to allocate from any node in the
1400 : * system.
1401 : *
1402 : * Return: physical address of the allocated memory block on success,
1403 : * %0 on failure.
1404 : */
1405 1 : phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1406 : {
1407 1 : return memblock_alloc_range_nid(size, align, 0,
1408 : MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1409 : }
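/*
 * Illustrative sketch (editor addition): a per-node early allocation
 * (size and nid hypothetical). The return value is a physical address,
 * so it must be converted before being dereferenced:
 *
 *	phys_addr_t pa = memblock_phys_alloc_try_nid(size, SMP_CACHE_BYTES, nid);
 *	void *va = pa ? phys_to_virt(pa) : NULL;
 */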
1410 :
1411 : /**
1412 : * memblock_alloc_internal - allocate boot memory block
1413 : * @size: size of memory block to be allocated in bytes
1414 : * @align: alignment of the region and block's size
1415 : * @min_addr: the lower bound of the memory region to allocate (phys address)
1416 : * @max_addr: the upper bound of the memory region to allocate (phys address)
1417 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1418 : * @exact_nid: control the allocation fall back to other nodes
1419 : *
1420 : * Allocates memory block using memblock_alloc_range_nid() and
1421 : * converts the returned physical address to virtual.
1422 : *
1423 : * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1424 : * will fall back to memory below @min_addr. Other constraints, such
1425 : * as node and mirrored memory, will be handled again in
1426 : * memblock_alloc_range_nid().
1427 : *
1428 : * Return:
1429 : * Virtual address of allocated memory block on success, NULL on failure.
1430 : */
1431 361 : static void * __init memblock_alloc_internal(
1432 : phys_addr_t size, phys_addr_t align,
1433 : phys_addr_t min_addr, phys_addr_t max_addr,
1434 : int nid, bool exact_nid)
1435 : {
1436 361 : phys_addr_t alloc;
1437 :
1438 : /*
1439 : * Detect any accidental use of these APIs after slab is ready, as at
1440 : * this moment memblock may be deinitialized already and its
1441 : * internal data may be destroyed (after execution of memblock_free_all)
1442 : */
1443 361 : if (WARN_ON_ONCE(slab_is_available()))
1444 0 : return kzalloc_node(size, GFP_NOWAIT, nid);
1445 :
1446 361 : if (max_addr > memblock.current_limit)
1447 : max_addr = memblock.current_limit;
1448 :
1449 361 : alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1450 : exact_nid);
1451 :
1452 : /* retry allocation without lower limit */
1453 361 : if (!alloc && min_addr)
1454 0 : alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1455 : exact_nid);
1456 :
1457 361 : if (!alloc)
1458 : return NULL;
1459 :
1460 361 : return phys_to_virt(alloc);
1461 : }
1462 :
1463 : /**
1464 : * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1465 : * without zeroing memory
1466 : * @size: size of memory block to be allocated in bytes
1467 : * @align: alignment of the region and block's size
1468 : * @min_addr: the lower bound of the memory region from where the allocation
1469 : * is preferred (phys address)
1470 : * @max_addr: the upper bound of the memory region from where the allocation
1471 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1472 : * allocate only from memory limited by memblock.current_limit value
1473 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1474 : *
1475 : * Public function, provides additional debug information (including caller
1476 : * info), if enabled. Does not zero allocated memory.
1477 : *
1478 : * Return:
1479 : * Virtual address of allocated memory block on success, NULL on failure.
1480 : */
1481 1 : void * __init memblock_alloc_exact_nid_raw(
1482 : phys_addr_t size, phys_addr_t align,
1483 : phys_addr_t min_addr, phys_addr_t max_addr,
1484 : int nid)
1485 : {
1486 1 : void *ptr;
1487 :
1488 1 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1489 : __func__, (u64)size, (u64)align, nid, &min_addr,
1490 : &max_addr, (void *)_RET_IP_);
1491 :
1492 1 : ptr = memblock_alloc_internal(size, align,
1493 : min_addr, max_addr, nid, true);
1494 1 : if (ptr && size > 0)
1495 1 : page_init_poison(ptr, size);
1496 :
1497 1 : return ptr;
1498 : }
1499 :
1500 : /**
1501 : * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1502 : * memory and without panicking
1503 : * @size: size of memory block to be allocated in bytes
1504 : * @align: alignment of the region and block's size
1505 : * @min_addr: the lower bound of the memory region from where the allocation
1506 : * is preferred (phys address)
1507 : * @max_addr: the upper bound of the memory region from where the allocation
1508 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1509 : * allocate only from memory limited by memblock.current_limit value
1510 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1511 : *
1512 : * Public function, provides additional debug information (including caller
1513 : * info), if enabled. Does not zero allocated memory, does not panic if request
1514 : * cannot be satisfied.
1515 : *
1516 : * Return:
1517 : * Virtual address of allocated memory block on success, NULL on failure.
1518 : */
1519 0 : void * __init memblock_alloc_try_nid_raw(
1520 : phys_addr_t size, phys_addr_t align,
1521 : phys_addr_t min_addr, phys_addr_t max_addr,
1522 : int nid)
1523 : {
1524 0 : void *ptr;
1525 :
1526 0 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1527 : __func__, (u64)size, (u64)align, nid, &min_addr,
1528 : &max_addr, (void *)_RET_IP_);
1529 :
1530 0 : ptr = memblock_alloc_internal(size, align,
1531 : min_addr, max_addr, nid, false);
1532 0 : if (ptr && size > 0)
1533 0 : page_init_poison(ptr, size);
1534 :
1535 0 : return ptr;
1536 : }
1537 :
1538 : /**
1539 : * memblock_alloc_try_nid - allocate boot memory block
1540 : * @size: size of memory block to be allocated in bytes
1541 : * @align: alignment of the region and block's size
1542 : * @min_addr: the lower bound of the memory region from where the allocation
1543 : * is preferred (phys address)
1544 : * @max_addr: the upper bound of the memory region from where the allocation
1545 : * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1546 : * allocate only from memory limited by memblock.current_limit value
1547 : * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1548 : *
1549 : * Public function, provides additional debug information (including caller
1550 : * info), if enabled. This function zeroes the allocated memory.
1551 : *
1552 : * Return:
1553 : * Virtual address of allocated memory block on success, NULL on failure.
1554 : */
1555 360 : void * __init memblock_alloc_try_nid(
1556 : phys_addr_t size, phys_addr_t align,
1557 : phys_addr_t min_addr, phys_addr_t max_addr,
1558 : int nid)
1559 : {
1560 360 : void *ptr;
1561 :
1562 360 : memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1563 : __func__, (u64)size, (u64)align, nid, &min_addr,
1564 : &max_addr, (void *)_RET_IP_);
1565 360 : ptr = memblock_alloc_internal(size, align,
1566 : min_addr, max_addr, nid, false);
1567 360 : if (ptr)
1568 360 : memset(ptr, 0, size);
1569 :
1570 360 : return ptr;
1571 : }
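/*
 * Illustrative sketch (editor addition): memblock_alloc_try_nid() returns
 * zeroed memory, while the _raw variants skip the memset (and may poison
 * the block for debugging). A caller that immediately overwrites the whole
 * block can use the raw form (size and nid hypothetical):
 *
 *	map = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (!map)
 *		panic("%s: Failed to allocate %llu bytes\n", __func__, (u64)size);
 */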
1572 :
1573 : /**
1574 : * __memblock_free_late - free pages directly to buddy allocator
1575 : * @base: phys starting address of the boot memory block
1576 : * @size: size of the boot memory block in bytes
1577 : *
1578 : * This is only useful when the memblock allocator has already been torn
1579 : * down, but we are still initializing the system. Pages are released directly
1580 : * to the buddy allocator.
1581 : */
1582 0 : void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1583 : {
1584 0 : phys_addr_t cursor, end;
1585 :
1586 0 : end = base + size - 1;
1587 0 : memblock_dbg("%s: [%pa-%pa] %pS\n",
1588 : __func__, &base, &end, (void *)_RET_IP_);
1589 0 : kmemleak_free_part_phys(base, size);
1590 0 : cursor = PFN_UP(base);
1591 0 : end = PFN_DOWN(base + size);
1592 :
1593 0 : for (; cursor < end; cursor++) {
1594 0 : memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1595 0 : totalram_pages_inc();
1596 : }
1597 0 : }
1598 :
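/*
 * Editor's note, a worked example of the rounding above (4 KiB pages,
 * hypothetical numbers): for base = 0x1100 and size = 0x2000,
 * PFN_UP(0x1100) = 2 and PFN_DOWN(0x3100) = 3, so only pfn 2 - the
 * sole page fully contained in [base, base + size) - is freed;
 * partial pages at either end stay reserved.
 */
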
1599 : /*
1600 : * Remaining API functions
1601 : */
1602 :
1603 0 : phys_addr_t __init_memblock memblock_phys_mem_size(void)
1604 : {
1605 0 : return memblock.memory.total_size;
1606 : }
1607 :
1608 0 : phys_addr_t __init_memblock memblock_reserved_size(void)
1609 : {
1610 0 : return memblock.reserved.total_size;
1611 : }
1612 :
1613 : /* lowest address of all registered memory */
1614 1 : phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1615 : {
1616 1 : return memblock.memory.regions[0].base;
1617 : }
1618 :
1619 0 : phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1620 : {
1621 0 : int idx = memblock.memory.cnt - 1;
1622 :
1623 0 : return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1624 : }
1625 :
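/*
 * Illustrative sketch (editor's addition): the two helpers above bound
 * all registered memory. Note that the difference spans any holes
 * between banks, so it is an address range, not an amount of memory;
 * memblock_phys_mem_size() above returns the actual sum.
 */
static phys_addr_t __init example_dram_span(void)
{
	return memblock_end_of_DRAM() - memblock_start_of_DRAM();
}
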
1626 0 : static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1627 : {
1628 0 : phys_addr_t max_addr = PHYS_ADDR_MAX;
1629 0 : struct memblock_region *r;
1630 :
1631 : /*
1632 : * Translate the memory @limit size into the max address within one of
1633 : * the memory memblock regions. If @limit exceeds the total size of
1634 : * those regions, max_addr keeps its original value of PHYS_ADDR_MAX.
1635 : */
1636 0 : for_each_mem_region(r) {
1637 0 : if (limit <= r->size) {
1638 0 : max_addr = r->base + limit;
1639 0 : break;
1640 : }
1641 0 : limit -= r->size;
1642 : }
1643 :
1644 0 : return max_addr;
1645 : }
1646 :
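/*
 * Editor's note, a worked example with hypothetical banks: given 2 GiB
 * at 0x0 and 2 GiB at 0x100000000, a 3 GiB @limit walks past the first
 * bank (limit becomes 1 GiB) and resolves in the second, yielding
 * max_addr = 0x100000000 + 0x40000000 = 0x140000000.
 */
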
1647 0 : void __init memblock_enforce_memory_limit(phys_addr_t limit)
1648 : {
1649 0 : phys_addr_t max_addr;
1650 :
1651 0 : if (!limit)
1652 : return;
1653 :
1654 0 : max_addr = __find_max_addr(limit);
1655 :
1656 : /* @limit exceeds the total size of the memory, do nothing */
1657 0 : if (max_addr == PHYS_ADDR_MAX)
1658 : return;
1659 :
1660 : /* truncate both memory and reserved regions */
1661 0 : memblock_remove_range(&memblock.memory, max_addr,
1662 : PHYS_ADDR_MAX);
1663 0 : memblock_remove_range(&memblock.reserved, max_addr,
1664 : PHYS_ADDR_MAX);
1665 : }
1666 :
1667 0 : void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1668 : {
1669 0 : int start_rgn, end_rgn;
1670 0 : int i, ret;
1671 :
1672 0 : if (!size)
1673 0 : return;
1674 :
1675 0 : ret = memblock_isolate_range(&memblock.memory, base, size,
1676 : &start_rgn, &end_rgn);
1677 0 : if (ret)
1678 : return;
1679 :
1680 : /* remove all mapped (non-NOMAP) regions outside the isolated range */
1681 0 : for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1682 0 : if (!memblock_is_nomap(&memblock.memory.regions[i]))
1683 0 : memblock_remove_region(&memblock.memory, i);
1684 :
1685 0 : for (i = start_rgn - 1; i >= 0; i--)
1686 0 : if (!memblock_is_nomap(&memblock.memory.regions[i]))
1687 0 : memblock_remove_region(&memblock.memory, i);
1688 :
1689 : /* truncate the reserved regions */
1690 0 : memblock_remove_range(&memblock.reserved, 0, base);
1691 0 : memblock_remove_range(&memblock.reserved,
1692 : base + size, PHYS_ADDR_MAX);
1693 : }
1694 :
1695 0 : void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1696 : {
1697 0 : phys_addr_t max_addr;
1698 :
1699 0 : if (!limit)
1700 : return;
1701 :
1702 0 : max_addr = __find_max_addr(limit);
1703 :
1704 : /* @limit exceeds the total size of the memory, do nothing */
1705 0 : if (max_addr == PHYS_ADDR_MAX)
1706 : return;
1707 :
1708 0 : memblock_cap_memory_range(0, max_addr);
1709 : }
1710 :
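/*
 * Editor's note on the three truncation helpers above:
 * memblock_enforce_memory_limit() drops everything above the address
 * that @limit bytes of memory translate to, NOMAP regions included;
 * memblock_cap_memory_range() keeps only [@base, @base + @size) plus
 * any NOMAP regions; memblock_mem_limit_remove_map() applies a size
 * limit like the first helper but, being built on the second, leaves
 * NOMAP regions in place.
 */
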
1711 10 : static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1712 : {
1713 10 : unsigned int left = 0, right = type->cnt;
1714 :
1715 18 : do {
1716 18 : unsigned int mid = (right + left) / 2;
1717 :
1718 18 : if (addr < type->regions[mid].base)
1719 : right = mid;
1720 2 : else if (addr >= (type->regions[mid].base +
1721 2 : type->regions[mid].size))
1722 1 : left = mid + 1;
1723 : else
1724 1 : return mid;
1725 17 : } while (left < right);
1726 : return -1;
1727 : }
1728 :
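/*
 * Editor's note: the bisection above relies on the memblock invariant
 * that regions within a type are sorted by base address and do not
 * overlap, which memblock_add_range() maintains; it returns the index
 * of the region containing @addr, or -1 if no region does.
 */
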
1729 0 : bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1730 : {
1731 0 : return memblock_search(&memblock.reserved, addr) != -1;
1732 : }
1733 :
1734 0 : bool __init_memblock memblock_is_memory(phys_addr_t addr)
1735 : {
1736 0 : return memblock_search(&memblock.memory, addr) != -1;
1737 : }
1738 :
1739 0 : bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1740 : {
1741 0 : int i = memblock_search(&memblock.memory, addr);
1742 :
1743 0 : if (i == -1)
1744 : return false;
1745 0 : return !memblock_is_nomap(&memblock.memory.regions[i]);
1746 : }
1747 :
1748 3 : int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1749 : unsigned long *start_pfn, unsigned long *end_pfn)
1750 : {
1751 3 : struct memblock_type *type = &memblock.memory;
1752 3 : int mid = memblock_search(type, PFN_PHYS(pfn));
1753 :
1754 3 : if (mid == -1)
1755 : return -1;
1756 :
1757 1 : *start_pfn = PFN_DOWN(type->regions[mid].base);
1758 1 : *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1759 :
1760 1 : return memblock_get_region_node(&type->regions[mid]);
1761 : }
1762 :
1763 : /**
1764 : * memblock_is_region_memory - check if a region is a subset of memory
1765 : * @base: base of region to check
1766 : * @size: size of region to check
1767 : *
1768 : * Check if the region [@base, @base + @size) is a subset of a memory block.
1769 : *
1770 : * Return:
1771 : * true if the region is a subset of a memory block, false otherwise.
1772 : */
1773 7 : bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1774 : {
1775 7 : int idx = memblock_search(&memblock.memory, base);
1776 7 : phys_addr_t end = base + memblock_cap_size(base, &size);
1777 :
1778 7 : if (idx == -1)
1779 : return false;
1780 0 : return (memblock.memory.regions[idx].base +
1781 0 : memblock.memory.regions[idx].size) >= end;
1782 : }
1783 :
1784 : /**
1785 : * memblock_is_region_reserved - check if a region intersects reserved memory
1786 : * @base: base of region to check
1787 : * @size: size of region to check
1788 : *
1789 : * Check if the region [@base, @base + @size) intersects a reserved
1790 : * memory block.
1791 : *
1792 : * Return:
1793 : * True if they intersect, false if not.
1794 : */
1795 0 : bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1796 : {
1797 0 : memblock_cap_size(base, &size);
1798 0 : return memblock_overlaps_region(&memblock.reserved, base, size);
1799 : }
1800 :
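/*
 * Illustrative sketch (editor's addition): a typical early placement
 * check combines the two predicates above - a candidate range must be
 * backed by memory and must not collide with existing reservations.
 */
static bool __init example_range_is_free(phys_addr_t base, phys_addr_t size)
{
	return memblock_is_region_memory(base, size) &&
	       !memblock_is_region_reserved(base, size);
}
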
1801 1 : void __init_memblock memblock_trim_memory(phys_addr_t align)
1802 : {
1803 1 : phys_addr_t start, end, orig_start, orig_end;
1804 1 : struct memblock_region *r;
1805 :
1806 3 : for_each_mem_region(r) {
1807 2 : orig_start = r->base;
1808 2 : orig_end = r->base + r->size;
1809 2 : start = round_up(orig_start, align);
1810 2 : end = round_down(orig_end, align);
1811 :
1812 2 : if (start == orig_start && end == orig_end)
1813 1 : continue;
1814 :
1815 1 : if (start < end) {
1816 1 : r->base = start;
1817 1 : r->size = end - start;
1818 : } else {
1819 0 : memblock_remove_region(&memblock.memory,
1820 0 : r - memblock.memory.regions);
1821 0 : r--;
1822 : }
1823 : }
1824 1 : }
1825 :
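/*
 * Illustrative sketch (editor's addition): architectures typically trim
 * to page granularity during setup so that no bank starts or ends
 * mid-page, e.g.:
 */
static void __init example_trim_partial_pages(void)
{
	memblock_trim_memory(PAGE_SIZE);
}
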
1826 2 : void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1827 : {
1828 2 : memblock.current_limit = limit;
1829 2 : }
1830 :
1831 0 : phys_addr_t __init_memblock memblock_get_current_limit(void)
1832 : {
1833 0 : return memblock.current_limit;
1834 : }
1835 :
1836 0 : static void __init_memblock memblock_dump(struct memblock_type *type)
1837 : {
1838 0 : phys_addr_t base, end, size;
1839 0 : enum memblock_flags flags;
1840 0 : int idx;
1841 0 : struct memblock_region *rgn;
1842 :
1843 0 : pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
1844 :
1845 0 : for_each_memblock_type(idx, type, rgn) {
1846 0 : char nid_buf[32] = "";
1847 :
1848 0 : base = rgn->base;
1849 0 : size = rgn->size;
1850 0 : end = base + size - 1;
1851 0 : flags = rgn->flags;
1852 : #ifdef CONFIG_NEED_MULTIPLE_NODES
1853 0 : if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1854 0 : snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1855 : memblock_get_region_node(rgn));
1856 : #endif
1857 0 : pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1858 : type->name, idx, &base, &end, &size, nid_buf, flags);
1859 : }
1860 0 : }
1861 :
1862 0 : static void __init_memblock __memblock_dump_all(void)
1863 : {
1864 0 : pr_info("MEMBLOCK configuration:\n");
1865 0 : pr_info(" memory size = %pa reserved size = %pa\n",
1866 : &memblock.memory.total_size,
1867 : &memblock.reserved.total_size);
1868 :
1869 0 : memblock_dump(&memblock.memory);
1870 0 : memblock_dump(&memblock.reserved);
1871 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1872 : memblock_dump(&physmem);
1873 : #endif
1874 0 : }
1875 :
1876 2 : void __init_memblock memblock_dump_all(void)
1877 : {
1878 2 : if (memblock_debug)
1879 0 : __memblock_dump_all();
1880 2 : }
1881 :
1882 1 : void __init memblock_allow_resize(void)
1883 : {
1884 1 : memblock_can_resize = 1;
1885 1 : }
1886 :
1887 0 : static int __init early_memblock(char *p)
1888 : {
1889 0 : if (p && strstr(p, "debug"))
1890 0 : memblock_debug = 1;
1891 0 : return 0;
1892 : }
1893 : early_param("memblock", early_memblock);
1894 :
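/*
 * Editor's note: booting with "memblock=debug" on the kernel command
 * line sets memblock_debug, which turns the memblock_dbg() calls above
 * into pr_info() output and makes memblock_dump_all() print the full
 * region lists.
 */
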
1895 : static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1896 : {
1897 : struct page *start_pg, *end_pg;
1898 : phys_addr_t pg, pgend;
1899 :
1900 : /*
1901 : * Convert start_pfn/end_pfn to struct page pointers.
1902 : */
1903 : start_pg = pfn_to_page(start_pfn - 1) + 1;
1904 : end_pg = pfn_to_page(end_pfn - 1) + 1;
1905 :
1906 : /*
1907 : * Convert to physical addresses, and round start upwards and end
1908 : * downwards.
1909 : */
1910 : pg = PAGE_ALIGN(__pa(start_pg));
1911 : pgend = __pa(end_pg) & PAGE_MASK;
1912 :
1913 : /*
1914 : * If there are free pages between these, free the section of the
1915 : * memmap array.
1916 : */
1917 : if (pg < pgend)
1918 : memblock_free(pg, pgend - pg);
1919 : }
1920 :
1921 : /*
1922 : * The mem_map array can get very big. Free the unused area of the memory map.
1923 : */
1924 1 : static void __init free_unused_memmap(void)
1925 : {
1926 1 : unsigned long start, end, prev_end = 0;
1927 1 : int i;
1928 :
1929 1 : if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1930 : IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1931 1 : return;
1932 :
1933 : /*
1934 : * This relies on each bank being in address order.
1935 : * The banks are sorted previously in bootmem_init().
1936 : */
1937 : for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1938 : #ifdef CONFIG_SPARSEMEM
1939 : /*
1940 : * Take care not to free memmap entries that don't exist
1941 : * due to SPARSEMEM sections which aren't present.
1942 : */
1943 : start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1944 : #else
1945 : /*
1946 : * Align down here since the VM subsystem insists that the
1947 : * memmap entries are valid from the bank start aligned to
1948 : * MAX_ORDER_NR_PAGES.
1949 : */
1950 : start = round_down(start, MAX_ORDER_NR_PAGES);
1951 : #endif
1952 :
1953 : /*
1954 : * If we had a previous bank, and there is a gap between the
1955 : * current bank and the previous one, free it.
1956 : */
1957 : if (prev_end && prev_end < start)
1958 : free_memmap(prev_end, start);
1959 :
1960 : /*
1961 : * Align up here since the VM subsystem insists that the
1962 : * memmap entries are valid from the bank end aligned to
1963 : * MAX_ORDER_NR_PAGES.
1964 : */
1965 : prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
1966 : }
1967 :
1968 : #ifdef CONFIG_SPARSEMEM
1969 : if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
1970 : free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
1971 : #endif
1972 : }
1973 :
1974 9 : static void __init __free_pages_memory(unsigned long start, unsigned long end)
1975 : {
1976 9 : int order;
1977 :
1978 242 : while (start < end) {
1979 233 : order = min(MAX_ORDER - 1UL, __ffs(start));
1980 :
1981 251 : while (start + (1UL << order) > end)
1982 18 : order--;
1983 :
1984 233 : memblock_free_pages(pfn_to_page(start), start, order);
1985 :
1986 233 : start += (1UL << order);
1987 : }
1988 9 : }
1989 :
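/*
 * Editor's note, a worked example of the order selection above with
 * hypothetical pfns start = 3, end = 16: pfn 3 is freed at order 0
 * (__ffs(3) = 0), pfns 4-7 at order 2, and pfns 8-15 at order 3, i.e.
 * the largest naturally aligned blocks that still fit before @end.
 */
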
1990 19 : static unsigned long __init __free_memory_core(phys_addr_t start,
1991 : phys_addr_t end)
1992 : {
1993 19 : unsigned long start_pfn = PFN_UP(start);
1994 19 : unsigned long end_pfn = min_t(unsigned long,
1995 : PFN_DOWN(end), max_low_pfn);
1996 :
1997 19 : if (start_pfn >= end_pfn)
1998 : return 0;
1999 :
2000 9 : __free_pages_memory(start_pfn, end_pfn);
2001 :
2002 9 : return end_pfn - start_pfn;
2003 : }
2004 :
2005 1 : static unsigned long __init free_low_memory_core_early(void)
2006 : {
2007 1 : unsigned long count = 0;
2008 1 : phys_addr_t start, end;
2009 1 : u64 i;
2010 :
2011 1 : memblock_clear_hotplug(0, -1);
2012 :
2013 21 : for_each_reserved_mem_range(i, &start, &end)
2014 20 : reserve_bootmem_region(start, end);
2015 :
2016 : /*
2017 : * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2018 : * because in some cases, such as when node 0 has no RAM installed,
2019 : * low memory will be on node 1.
2020 : */
2021 20 : for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2022 : NULL)
2023 19 : count += __free_memory_core(start, end);
2024 :
2025 1 : return count;
2026 : }
2027 :
2028 : static int reset_managed_pages_done __initdata;
2029 :
2030 1 : void reset_node_managed_pages(pg_data_t *pgdat)
2031 : {
2032 1 : struct zone *z;
2033 :
2034 4 : for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2035 3 : atomic_long_set(&z->managed_pages, 0);
2036 1 : }
2037 :
2038 1 : void __init reset_all_zones_managed_pages(void)
2039 : {
2040 1 : struct pglist_data *pgdat;
2041 :
2042 1 : if (reset_managed_pages_done)
2043 : return;
2044 :
2045 2 : for_each_online_pgdat(pgdat)
2046 1 : reset_node_managed_pages(pgdat);
2047 :
2048 1 : reset_managed_pages_done = 1;
2049 : }
2050 :
2051 : /**
2052 : * memblock_free_all - release free pages to the buddy allocator
2053 : */
2054 1 : void __init memblock_free_all(void)
2055 : {
2056 1 : unsigned long pages;
2057 :
2058 1 : free_unused_memmap();
2059 1 : reset_all_zones_managed_pages();
2060 :
2061 1 : pages = free_low_memory_core_early();
2062 1 : totalram_pages_add(pages);
2063 1 : }
2064 :
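/*
 * Illustrative sketch (editor's addition): an architecture's mem_init()
 * is the typical caller; afterwards the buddy allocator owns all free
 * low memory and normal page allocation works.
 */
static void __init example_mem_init(void)
{
	memblock_free_all();
}
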
2065 : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2066 :
2067 : static int memblock_debug_show(struct seq_file *m, void *private)
2068 : {
2069 : struct memblock_type *type = m->private;
2070 : struct memblock_region *reg;
2071 : int i;
2072 : phys_addr_t end;
2073 :
2074 : for (i = 0; i < type->cnt; i++) {
2075 : reg = &type->regions[i];
2076 : end = reg->base + reg->size - 1;
2077 :
2078 : seq_printf(m, "%4d: ", i);
2079 : seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2080 : }
2081 : return 0;
2082 : }
2083 : DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2084 :
2085 : static int __init memblock_init_debugfs(void)
2086 : {
2087 : struct dentry *root = debugfs_create_dir("memblock", NULL);
2088 :
2089 : debugfs_create_file("memory", 0444, root,
2090 : &memblock.memory, &memblock_debug_fops);
2091 : debugfs_create_file("reserved", 0444, root,
2092 : &memblock.reserved, &memblock_debug_fops);
2093 : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2094 : debugfs_create_file("physmem", 0444, root, &physmem,
2095 : &memblock_debug_fops);
2096 : #endif
2097 :
2098 : return 0;
2099 : }
2100 : __initcall(memblock_init_debugfs);
2101 :
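/*
 * Editor's note: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK
 * enabled, the region lists can be inspected at runtime; the path and
 * output below are illustrative:
 *
 *   # cat /sys/kernel/debug/memblock/memory
 *      0: 0x0000000080000000..0x00000000ffffffff
 */
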
2102 : #endif /* CONFIG_DEBUG_FS */