LCOV - code coverage report
Current view: top level - include/linux - memblock.h (source / functions)
Test: landlock.info                          Hit    Total    Coverage
Date: 2021-04-22 12:43:58      Lines:         19       37      51.4 %
                               Functions:      0        0         -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : #ifndef _LINUX_MEMBLOCK_H
       3             : #define _LINUX_MEMBLOCK_H
       4             : #ifdef __KERNEL__
       5             : 
       6             : /*
       7             :  * Logical memory blocks.
       8             :  *
       9             :  * Copyright (C) 2001 Peter Bergner, IBM Corp.
      10             :  */
      11             : 
      12             : #include <linux/init.h>
      13             : #include <linux/mm.h>
      14             : #include <asm/dma.h>
      15             : 
      16             : extern unsigned long max_low_pfn;
      17             : extern unsigned long min_low_pfn;
      18             : 
      19             : /*
      20             :  * highest page
      21             :  */
      22             : extern unsigned long max_pfn;
      23             : /*
      24             :  * highest possible page
      25             :  */
      26             : extern unsigned long long max_possible_pfn;
      27             : 
      28             : /**
      29             :  * enum memblock_flags - definition of memory region attributes
      30             :  * @MEMBLOCK_NONE: no special request
      31             :  * @MEMBLOCK_HOTPLUG: hotpluggable region
      32             :  * @MEMBLOCK_MIRROR: mirrored region
      33             :  * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
      34             :  */
      35             : enum memblock_flags {
      36             :         MEMBLOCK_NONE           = 0x0,  /* No special request */
      37             :         MEMBLOCK_HOTPLUG        = 0x1,  /* hotpluggable region */
      38             :         MEMBLOCK_MIRROR         = 0x2,  /* mirrored region */
      39             :         MEMBLOCK_NOMAP          = 0x4,  /* don't add to kernel direct mapping */
      40             : };
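/*
 * A minimal usage sketch (not part of memblock.h): early platform code could
 * combine the flags above with the marking helpers declared later in this
 * header to keep a firmware-owned range out of the kernel direct mapping.
 * The function name and call site are hypothetical.
 */
static void __init hide_firmware_region(phys_addr_t base, phys_addr_t size)
{
	/* keep the range away from allocations and out of the linear map */
	memblock_reserve(base, size);
	memblock_mark_nomap(base, size);
}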
      41             : 
      42             : /**
      43             :  * struct memblock_region - represents a memory region
      44             :  * @base: base address of the region
      45             :  * @size: size of the region
      46             :  * @flags: memory region attributes
      47             :  * @nid: NUMA node id
      48             :  */
      49             : struct memblock_region {
      50             :         phys_addr_t base;
      51             :         phys_addr_t size;
      52             :         enum memblock_flags flags;
      53             : #ifdef CONFIG_NEED_MULTIPLE_NODES
      54             :         int nid;
      55             : #endif
      56             : };
      57             : 
      58             : /**
      59             :  * struct memblock_type - collection of memory regions of certain type
      60             :  * @cnt: number of regions
      61             :  * @max: size of the allocated array
      62             :  * @total_size: size of all regions
      63             :  * @regions: array of regions
      64             :  * @name: the memory type symbolic name
      65             :  */
      66             : struct memblock_type {
      67             :         unsigned long cnt;
      68             :         unsigned long max;
      69             :         phys_addr_t total_size;
      70             :         struct memblock_region *regions;
      71             :         char *name;
      72             : };
      73             : 
      74             : /**
      75             :  * struct memblock - memblock allocator metadata
      76             :  * @bottom_up: is bottom up direction?
      77             :  * @current_limit: physical address of the current allocation limit
      78             :  * @memory: usable memory regions
      79             :  * @reserved: reserved memory regions
      80             :  */
      81             : struct memblock {
      82             :         bool bottom_up;  /* is bottom up direction? */
      83             :         phys_addr_t current_limit;
      84             :         struct memblock_type memory;
      85             :         struct memblock_type reserved;
      86             : };
      87             : 
      88             : extern struct memblock memblock;
      89             : 
      90             : #ifndef CONFIG_ARCH_KEEP_MEMBLOCK
      91             : #define __init_memblock __meminit
      92             : #define __initdata_memblock __meminitdata
      93             : void memblock_discard(void);
      94             : #else
      95             : #define __init_memblock
      96             : #define __initdata_memblock
      97             : static inline void memblock_discard(void) {}
      98             : #endif
      99             : 
     100             : phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
     101             :                                    phys_addr_t size, phys_addr_t align);
     102             : void memblock_allow_resize(void);
     103             : int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
     104             : int memblock_add(phys_addr_t base, phys_addr_t size);
     105             : int memblock_remove(phys_addr_t base, phys_addr_t size);
     106             : int memblock_free(phys_addr_t base, phys_addr_t size);
     107             : int memblock_reserve(phys_addr_t base, phys_addr_t size);
     108             : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
     109             : int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
     110             : #endif
     111             : void memblock_trim_memory(phys_addr_t align);
     112             : bool memblock_overlaps_region(struct memblock_type *type,
     113             :                               phys_addr_t base, phys_addr_t size);
     114             : int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
     115             : int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
     116             : int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
     117             : int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
     118             : int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
     119             : 
     120             : void memblock_free_all(void);
     121             : void reset_node_managed_pages(pg_data_t *pgdat);
     122             : void reset_all_zones_managed_pages(void);
     123             : 
     124             : /* Low level functions */
     125             : void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
     126             :                       struct memblock_type *type_a,
     127             :                       struct memblock_type *type_b, phys_addr_t *out_start,
     128             :                       phys_addr_t *out_end, int *out_nid);
     129             : 
     130             : void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
     131             :                           struct memblock_type *type_a,
     132             :                           struct memblock_type *type_b, phys_addr_t *out_start,
     133             :                           phys_addr_t *out_end, int *out_nid);
     134             : 
     135             : void __memblock_free_late(phys_addr_t base, phys_addr_t size);
     136             : 
     137             : #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
     138             : static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
     139             :                                         phys_addr_t *out_start,
     140             :                                         phys_addr_t *out_end)
     141             : {
     142             :         extern struct memblock_type physmem;
     143             : 
     144             :         __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
     145             :                          out_start, out_end, NULL);
     146             : }
     147             : 
     148             : /**
     149             :  * for_each_physmem_range - iterate through physmem areas not included in type.
     150             :  * @i: u64 used as loop variable
      151             :  * @type: ptr to memblock_type whose ranges are excluded from the iteration, can be %NULL
     152             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     153             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     154             :  */
     155             : #define for_each_physmem_range(i, type, p_start, p_end)                 \
     156             :         for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \
     157             :              i != (u64)ULLONG_MAX;                                      \
     158             :              __next_physmem_range(&i, type, p_start, p_end))
     159             : #endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
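/*
 * A minimal usage sketch (not part of memblock.h), assuming
 * CONFIG_HAVE_MEMBLOCK_PHYS_MAP: report detected physical memory that is not
 * part of memblock.memory (e.g. memory dropped by a "mem=" limit). The
 * helper name is hypothetical.
 */
static void __init report_unused_physmem(void)
{
	phys_addr_t start, end;
	u64 i;

	/* physmem ranges that memblock.memory does not cover */
	for_each_physmem_range(i, &memblock.memory, &start, &end)
		pr_info("physmem outside memblock.memory: [%pa-%pa]\n",
			&start, &end);
}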
     160             : 
     161             : /**
     162             :  * __for_each_mem_range - iterate through memblock areas from type_a and not
     163             :  * included in type_b. Or just type_a if type_b is NULL.
     164             :  * @i: u64 used as loop variable
     165             :  * @type_a: ptr to memblock_type to iterate
      166             :  * @type_b: ptr to memblock_type whose ranges are excluded from the iteration
     167             :  * @nid: node selector, %NUMA_NO_NODE for all nodes
     168             :  * @flags: pick from blocks based on memory attributes
     169             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     170             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     171             :  * @p_nid: ptr to int for nid of the range, can be %NULL
     172             :  */
     173             : #define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
     174             :                            p_start, p_end, p_nid)                       \
     175             :         for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,        \
     176             :                                      p_start, p_end, p_nid);            \
     177             :              i != (u64)ULLONG_MAX;                                      \
     178             :              __next_mem_range(&i, nid, flags, type_a, type_b,               \
     179             :                               p_start, p_end, p_nid))
     180             : 
     181             : /**
     182             :  * __for_each_mem_range_rev - reverse iterate through memblock areas from
     183             :  * type_a and not included in type_b. Or just type_a if type_b is NULL.
     184             :  * @i: u64 used as loop variable
     185             :  * @type_a: ptr to memblock_type to iterate
      186             :  * @type_b: ptr to memblock_type whose ranges are excluded from the iteration
     187             :  * @nid: node selector, %NUMA_NO_NODE for all nodes
     188             :  * @flags: pick from blocks based on memory attributes
     189             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     190             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     191             :  * @p_nid: ptr to int for nid of the range, can be %NULL
     192             :  */
     193             : #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
     194             :                                  p_start, p_end, p_nid)                 \
     195             :         for (i = (u64)ULLONG_MAX,                                       \
     196             :                      __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
     197             :                                           p_start, p_end, p_nid);       \
     198             :              i != (u64)ULLONG_MAX;                                      \
     199             :              __next_mem_range_rev(&i, nid, flags, type_a, type_b,   \
     200             :                                   p_start, p_end, p_nid))
     201             : 
     202             : /**
     203             :  * for_each_mem_range - iterate through memory areas.
     204             :  * @i: u64 used as loop variable
     205             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     206             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     207             :  */
     208             : #define for_each_mem_range(i, p_start, p_end) \
     209             :         __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,       \
     210             :                              MEMBLOCK_NONE, p_start, p_end, NULL)
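/*
 * A minimal usage sketch (not part of memblock.h): walk every registered
 * memory range early in boot. The helper name is hypothetical.
 */
static void __init dump_memory_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end)
		pr_info("memory: [%pa-%pa]\n", &start, &end);
}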
     211             : 
     212             : /**
      213             :  * for_each_mem_range_rev - iterate through memory areas in reverse
      214             :  * order.
     215             :  * @i: u64 used as loop variable
     216             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     217             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     218             :  */
     219             : #define for_each_mem_range_rev(i, p_start, p_end)                       \
     220             :         __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
     221             :                                  MEMBLOCK_NONE, p_start, p_end, NULL)
     222             : 
     223             : /**
     224             :  * for_each_reserved_mem_range - iterate over all reserved memblock areas
     225             :  * @i: u64 used as loop variable
     226             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     227             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     228             :  *
     229             :  * Walks over reserved areas of memblock. Available as soon as memblock
     230             :  * is initialized.
     231             :  */
     232             : #define for_each_reserved_mem_range(i, p_start, p_end)                  \
     233             :         __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,     \
     234             :                              MEMBLOCK_NONE, p_start, p_end, NULL)
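/*
 * A minimal sketch (not part of memblock.h): add up all reserved memory by
 * walking the reserved ranges; memblock_reserved_size(), declared later in
 * this header, reports the reserved total directly. The helper name is
 * hypothetical.
 */
static phys_addr_t __init sum_reserved_ranges(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_reserved_mem_range(i, &start, &end)
		total += end - start;

	return total;
}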
     235             : 
     236             : static inline bool memblock_is_hotpluggable(struct memblock_region *m)
     237             : {
     238             :         return m->flags & MEMBLOCK_HOTPLUG;
     239             : }
     240             : 
     241           0 : static inline bool memblock_is_mirror(struct memblock_region *m)
     242             : {
     243           0 :         return m->flags & MEMBLOCK_MIRROR;
     244             : }
     245             : 
     246         948 : static inline bool memblock_is_nomap(struct memblock_region *m)
     247             : {
     248         948 :         return m->flags & MEMBLOCK_NOMAP;
     249             : }
     250             : 
     251             : int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
     252             :                             unsigned long  *end_pfn);
     253             : void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
     254             :                           unsigned long *out_end_pfn, int *out_nid);
     255             : 
     256             : /**
     257             :  * for_each_mem_pfn_range - early memory pfn range iterator
     258             :  * @i: an integer used as loop variable
     259             :  * @nid: node selector, %MAX_NUMNODES for all nodes
     260             :  * @p_start: ptr to ulong for start pfn of the range, can be %NULL
     261             :  * @p_end: ptr to ulong for end pfn of the range, can be %NULL
     262             :  * @p_nid: ptr to int for nid of the range, can be %NULL
     263             :  *
     264             :  * Walks over configured memory ranges.
     265             :  */
     266             : #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
     267             :         for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
     268             :              i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
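/*
 * A minimal sketch (not part of memblock.h): count the PFNs that memblock
 * knows about on one node, using the early PFN iterator above. The helper
 * name is hypothetical.
 */
static unsigned long __init node_memblock_pages(int nid)
{
	unsigned long start_pfn, end_pfn, nr_pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		nr_pages += end_pfn - start_pfn;

	return nr_pages;
}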
     269             : 
     270             : #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
     271             : void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
     272             :                                   unsigned long *out_spfn,
     273             :                                   unsigned long *out_epfn);
     274             : /**
     275             :  * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
     276             :  * memblock areas
     277             :  * @i: u64 used as loop variable
     278             :  * @zone: zone in which all of the memory blocks reside
     279             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     280             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     281             :  *
     282             :  * Walks over free (memory && !reserved) areas of memblock in a specific
      283             :  * zone. Available once memblock and an empty zone are initialized. The main
      284             :  * assumption is that the zone start, end, and pgdat have been associated.
      285             :  * This way we can use the zone to determine the NUMA node, and whether a
      286             :  * given part of the memblock is valid for the zone.
     287             :  */
     288             : #define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
     289             :         for (i = 0,                                                     \
     290             :              __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);        \
     291             :              i != U64_MAX;                                      \
     292             :              __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
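/*
 * A minimal sketch (not part of memblock.h), assuming
 * CONFIG_DEFERRED_STRUCT_PAGE_INIT: count the free (memory && !reserved)
 * PFNs of one zone, the same ranges deferred struct page init works through.
 * The helper name is hypothetical.
 */
static unsigned long __init count_zone_free_pfns(struct zone *zone)
{
	unsigned long spfn, epfn, nr = 0;
	u64 i;

	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
		nr += epfn - spfn;

	return nr;
}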
     293             : 
     294             : /**
     295             :  * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
     296             :  * free memblock areas from a given point
     297             :  * @i: u64 used as loop variable
     298             :  * @zone: zone in which all of the memory blocks reside
     299             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     300             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     301             :  *
     302             :  * Walks over free (memory && !reserved) areas of memblock in a specific
     303             :  * zone, continuing from current position. Available as soon as memblock is
     304             :  * initialized.
     305             :  */
     306             : #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
     307             :         for (; i != U64_MAX;                                      \
     308             :              __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
     309             : 
     310             : int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
     311             : 
     312             : #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
     313             : 
     314             : /**
     315             :  * for_each_free_mem_range - iterate through free memblock areas
     316             :  * @i: u64 used as loop variable
     317             :  * @nid: node selector, %NUMA_NO_NODE for all nodes
     318             :  * @flags: pick from blocks based on memory attributes
     319             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     320             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     321             :  * @p_nid: ptr to int for nid of the range, can be %NULL
     322             :  *
     323             :  * Walks over free (memory && !reserved) areas of memblock.  Available as
     324             :  * soon as memblock is initialized.
     325             :  */
     326             : #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
     327             :         __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
     328             :                              nid, flags, p_start, p_end, p_nid)
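/*
 * A minimal sketch (not part of memblock.h): find the largest free
 * (memory && !reserved) range across all nodes. The helper name is
 * hypothetical.
 */
static phys_addr_t __init largest_free_range(void)
{
	phys_addr_t start, end, largest = 0;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL) {
		if (end - start > largest)
			largest = end - start;
	}

	return largest;
}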
     329             : 
     330             : /**
     331             :  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
     332             :  * @i: u64 used as loop variable
     333             :  * @nid: node selector, %NUMA_NO_NODE for all nodes
     334             :  * @flags: pick from blocks based on memory attributes
     335             :  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
     336             :  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
     337             :  * @p_nid: ptr to int for nid of the range, can be %NULL
     338             :  *
     339             :  * Walks over free (memory && !reserved) areas of memblock in reverse
     340             :  * order.  Available as soon as memblock is initialized.
     341             :  */
     342             : #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
     343             :                                         p_nid)                          \
     344             :         __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
     345             :                                  nid, flags, p_start, p_end, p_nid)
     346             : 
     347             : int memblock_set_node(phys_addr_t base, phys_addr_t size,
     348             :                       struct memblock_type *type, int nid);
     349             : 
     350             : #ifdef CONFIG_NEED_MULTIPLE_NODES
     351         393 : static inline void memblock_set_region_node(struct memblock_region *r, int nid)
     352             : {
     353          12 :         r->nid = nid;
     354           0 : }
     355             : 
     356        2335 : static inline int memblock_get_region_node(const struct memblock_region *r)
     357             : {
     358        2335 :         return r->nid;
     359             : }
     360             : #else
     361             : static inline void memblock_set_region_node(struct memblock_region *r, int nid)
     362             : {
     363             : }
     364             : 
     365             : static inline int memblock_get_region_node(const struct memblock_region *r)
     366             : {
     367             :         return 0;
     368             : }
     369             : #endif /* CONFIG_NEED_MULTIPLE_NODES */
     370             : 
     371             : /* Flags for memblock allocation APIs */
     372             : #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
     373             : #define MEMBLOCK_ALLOC_ACCESSIBLE       0
     374             : #define MEMBLOCK_ALLOC_KASAN            1
     375             : 
     376             : /* We are using top down, so it is safe to use 0 here */
     377             : #define MEMBLOCK_LOW_LIMIT 0
     378             : 
     379             : #ifndef ARCH_LOW_ADDRESS_LIMIT
     380             : #define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
     381             : #endif
     382             : 
     383             : phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
     384             :                                       phys_addr_t start, phys_addr_t end);
     385             : phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
     386             :                                       phys_addr_t align, phys_addr_t start,
     387             :                                       phys_addr_t end, int nid, bool exact_nid);
     388             : phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
     389             : 
     390           0 : static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
     391             :                                               phys_addr_t align)
     392             : {
     393           0 :         return memblock_phys_alloc_range(size, align, 0,
     394             :                                          MEMBLOCK_ALLOC_ACCESSIBLE);
     395             : }
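/*
 * A minimal sketch (not part of memblock.h): reserve a page-aligned,
 * physically contiguous buffer below 4 GiB for a hypothetical early device
 * that cannot address memory above that limit. SZ_4G comes from
 * <linux/sizes.h>.
 */
static phys_addr_t __init early_low_buffer_alloc(phys_addr_t size)
{
	/* returns 0 on failure, the physical base address on success */
	return memblock_phys_alloc_range(size, PAGE_SIZE, 0, SZ_4G);
}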
     396             : 
     397             : void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
     398             :                                  phys_addr_t min_addr, phys_addr_t max_addr,
     399             :                                  int nid);
     400             : void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
     401             :                                  phys_addr_t min_addr, phys_addr_t max_addr,
     402             :                                  int nid);
     403             : void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
     404             :                              phys_addr_t min_addr, phys_addr_t max_addr,
     405             :                              int nid);
     406             : 
     407          23 : static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
     408             : {
     409          23 :         return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
     410             :                                       MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
     411             : }
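/*
 * A minimal sketch (not part of memblock.h): allocate a zeroed, cache-line
 * aligned bitmap before the page allocator is up. memblock_alloc() returns a
 * virtual address (or NULL on failure) and zeroes the memory; boot-time
 * callers typically panic when it fails. The helper name is hypothetical.
 */
static unsigned long *__init alloc_boot_bitmap(unsigned long nbits)
{
	unsigned long *map;

	map = memblock_alloc(BITS_TO_LONGS(nbits) * sizeof(long),
			     SMP_CACHE_BYTES);
	if (!map)
		panic("%s: failed to allocate %lu-bit bitmap\n",
		      __func__, nbits);

	return map;
}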
     412             : 
     413           0 : static inline void *memblock_alloc_raw(phys_addr_t size,
     414             :                                                phys_addr_t align)
     415             : {
     416           0 :         return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
     417             :                                           MEMBLOCK_ALLOC_ACCESSIBLE,
     418             :                                           NUMA_NO_NODE);
     419             : }
     420             : 
     421           0 : static inline void *memblock_alloc_from(phys_addr_t size,
     422             :                                                 phys_addr_t align,
     423             :                                                 phys_addr_t min_addr)
     424             : {
     425           0 :         return memblock_alloc_try_nid(size, align, min_addr,
     426             :                                       MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
     427             : }
     428             : 
     429           0 : static inline void *memblock_alloc_low(phys_addr_t size,
     430             :                                                phys_addr_t align)
     431             : {
     432           0 :         return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
     433             :                                       ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
     434             : }
     435             : 
     436           2 : static inline void *memblock_alloc_node(phys_addr_t size,
     437             :                                                 phys_addr_t align, int nid)
     438             : {
     439           2 :         return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
     440             :                                       MEMBLOCK_ALLOC_ACCESSIBLE, nid);
     441             : }
     442             : 
     443           2 : static inline void memblock_free_early(phys_addr_t base,
     444             :                                               phys_addr_t size)
     445             : {
     446           2 :         memblock_free(base, size);
     447           1 : }
     448             : 
     449             : static inline void memblock_free_early_nid(phys_addr_t base,
     450             :                                                   phys_addr_t size, int nid)
     451             : {
     452             :         memblock_free(base, size);
     453             : }
     454             : 
     455           0 : static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
     456             : {
     457           0 :         __memblock_free_late(base, size);
     458           0 : }
     459             : 
     460             : /*
     461             :  * Set the allocation direction to bottom-up or top-down.
     462             :  */
     463           1 : static inline __init void memblock_set_bottom_up(bool enable)
     464             : {
     465           1 :         memblock.bottom_up = enable;
     466             : }
     467             : 
     468             : /*
     469             :  * Check if the allocation direction is bottom-up or not.
      470             :  * If this returns true, memblock will allocate memory
      471             :  * in the bottom-up direction.
     472             :  */
     473         365 : static inline __init bool memblock_bottom_up(void)
     474             : {
     475         365 :         return memblock.bottom_up;
     476             : }
     477             : 
     478             : phys_addr_t memblock_phys_mem_size(void);
     479             : phys_addr_t memblock_reserved_size(void);
     480             : phys_addr_t memblock_start_of_DRAM(void);
     481             : phys_addr_t memblock_end_of_DRAM(void);
     482             : void memblock_enforce_memory_limit(phys_addr_t memory_limit);
     483             : void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
     484             : void memblock_mem_limit_remove_map(phys_addr_t limit);
     485             : bool memblock_is_memory(phys_addr_t addr);
     486             : bool memblock_is_map_memory(phys_addr_t addr);
     487             : bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
     488             : bool memblock_is_reserved(phys_addr_t addr);
     489             : bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
     490             : 
     491             : void memblock_dump_all(void);
     492             : 
     493             : /**
     494             :  * memblock_set_current_limit - Set the current allocation limit to allow
     495             :  *                         limiting allocations to what is currently
     496             :  *                         accessible during boot
     497             :  * @limit: New limit value (physical address)
     498             :  */
     499             : void memblock_set_current_limit(phys_addr_t limit);
     500             : 
     501             : 
     502             : phys_addr_t memblock_get_current_limit(void);
     503             : 
     504             : /*
     505             :  * pfn conversion functions
     506             :  *
     507             :  * While the memory MEMBLOCKs should always be page aligned, the reserved
      508             :  * MEMBLOCKs may not be. These accessors attempt to provide a very clear
      509             :  * idea of what they return for such non-aligned MEMBLOCKs.
     510             :  */
     511             : 
     512             : /**
     513             :  * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
     514             :  * @reg: memblock_region structure
     515             :  *
     516             :  * Return: the lowest pfn intersecting with the memory region
     517             :  */
     518           0 : static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
     519             : {
     520           0 :         return PFN_UP(reg->base);
     521             : }
     522             : 
     523             : /**
     524             :  * memblock_region_memory_end_pfn - get the end pfn of the memory region
     525             :  * @reg: memblock_region structure
     526             :  *
      527             :  * Return: the end_pfn of the memory region
     528             :  */
     529           0 : static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
     530             : {
     531           0 :         return PFN_DOWN(reg->base + reg->size);
     532             : }
     533             : 
     534             : /**
     535             :  * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
     536             :  * @reg: memblock_region structure
     537             :  *
     538             :  * Return: the lowest pfn intersecting with the reserved region
     539             :  */
     540             : static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
     541             : {
     542             :         return PFN_DOWN(reg->base);
     543             : }
     544             : 
     545             : /**
     546             :  * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
     547             :  * @reg: memblock_region structure
     548             :  *
     549             :  * Return: the end_pfn of the reserved region
     550             :  */
     551             : static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
     552             : {
     553             :         return PFN_UP(reg->base + reg->size);
     554             : }
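/*
 * A worked example (not part of memblock.h), assuming 4 KiB pages: for a
 * reserved region with base = 0x1800 and size = 0x2000 (end = 0x3800),
 * memblock_region_reserved_base_pfn() = PFN_DOWN(0x1800) = 1 and
 * memblock_region_reserved_end_pfn() = PFN_UP(0x3800) = 4, i.e. every page
 * the region touches is included. The memory accessors round the opposite
 * way (PFN_UP(base), PFN_DOWN(end)), so only pages fully inside the region
 * are counted.
 */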
     555             : 
     556             : /**
      557             :  * for_each_mem_region - iterate over memory regions
     558             :  * @region: loop variable
     559             :  */
     560             : #define for_each_mem_region(region)                                     \
     561             :         for (region = memblock.memory.regions;                          \
     562             :              region < (memblock.memory.regions + memblock.memory.cnt);       \
     563             :              region++)
     564             : 
     565             : /**
      566             :  * for_each_reserved_mem_region - iterate over reserved memory regions
     567             :  * @region: loop variable
     568             :  */
     569             : #define for_each_reserved_mem_region(region)                            \
     570             :         for (region = memblock.reserved.regions;                        \
     571             :              region < (memblock.reserved.regions + memblock.reserved.cnt); \
     572             :              region++)
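/*
 * A minimal sketch (not part of memblock.h): walk the raw region array and
 * print node, extent and flags for each memory region, roughly what
 * memblock_dump_all() reports. The helper name is hypothetical.
 */
static void __init dump_mem_regions(void)
{
	struct memblock_region *r;

	for_each_mem_region(r) {
		phys_addr_t end = r->base + r->size - 1;

		pr_info("node %d: [%pa-%pa] flags %#x\n",
			memblock_get_region_node(r), &r->base, &end,
			(unsigned int)r->flags);
	}
}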
     573             : 
     574             : extern void *alloc_large_system_hash(const char *tablename,
     575             :                                      unsigned long bucketsize,
     576             :                                      unsigned long numentries,
     577             :                                      int scale,
     578             :                                      int flags,
     579             :                                      unsigned int *_hash_shift,
     580             :                                      unsigned int *_hash_mask,
     581             :                                      unsigned long low_limit,
     582             :                                      unsigned long high_limit);
     583             : 
     584             : #define HASH_EARLY      0x00000001      /* Allocating during early boot? */
     585             : #define HASH_SMALL      0x00000002      /* sub-page allocation allowed, min
     586             :                                          * shift passed via *_hash_shift */
     587             : #define HASH_ZERO       0x00000004      /* Zero allocated hash table */
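/*
 * A minimal sketch (not part of memblock.h): size and allocate a system-wide
 * hash table at boot, the way the inode and dentry caches do. All names are
 * hypothetical; HASH_ZERO asks for a zero-filled table, and numentries == 0
 * lets the allocator size the table from available memory.
 */
static unsigned int example_hash_shift __ro_after_init;
static struct hlist_head *example_hash_table __ro_after_init;

static void __init example_hash_init(void)
{
	example_hash_table = alloc_large_system_hash("example-cache",
						     sizeof(struct hlist_head),
						     0,        /* numentries: auto */
						     14,       /* scale */
						     HASH_ZERO,
						     &example_hash_shift,
						     NULL,     /* no mask needed */
						     0, 0);    /* no limits */
}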
     588             : 
     589             : /* Only NUMA needs hash distribution. 64bit NUMA architectures have
     590             :  * sufficient vmalloc space.
     591             :  */
     592             : #ifdef CONFIG_NUMA
     593             : #define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
     594             : extern int hashdist;            /* Distribute hashes across NUMA nodes? */
     595             : #else
     596             : #define hashdist (0)
     597             : #endif
     598             : 
     599             : #ifdef CONFIG_MEMTEST
     600             : extern void early_memtest(phys_addr_t start, phys_addr_t end);
     601             : #else
     602           1 : static inline void early_memtest(phys_addr_t start, phys_addr_t end)
     603             : {
     604           1 : }
     605             : #endif
     606             : 
     607             : #endif /* __KERNEL__ */
     608             : 
     609             : #endif /* _LINUX_MEMBLOCK_H */

Generated by: LCOV version 1.14