/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

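/*
 * Example (illustrative sketch, not part of the original header): a caller
 * holding a vmalloc address can look up its vm_struct and test the flags
 * above, e.g. to tell a vmalloc() area from a vmap() one; ptr stands for the
 * caller's address:
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *
 *	if (area && (area->flags & VM_ALLOC))
 *		pr_info("%p comes from vmalloc()\n", ptr);
 *	else if (area && (area->flags & VM_MAP))
 *		pr_info("%p was mapped with vmap()\n", ptr);
 */
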
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

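/*
 * Example (illustrative, hypothetical arch header): an architecture can
 * supply its own limit before this fallback is seen, e.g. to allow ioremap()
 * alignment up to its section size:
 *
 *	#define IOREMAP_MAX_ORDER	24	// 16 MiB sections
 */
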
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

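/*
 * Example (illustrative sketch): the pages backing a vmalloc() area can be
 * reached through its vm_struct; do_something_with() is a hypothetical helper
 * standing in for real per-page work:
 *
 *	struct vm_struct *area = find_vm_area(ptr);
 *	unsigned int i;
 *
 *	for (i = 0; area && i < area->nr_pages; i++)
 *		do_something_with(area->pages[i]);
 */
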
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
};

/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);

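/*
 * Example (illustrative sketch): vm_map_ram()/vm_unmap_ram() provide a
 * transient kernel mapping of an array of pages; the same page count must be
 * passed to both calls:
 *
 *	void *addr = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	memcpy(addr, data, len);	// use the mapping
 *	vm_unmap_ram(addr, nr_pages);
 */
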
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

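/*
 * Example (illustrative sketch): vmalloc()/vzalloc() are the usual way to get
 * a large, virtually contiguous but possibly physically scattered buffer;
 * struct foo is a hypothetical element type and array_size() comes from
 * <linux/overflow.h>, included above:
 *
 *	void *buf = vzalloc(array_size(nents, sizeof(struct foo)));
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf, but never allocate or free it from atomic context ...
 *	vfree(buf);
 */
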
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);

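/*
 * Example (illustrative sketch): vmap() builds a long-lived virtually
 * contiguous mapping over pages the caller already owns; vunmap() tears the
 * mapping down but does not free the pages themselves:
 *
 *	void *addr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	// ... access the pages through addr ...
 *	vunmap(addr);
 */
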
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);

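/*
 * Example (illustrative sketch): a driver can expose a buffer allocated with
 * vmalloc_user() (which sets VM_USERMAP) to user space from its ->mmap()
 * handler; foo_dev and its buf member are hypothetical driver state:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 */
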
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); we rely
 * on the compiler to optimize the calls out when ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

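/*
 * Example (illustrative, hypothetical arch code): an architecture that must
 * propagate top-level kernel page-table changes to all page tables could opt
 * in roughly like this (sync_top_level_entries() is a made-up helper):
 *
 *	// in an arch header
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED
 *
 *	// in arch code
 *	void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 *	{
 *		sync_top_level_entries(start, end);
 *	}
 */
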
/*
 * Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

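/*
 * Example (illustrative sketch): reserving a chunk of vmalloc address space
 * without backing it with pages, as ioremap-style users do:
 *
 *	struct vm_struct *area = get_vm_area(len, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	pr_debug("reserved %zu usable bytes at %p\n",
 *		 get_vm_area_size(area), area->addr);
 *	// ... map something into the range, then eventually ...
 *	free_vm_area(area);
 */
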
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif

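/*
 * Example (illustrative sketch): code that later changes the protections of a
 * vmalloc'ed region (e.g. making it read-only or executable with the
 * set_memory_*() helpers) is expected to mark it right after allocation, so
 * that vfree() resets the direct map and flushes the TLB:
 *
 *	void *p = vmalloc(size);
 *
 *	if (p)
 *		set_vm_flush_reset_perms(p);
 *	// ... set_memory_ro()/set_memory_x() on p, use it, then vfree(p) ...
 */
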
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * Internals.  Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

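/*
 * Example (illustrative sketch): a subsystem that holds cached vmalloc
 * mappings can register to be notified when the vmap allocator purges lazily
 * freed areas under address-space pressure; foo_drop_cached_mappings() is a
 * hypothetical callback body:
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		foo_drop_cached_mappings();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_purge_nb = {
 *		.notifier_call = foo_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_purge_nb);
 */
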
#ifdef CONFIG_MMU
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */