Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_RMAP_H
3 : #define _LINUX_RMAP_H
4 : /*
5 : * Declarations for Reverse Mapping functions in mm/rmap.c
6 : */
7 :
8 : #include <linux/list.h>
9 : #include <linux/slab.h>
10 : #include <linux/mm.h>
11 : #include <linux/rwsem.h>
12 : #include <linux/memcontrol.h>
13 : #include <linux/highmem.h>
14 :
15 : /*
16 : * The anon_vma heads a list of private "related" vmas, to scan if
17 : * an anonymous page pointing to this anon_vma needs to be unmapped:
18 : * the vmas on the list will be related by forking, or by splitting.
19 : *
20 : * Since vmas come and go as they are split and merged (particularly
21 : * in mprotect), the mapping field of an anonymous page cannot point
22 : * directly to a vma: instead it points to an anon_vma, on whose list
23 : * the related vmas can be easily linked or unlinked.
24 : *
25 : * After unlinking the last vma on the list, we must garbage collect
26 : * the anon_vma object itself: we're guaranteed no page can be
27 : * pointing to this anon_vma once its vma list is empty.
28 : */
29 : struct anon_vma {
30 : struct anon_vma *root; /* Root of this anon_vma tree */
31 : struct rw_semaphore rwsem; /* W: modification, R: walking the list */
32 : /*
33 : * The refcount is taken on an anon_vma when there is no
34 : * guarantee that the vma or its page tables will exist for
35 : * the duration of the operation. A caller that takes
36 : * the reference is responsible for freeing the
37 : * anon_vma if it is the last user on release.
38 : */
39 : atomic_t refcount;
40 :
41 : /*
42 : * Count of child anon_vmas and VMAs which point to this anon_vma.
43 : *
44 : * This counter is used when deciding whether to reuse an existing anon_vma
45 : * instead of forking a new one. See the comments in anon_vma_clone().
46 : */
47 : unsigned degree;
48 :
49 : struct anon_vma *parent; /* Parent of this anon_vma */
50 :
51 : /*
52 : * NOTE: the LSB of the rb_root.rb_node is set by
53 : * mm_take_all_locks() _after_ taking the above lock. So the
54 : * rb_root must only be read/written after taking the above lock
55 : * to be sure to see a valid next pointer. The LSB bit itself
56 : * is serialized by a system wide lock only visible to
57 : * mm_take_all_locks() (mm_all_locks_mutex).
58 : */
59 :
60 : /* Interval tree of private "related" vmas */
61 : struct rb_root_cached rb_root;
62 : };
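/*
 * Illustrative sketch (not part of this header): how an anonymous page
 * reaches its anon_vma.  An anon page stores the anon_vma pointer in
 * page->mapping with the PAGE_MAPPING_ANON low bit set; the real helpers
 * live in mm/util.c and also handle KSM/movable pages, which this sketch
 * glosses over.
 */
static inline struct anon_vma *example_page_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (!(mapping & PAGE_MAPPING_ANON))
		return NULL;	/* file-backed (or unmapped) page */

	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}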
63 :
64 : /*
65 : * The copy-on-write semantics of fork mean that an anon_vma
66 : * can become associated with multiple processes. Furthermore,
67 : * each child process will have its own anon_vma, where new
68 : * pages for that process are instantiated.
69 : *
70 : * This structure allows us to find the anon_vmas associated
71 : * with a VMA, or the VMAs associated with an anon_vma.
72 : * The "same_vma" list contains the anon_vma_chains linking
73 : * all the anon_vmas associated with this VMA.
74 : * The "rb" field indexes, via an interval tree, the anon_vma_chains
75 : * which link all the VMAs associated with this anon_vma.
76 : */
77 : struct anon_vma_chain {
78 : struct vm_area_struct *vma;
79 : struct anon_vma *anon_vma;
80 : struct list_head same_vma; /* locked by mmap_lock & page_table_lock */
81 : struct rb_node rb; /* locked by anon_vma->rwsem */
82 : unsigned long rb_subtree_last;
83 : #ifdef CONFIG_DEBUG_VM_RB
84 : unsigned long cached_vma_start, cached_vma_last;
85 : #endif
86 : };
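/*
 * Illustrative sketch (not part of this header): the chain is what lets
 * rmap go in both directions.  From a VMA, the same_vma list yields every
 * anon_vma the VMA is attached to; from an anon_vma, the interval tree
 * yields every VMA that may map a given page offset (using the kernel's
 * anon_vma_interval_tree_foreach() helper).  Each side must be walked
 * under its respective lock, per the field comments above.
 */
static inline void example_walk_both_directions(struct vm_area_struct *vma,
						struct anon_vma *anon_vma,
						pgoff_t pgoff)
{
	struct anon_vma_chain *avc;

	/* every anon_vma this VMA is linked to (its own plus inherited ones) */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		(void)avc->anon_vma;

	/* every VMA of this anon_vma whose range covers pgoff */
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff)
		(void)avc->vma;
}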
87 :
88 : enum ttu_flags {
89 : TTU_MIGRATION = 0x1, /* migration mode */
90 : TTU_MUNLOCK = 0x2, /* munlock mode */
91 :
92 : TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
93 : TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
94 : TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
95 : TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
96 : * and caller guarantees they will
97 : * do a final flush if necessary */
98 : TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
99 : * caller holds it */
100 : TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
101 : };
102 :
103 : #ifdef CONFIG_MMU
104 27251 : static inline void get_anon_vma(struct anon_vma *anon_vma)
105 : {
106 27251 : atomic_inc(&anon_vma->refcount);
107 27251 : }
108 :
109 : void __put_anon_vma(struct anon_vma *anon_vma);
110 :
111 39936 : static inline void put_anon_vma(struct anon_vma *anon_vma)
112 : {
113 79872 : if (atomic_dec_and_test(&anon_vma->refcount))
114 39936 : __put_anon_vma(anon_vma);
115 39936 : }
116 :
117 51192 : static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
118 : {
119 51192 : down_write(&anon_vma->root->rwsem);
120 0 : }
121 :
122 51192 : static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
123 : {
124 51192 : up_write(&anon_vma->root->rwsem);
125 9403 : }
126 :
127 0 : static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
128 : {
129 0 : down_read(&anon_vma->root->rwsem);
130 : }
131 :
132 0 : static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
133 : {
134 0 : up_read(&anon_vma->root->rwsem);
135 0 : }
136 :
137 :
138 : /*
139 : * anon_vma helper functions.
140 : */
141 : void anon_vma_init(void); /* create anon_vma_cachep */
142 : int __anon_vma_prepare(struct vm_area_struct *);
143 : void unlink_anon_vmas(struct vm_area_struct *);
144 : int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
145 : int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
146 :
147 69425 : static inline int anon_vma_prepare(struct vm_area_struct *vma)
148 : {
149 69425 : if (likely(vma->anon_vma))
150 : return 0;
151 :
152 13350 : return __anon_vma_prepare(vma);
153 : }
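/*
 * Illustrative sketch (not part of this header): the usual ordering in an
 * anonymous fault path, loosely modelled on do_anonymous_page().  The
 * anon_vma must be prepared before a new page can be hooked into rmap
 * with page_add_new_anon_rmap() (declared below).
 */
static inline int example_prepare_for_anon_fault(struct vm_area_struct *vma)
{
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;	/* could not allocate or attach an anon_vma */

	/* ... allocate the page, take the PTE lock, install the PTE,
	 * then call page_add_new_anon_rmap(page, vma, address, false) ... */
	return 0;
}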
154 :
155 0 : static inline void anon_vma_merge(struct vm_area_struct *vma,
156 : struct vm_area_struct *next)
157 : {
158 0 : VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
159 0 : unlink_anon_vmas(next);
160 0 : }
161 :
162 : struct anon_vma *page_get_anon_vma(struct page *page);
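/*
 * Illustrative sketch (not part of this header): page_get_anon_vma() pins
 * the anon_vma with a reference (see the refcount comment above), so it
 * stays valid even if the last VMA goes away; the caller drops the pin
 * with put_anon_vma().  This mirrors the pattern used during migration.
 */
static inline void example_pin_and_walk_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;				/* not anonymous, or already unmapped */

	anon_vma_lock_read(anon_vma);
	/* ... walk the rmap interval tree under the read lock ... */
	anon_vma_unlock_read(anon_vma);

	put_anon_vma(anon_vma);			/* may free it if we were the last user */
}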
163 :
164 : /* bitflags for do_page_add_anon_rmap() */
165 : #define RMAP_EXCLUSIVE 0x01
166 : #define RMAP_COMPOUND 0x02
167 :
168 : /*
169 : * rmap interfaces called when adding or removing pte of page
170 : */
171 : void page_move_anon_rmap(struct page *, struct vm_area_struct *);
172 : void page_add_anon_rmap(struct page *, struct vm_area_struct *,
173 : unsigned long, bool);
174 : void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
175 : unsigned long, int);
176 : void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
177 : unsigned long, bool);
178 : void page_add_file_rmap(struct page *, bool);
179 : void page_remove_rmap(struct page *, bool);
180 :
181 : void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
182 : unsigned long);
183 : void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
184 : unsigned long);
185 :
186 81818 : static inline void page_dup_rmap(struct page *page, bool compound)
187 : {
188 81818 : atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
189 81817 : }
190 :
191 : /*
192 : * Called from mm/vmscan.c to handle paging out
193 : */
194 : int page_referenced(struct page *, int is_locked,
195 : struct mem_cgroup *memcg, unsigned long *vm_flags);
196 :
197 : bool try_to_unmap(struct page *, enum ttu_flags flags);
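/*
 * Illustrative sketch (not part of this header): try_to_unmap() takes a
 * mask of the ttu_flags above.  A migration-style caller would look
 * roughly like this; the exact flag combination varies by caller and
 * kernel version.
 */
static inline bool example_unmap_for_migration(struct page *page)
{
	/* replace the page's PTEs with migration entries, ignoring mlock */
	if (!try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK))
		return false;		/* at least one mapping could not be removed */

	return !page_mapped(page);	/* fully unmapped and ready to be moved */
}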
198 :
199 : /* Avoid racy checks */
200 : #define PVMW_SYNC (1 << 0)
201 : /* Look for migration entries rather than present PTEs */
202 : #define PVMW_MIGRATION (1 << 1)
203 :
204 : struct page_vma_mapped_walk {
205 : struct page *page;
206 : struct vm_area_struct *vma;
207 : unsigned long address;
208 : pmd_t *pmd;
209 : pte_t *pte;
210 : spinlock_t *ptl;
211 : unsigned int flags;
212 : };
213 :
214 68 : static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
215 : {
216 : /* A HugeTLB pte points at the page table entry directly and is never pte-mapped, so there is nothing to unmap. */
217 68 : if (pvmw->pte && !PageHuge(pvmw->page))
218 68 : pte_unmap(pvmw->pte);
219 68 : if (pvmw->ptl)
220 136 : spin_unlock(pvmw->ptl);
221 : }
222 :
223 : bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
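/*
 * Illustrative sketch (not part of this header): the intended calling
 * pattern for the walker.  page_vma_mapped_walk() finds, maps and locks
 * each spot where the page is mapped in @vma; when it returns false it
 * has already cleaned up, so page_vma_mapped_walk_done() is only needed
 * when the caller breaks out of the loop early.
 */
static inline bool example_page_has_present_pte(struct page *page,
						struct vm_area_struct *vma,
						unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.pte (or pvmw.pmd for a PMD-mapped THP) is mapped and locked here */
		if (pvmw.pte && pte_present(*pvmw.pte)) {
			page_vma_mapped_walk_done(&pvmw);	/* stopping early: clean up ourselves */
			return true;
		}
	}
	return false;					/* walker already released everything */
}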
224 :
225 : /*
226 : * Used by swapoff to help locate where page is expected in vma.
227 : */
228 : unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
229 :
230 : /*
231 : * Cleans the PTEs of shared mappings.
232 : * (and since clean PTEs should also be readonly, write protects them too)
233 : *
234 : * returns the number of cleaned PTEs.
235 : */
236 : int page_mkclean(struct page *);
237 :
238 : /*
239 : * Called in the munlock()/munmap() path to check for other vmas holding
240 : * the page mlocked.
241 : */
242 : void try_to_munlock(struct page *);
243 :
244 : void remove_migration_ptes(struct page *old, struct page *new, bool locked);
245 :
246 : /*
247 : * Called by memory-failure.c to kill processes.
248 : */
249 : struct anon_vma *page_lock_anon_vma_read(struct page *page);
250 : void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
251 : int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
252 :
253 : /*
254 : * rmap_walk_control: To control rmap traversing for specific needs
255 : *
256 : * arg: passed to rmap_one() and invalid_vma()
257 : * rmap_one: executed on each vma where the page is mapped
258 : * done: checks whether the traversal should terminate
259 : * anon_lock: takes the anon_vma lock in an optimized way instead of the default
260 : * invalid_vma: skips vmas the caller is not interested in
261 : */
262 : struct rmap_walk_control {
263 : void *arg;
264 : /*
265 : * Return false if page table scanning in rmap_walk should be stopped.
266 : * Otherwise, return true.
267 : */
268 : bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
269 : unsigned long addr, void *arg);
270 : int (*done)(struct page *page);
271 : struct anon_vma *(*anon_lock)(struct page *page);
272 : bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
273 : };
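/*
 * Illustrative sketch (not part of this header): a minimal rmap walk that
 * just counts the VMAs the page is mapped in.  Only .rmap_one is
 * required; hooks left NULL fall back to the defaults (visit every vma,
 * take the anon_vma lock the standard way).  The example_* names are made
 * up for the sketch.
 */
static inline bool example_rmap_one(struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;		/* page is mapped (or mappable) at addr in vma */
	return true;		/* keep walking; returning false stops the walk */
}

static inline int example_count_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg = &count,
		.rmap_one = example_rmap_one,
	};

	rmap_walk(page, &rwc);	/* use rmap_walk_locked() if the rmap lock is already held */
	return count;
}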
274 :
275 : void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
276 : void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
277 :
278 : #else /* !CONFIG_MMU */
279 :
280 : #define anon_vma_init() do {} while (0)
281 : #define anon_vma_prepare(vma) (0)
282 : #define anon_vma_link(vma) do {} while (0)
283 :
284 : static inline int page_referenced(struct page *page, int is_locked,
285 : struct mem_cgroup *memcg,
286 : unsigned long *vm_flags)
287 : {
288 : *vm_flags = 0;
289 : return 0;
290 : }
291 :
292 : #define try_to_unmap(page, refs) false
293 :
294 : static inline int page_mkclean(struct page *page)
295 : {
296 : return 0;
297 : }
298 :
299 :
300 : #endif /* CONFIG_MMU */
301 :
302 : #endif /* _LINUX_RMAP_H */