Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Memory merging support.
4 : *
5 : * This code enables dynamic sharing of identical pages found in different
6 : * memory areas, even if they are not shared by fork()
7 : *
8 : * Copyright (C) 2008-2009 Red Hat, Inc.
9 : * Authors:
10 : * Izik Eidus
11 : * Andrea Arcangeli
12 : * Chris Wright
13 : * Hugh Dickins
14 : */
15 :
16 : #include <linux/errno.h>
17 : #include <linux/mm.h>
18 : #include <linux/fs.h>
19 : #include <linux/mman.h>
20 : #include <linux/sched.h>
21 : #include <linux/sched/mm.h>
22 : #include <linux/sched/coredump.h>
23 : #include <linux/rwsem.h>
24 : #include <linux/pagemap.h>
25 : #include <linux/rmap.h>
26 : #include <linux/spinlock.h>
27 : #include <linux/xxhash.h>
28 : #include <linux/delay.h>
29 : #include <linux/kthread.h>
30 : #include <linux/wait.h>
31 : #include <linux/slab.h>
32 : #include <linux/rbtree.h>
33 : #include <linux/memory.h>
34 : #include <linux/mmu_notifier.h>
35 : #include <linux/swap.h>
36 : #include <linux/ksm.h>
37 : #include <linux/hashtable.h>
38 : #include <linux/freezer.h>
39 : #include <linux/oom.h>
40 : #include <linux/numa.h>
41 :
42 : #include <asm/tlbflush.h>
43 : #include "internal.h"
44 :
45 : #ifdef CONFIG_NUMA
46 : #define NUMA(x) (x)
47 : #define DO_NUMA(x) do { (x); } while (0)
48 : #else
49 : #define NUMA(x) (0)
50 : #define DO_NUMA(x) do { } while (0)
51 : #endif
52 :
53 : /**
54 : * DOC: Overview
55 : *
56 : * A few notes about the KSM scanning process,
57 : * to make it easier to understand the data structures below:
58 : *
59 : * In order to reduce excessive scanning, KSM sorts the memory pages by their
60 : * contents into a data structure that holds pointers to the pages' locations.
61 : *
62 : * Since the contents of the pages may change at any moment, KSM cannot just
63 : * insert the pages into a normal sorted tree and expect it to find anything.
64 : * Therefore KSM uses two data structures - the stable and the unstable tree.
65 : *
66 : * The stable tree holds pointers to all the merged pages (ksm pages), sorted
67 : * by their contents. Because each such page is write-protected, searching on
68 : * this tree is fully assured to be working (except when pages are unmapped),
69 : * and therefore this tree is called the stable tree.
70 : *
71 : * The stable tree node includes information required for reverse
72 : * mapping from a KSM page to virtual addresses that map this page.
73 : *
74 : * In order to avoid large latencies of the rmap walks on KSM pages,
75 : * KSM maintains two types of nodes in the stable tree:
76 : *
77 : * * the regular nodes that keep the reverse mapping structures in a
78 : * linked list
79 : * * the "chains" that link nodes ("dups") that represent the same
80 : * write protected memory content, but each "dup" corresponds to a
81 : * different KSM page copy of that content
82 : *
83 : * Internally, the regular nodes, "dups" and "chains" are represented
84 : * using the same struct stable_node structure.
85 : *
86 : * In addition to the stable tree, KSM uses a second data structure called the
87 : * unstable tree: this tree holds pointers to pages which have been found to
88 : * be "unchanged for a period of time". The unstable tree sorts these pages
89 : * by their contents, but since they are not write-protected, KSM cannot rely
90 : * upon the unstable tree to work correctly - the unstable tree is liable to
91 : * be corrupted as its contents are modified, and so it is called unstable.
92 : *
93 : * KSM solves this problem by several techniques:
94 : *
95 : * 1) The unstable tree is flushed every time KSM completes scanning all
96 : * memory areas, and then the tree is rebuilt again from the beginning.
97 : * 2) KSM will insert into the unstable tree only those pages whose hash value
98 : * has not changed since the previous scan of all memory areas.
99 : * 3) The unstable tree is a red-black tree - so its balancing is based on the
100 : * colors of the nodes and not on their contents, assuring that even when
101 : * the tree gets "corrupted" it won't get out of balance, so scanning time
102 : * remains the same (also, searching and inserting nodes in an rbtree uses
103 : * the same algorithm, so we have no overhead when we flush and rebuild).
104 : * 4) KSM never flushes the stable tree, which means that even if it were to
105 : * take 10 attempts to find a page in the unstable tree, once it is found,
106 : * it is secured in the stable tree. (When we scan a new page, we first
107 : * compare it against the stable tree, and then against the unstable tree.)
108 : *
109 : * If the merge_across_nodes tunable is unset, then KSM maintains multiple
110 : * stable trees and multiple unstable trees: one of each for each NUMA node.
111 : */
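
/*
 * Illustrative sketch, not part of ksm.c: KSM only scans memory that an
 * application has opted in with madvise(MADV_MERGEABLE), and ksmd must be
 * enabled through /sys/kernel/mm/ksm/run. A minimal userspace setup might
 * look like the following (assumes Linux's <sys/mman.h> defines
 * MADV_MERGEABLE):
 */
#if 0 /* example only, never compiled with the kernel */
#include <sys/mman.h>
#include <string.h>

static void *alloc_mergeable(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	memset(p, 0x5a, len);			/* identical page contents */
	madvise(p, len, MADV_MERGEABLE);	/* let ksmd scan this area */
	return p;
}
#endif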
112 :
113 : /**
114 : * struct mm_slot - ksm information per mm that is being scanned
115 : * @link: link to the mm_slots hash list
116 : * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
117 : * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
118 : * @mm: the mm that this information is valid for
119 : */
120 : struct mm_slot {
121 : struct hlist_node link;
122 : struct list_head mm_list;
123 : struct rmap_item *rmap_list;
124 : struct mm_struct *mm;
125 : };
126 :
127 : /**
128 : * struct ksm_scan - cursor for scanning
129 : * @mm_slot: the current mm_slot we are scanning
130 : * @address: the next address inside that to be scanned
131 : * @rmap_list: link to the next rmap to be scanned in the rmap_list
132 : * @seqnr: count of completed full scans (needed when removing unstable node)
133 : *
134 : * There is only the one ksm_scan instance of this cursor structure.
135 : */
136 : struct ksm_scan {
137 : struct mm_slot *mm_slot;
138 : unsigned long address;
139 : struct rmap_item **rmap_list;
140 : unsigned long seqnr;
141 : };
142 :
143 : /**
144 : * struct stable_node - node of the stable rbtree
145 : * @node: rb node of this ksm page in the stable tree
146 : * @head: (overlaying parent) &migrate_nodes indicates the node is temporarily on that list
147 : * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
148 : * @list: linked into migrate_nodes, pending placement in the proper node tree
149 : * @hlist: hlist head of rmap_items using this ksm page
150 : * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
151 : * @chain_prune_time: time of the last full garbage collection
152 : * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
153 : * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
154 : */
155 : struct stable_node {
156 : union {
157 : struct rb_node node; /* when node of stable tree */
158 : struct { /* when listed for migration */
159 : struct list_head *head;
160 : struct {
161 : struct hlist_node hlist_dup;
162 : struct list_head list;
163 : };
164 : };
165 : };
166 : struct hlist_head hlist;
167 : union {
168 : unsigned long kpfn;
169 : unsigned long chain_prune_time;
170 : };
171 : /*
172 : * STABLE_NODE_CHAIN can be any negative number in
173 : * rmap_hlist_len's negative range, but preferably not -1, so that
174 : * underflows can be reliably detected.
175 : */
176 : #define STABLE_NODE_CHAIN -1024
177 : int rmap_hlist_len;
178 : #ifdef CONFIG_NUMA
179 : int nid;
180 : #endif
181 : };
182 :
183 : /**
184 : * struct rmap_item - reverse mapping item for virtual addresses
185 : * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
186 : * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
187 : * @nid: NUMA node id of unstable tree in which linked (may not match page)
188 : * @mm: the memory structure this rmap_item is pointing into
189 : * @address: the virtual address this rmap_item tracks (+ flags in low bits)
190 : * @oldchecksum: previous checksum of the page at that virtual address
191 : * @node: rb node of this rmap_item in the unstable tree
192 : * @head: pointer to stable_node heading this list in the stable tree
193 : * @hlist: link into hlist of rmap_items hanging off that stable_node
194 : */
195 : struct rmap_item {
196 : struct rmap_item *rmap_list;
197 : union {
198 : struct anon_vma *anon_vma; /* when stable */
199 : #ifdef CONFIG_NUMA
200 : int nid; /* when node of unstable tree */
201 : #endif
202 : };
203 : struct mm_struct *mm;
204 : unsigned long address; /* + low bits used for flags below */
205 : unsigned int oldchecksum; /* when unstable */
206 : union {
207 : struct rb_node node; /* when node of unstable tree */
208 : struct { /* when listed from stable tree */
209 : struct stable_node *head;
210 : struct hlist_node hlist;
211 : };
212 : };
213 : };
214 :
215 : #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
216 : #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
217 : #define STABLE_FLAG 0x200 /* is listed from the stable tree */
218 : #define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
219 : /* to mask all the flags */
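
/*
 * Illustrative decoding, not part of ksm.c: rmap_item->address is page
 * aligned when it refers to a tracked page, so the low bits are free to
 * carry the flags above:
 *
 *	unsigned long va  = rmap_item->address & PAGE_MASK;
 *	unsigned char seq = rmap_item->address & SEQNR_MASK;
 *	bool unstable     = rmap_item->address & UNSTABLE_FLAG;
 *	bool stable       = rmap_item->address & STABLE_FLAG;
 *
 * remove_rmap_item_from_tree() below clears all of them at once with
 * "rmap_item->address &= PAGE_MASK" when the item leaves a tree.
 */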
220 :
221 : /* The stable and unstable tree heads */
222 : static struct rb_root one_stable_tree[1] = { RB_ROOT };
223 : static struct rb_root one_unstable_tree[1] = { RB_ROOT };
224 : static struct rb_root *root_stable_tree = one_stable_tree;
225 : static struct rb_root *root_unstable_tree = one_unstable_tree;
226 :
227 : /* Recently migrated nodes of stable tree, pending proper placement */
228 : static LIST_HEAD(migrate_nodes);
229 : #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
230 :
231 : #define MM_SLOTS_HASH_BITS 10
232 : static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
233 :
234 : static struct mm_slot ksm_mm_head = {
235 : .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
236 : };
237 : static struct ksm_scan ksm_scan = {
238 : .mm_slot = &ksm_mm_head,
239 : };
240 :
241 : static struct kmem_cache *rmap_item_cache;
242 : static struct kmem_cache *stable_node_cache;
243 : static struct kmem_cache *mm_slot_cache;
244 :
245 : /* The number of nodes in the stable tree */
246 : static unsigned long ksm_pages_shared;
247 :
248 : /* The number of page slots additionally sharing those nodes */
249 : static unsigned long ksm_pages_sharing;
250 :
251 : /* The number of nodes in the unstable tree */
252 : static unsigned long ksm_pages_unshared;
253 :
254 : /* The number of rmap_items in use: to calculate pages_volatile */
255 : static unsigned long ksm_rmap_items;
256 :
257 : /* The number of stable_node chains */
258 : static unsigned long ksm_stable_node_chains;
259 :
260 : /* The number of stable_node dups linked to the stable_node chains */
261 : static unsigned long ksm_stable_node_dups;
262 :
263 : /* Delay in pruning stale stable_node_dups in the stable_node_chains */
264 : static int ksm_stable_node_chains_prune_millisecs = 2000;
265 :
266 : /* Maximum number of page slots sharing a stable node */
267 : static int ksm_max_page_sharing = 256;
268 :
269 : /* Number of pages ksmd should scan in one batch */
270 : static unsigned int ksm_thread_pages_to_scan = 100;
271 :
272 : /* Milliseconds ksmd should sleep between batches */
273 : static unsigned int ksm_thread_sleep_millisecs = 20;
274 :
275 : /* Checksum of an empty (zeroed) page */
276 : static unsigned int zero_checksum __read_mostly;
277 :
278 : /* Whether to merge empty (zeroed) pages with actual zero pages */
279 : static bool ksm_use_zero_pages __read_mostly;
280 :
281 : #ifdef CONFIG_NUMA
282 : /* Zeroed when merging across nodes is not allowed */
283 : static unsigned int ksm_merge_across_nodes = 1;
284 : static int ksm_nr_node_ids = 1;
285 : #else
286 : #define ksm_merge_across_nodes 1U
287 : #define ksm_nr_node_ids 1
288 : #endif
289 :
290 : #define KSM_RUN_STOP 0
291 : #define KSM_RUN_MERGE 1
292 : #define KSM_RUN_UNMERGE 2
293 : #define KSM_RUN_OFFLINE 4
294 : static unsigned long ksm_run = KSM_RUN_STOP;
295 : static void wait_while_offlining(void);
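
/*
 * Illustrative sketch, not part of ksm.c: ksm_run and the scan parameters
 * above are driven from userspace through /sys/kernel/mm/ksm/ (the sysfs
 * handlers appear later in this file). Starting ksmd and setting a batch
 * size from C might look like:
 */
#if 0 /* example only, never compiled with the kernel */
#include <stdio.h>

static int ksm_knob_write(const char *knob, const char *val)
{
	char path[128];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	ret = fputs(val, f) < 0 ? -1 : 0;
	if (fclose(f))
		ret = -1;
	return ret;
}

/* usage: ksm_knob_write("pages_to_scan", "100"); ksm_knob_write("run", "1"); */
#endif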
296 :
297 : static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
298 : static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
299 : static DEFINE_MUTEX(ksm_thread_mutex);
300 : static DEFINE_SPINLOCK(ksm_mmlist_lock);
301 :
302 : #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
303 : sizeof(struct __struct), __alignof__(struct __struct),\
304 : (__flags), NULL)
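
/*
 * Illustrative expansion: KSM_KMEM_CACHE(rmap_item, 0) pastes the struct
 * name into both the cache name and the sizeof/alignof, becoming
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 */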
305 :
306 1 : static int __init ksm_slab_init(void)
307 : {
308 1 : rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
309 1 : if (!rmap_item_cache)
310 0 : goto out;
311 :
312 1 : stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
313 1 : if (!stable_node_cache)
314 0 : goto out_free1;
315 :
316 1 : mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
317 1 : if (!mm_slot_cache)
318 0 : goto out_free2;
319 :
320 : return 0;
321 :
322 0 : out_free2:
323 0 : kmem_cache_destroy(stable_node_cache);
324 0 : out_free1:
325 0 : kmem_cache_destroy(rmap_item_cache);
326 : out:
327 : return -ENOMEM;
328 : }
329 :
330 0 : static void __init ksm_slab_free(void)
331 : {
332 0 : kmem_cache_destroy(mm_slot_cache);
333 0 : kmem_cache_destroy(stable_node_cache);
334 0 : kmem_cache_destroy(rmap_item_cache);
335 0 : mm_slot_cache = NULL;
336 0 : }
337 :
338 0 : static __always_inline bool is_stable_node_chain(struct stable_node *chain)
339 : {
340 0 : return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
341 : }
342 :
343 0 : static __always_inline bool is_stable_node_dup(struct stable_node *dup)
344 : {
345 0 : return dup->head == STABLE_NODE_DUP_HEAD;
346 : }
347 :
348 0 : static inline void stable_node_chain_add_dup(struct stable_node *dup,
349 : struct stable_node *chain)
350 : {
351 0 : VM_BUG_ON(is_stable_node_dup(dup));
352 0 : dup->head = STABLE_NODE_DUP_HEAD;
353 0 : VM_BUG_ON(!is_stable_node_chain(chain));
354 0 : hlist_add_head(&dup->hlist_dup, &chain->hlist);
355 0 : ksm_stable_node_dups++;
356 0 : }
357 :
358 0 : static inline void __stable_node_dup_del(struct stable_node *dup)
359 : {
360 0 : VM_BUG_ON(!is_stable_node_dup(dup));
361 0 : hlist_del(&dup->hlist_dup);
362 0 : ksm_stable_node_dups--;
363 0 : }
364 :
365 0 : static inline void stable_node_dup_del(struct stable_node *dup)
366 : {
367 0 : VM_BUG_ON(is_stable_node_chain(dup));
368 0 : if (is_stable_node_dup(dup))
369 0 : __stable_node_dup_del(dup);
370 : else
371 0 : rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
372 : #ifdef CONFIG_DEBUG_VM
373 0 : dup->head = NULL;
374 : #endif
375 0 : }
376 :
377 0 : static inline struct rmap_item *alloc_rmap_item(void)
378 : {
379 0 : struct rmap_item *rmap_item;
380 :
381 0 : rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
382 : __GFP_NORETRY | __GFP_NOWARN);
383 0 : if (rmap_item)
384 0 : ksm_rmap_items++;
385 0 : return rmap_item;
386 : }
387 :
388 0 : static inline void free_rmap_item(struct rmap_item *rmap_item)
389 : {
390 0 : ksm_rmap_items--;
391 0 : rmap_item->mm = NULL; /* debug safety */
392 0 : kmem_cache_free(rmap_item_cache, rmap_item);
393 0 : }
394 :
395 0 : static inline struct stable_node *alloc_stable_node(void)
396 : {
397 : /*
398 : * The allocation can take too long with GFP_KERNEL when memory is under
399 : * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
400 : * grants access to memory reserves, helping to avoid this problem.
401 : */
402 0 : return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
403 : }
404 :
405 0 : static inline void free_stable_node(struct stable_node *stable_node)
406 : {
407 0 : VM_BUG_ON(stable_node->rmap_hlist_len &&
408 : !is_stable_node_chain(stable_node));
409 0 : kmem_cache_free(stable_node_cache, stable_node);
410 0 : }
411 :
412 0 : static inline struct mm_slot *alloc_mm_slot(void)
413 : {
414 0 : if (!mm_slot_cache) /* initialization failed */
415 : return NULL;
416 0 : return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
417 : }
418 :
419 0 : static inline void free_mm_slot(struct mm_slot *mm_slot)
420 : {
421 0 : kmem_cache_free(mm_slot_cache, mm_slot);
422 : }
423 :
424 0 : static struct mm_slot *get_mm_slot(struct mm_struct *mm)
425 : {
426 0 : struct mm_slot *slot;
427 :
428 0 : hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
429 0 : if (slot->mm == mm)
430 0 : return slot;
431 :
432 : return NULL;
433 : }
434 :
435 0 : static void insert_to_mm_slots_hash(struct mm_struct *mm,
436 : struct mm_slot *mm_slot)
437 : {
438 0 : mm_slot->mm = mm;
439 0 : hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
440 0 : }
441 :
442 : /*
443 : * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
444 : * page tables after it has passed through ksm_exit() - which, if necessary,
445 : * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
446 : * a special flag: they can just back out as soon as mm_users goes to zero.
447 : * ksm_test_exit() is used throughout to make this test for exit: in some
448 : * places for correctness, in some places just to avoid unnecessary work.
449 : */
450 0 : static inline bool ksm_test_exit(struct mm_struct *mm)
451 : {
452 0 : return atomic_read(&mm->mm_users) == 0;
453 : }
454 :
455 : /*
456 : * We use break_ksm to break COW on a ksm page: it's a stripped down
457 : *
458 : * if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
459 : * put_page(page);
460 : *
461 : * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
462 : * in case the application has unmapped and remapped mm,addr meanwhile.
463 : * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
464 : * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
465 : *
466 : * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
467 : * of the process that owns 'vma'. We also do not want to enforce
468 : * protection keys here anyway.
469 : */
470 0 : static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
471 : {
472 0 : struct page *page;
473 0 : vm_fault_t ret = 0;
474 :
475 0 : do {
476 0 : cond_resched();
477 0 : page = follow_page(vma, addr,
478 : FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
479 0 : if (IS_ERR_OR_NULL(page))
480 : break;
481 0 : if (PageKsm(page))
482 0 : ret = handle_mm_fault(vma, addr,
483 : FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
484 : NULL);
485 : else
486 : ret = VM_FAULT_WRITE;
487 0 : put_page(page);
488 0 : } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
489 : /*
490 : * We must loop because handle_mm_fault() may back out if there's
491 : * any difficulty e.g. if pte accessed bit gets updated concurrently.
492 : *
493 : * VM_FAULT_WRITE is what we have been hoping for: it indicates that
494 : * COW has been broken, even if the vma does not permit VM_WRITE;
495 : * but note that a concurrent fault might break PageKsm for us.
496 : *
497 : * VM_FAULT_SIGBUS could occur if we race with truncation of the
498 : * backing file, which also invalidates anonymous pages: that's
499 : * okay, that truncation will have unmapped the PageKsm for us.
500 : *
501 : * VM_FAULT_OOM: at the time of writing (late July 2009), setting
502 : * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
503 : * current task has TIF_MEMDIE set, and will be OOM killed on return
504 : * to user; and ksmd, having no mm, would never be chosen for that.
505 : *
506 : * But if the mm is in a limited mem_cgroup, then the fault may fail
507 : * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
508 : * even ksmd can fail in this way - though it's usually breaking ksm
509 : * just to undo a merge it made a moment before, so unlikely to oom.
510 : *
511 : * That's a pity: we might therefore have more kernel pages allocated
512 : * than we're counting as nodes in the stable tree; but ksm_do_scan
513 : * will retry to break_cow on each pass, so should recover the page
514 : * in due course. The important thing is to not let VM_MERGEABLE
515 : * be cleared while any such pages might remain in the area.
516 : */
517 0 : return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
518 : }
519 :
520 0 : static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
521 : unsigned long addr)
522 : {
523 0 : struct vm_area_struct *vma;
524 0 : if (ksm_test_exit(mm))
525 : return NULL;
526 0 : vma = find_vma(mm, addr);
527 0 : if (!vma || vma->vm_start > addr)
528 : return NULL;
529 0 : if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
530 0 : return NULL;
531 : return vma;
532 : }
533 :
534 0 : static void break_cow(struct rmap_item *rmap_item)
535 : {
536 0 : struct mm_struct *mm = rmap_item->mm;
537 0 : unsigned long addr = rmap_item->address;
538 0 : struct vm_area_struct *vma;
539 :
540 : /*
541 : * It is not an accident that whenever we want to break COW
542 : * to undo, we also need to drop a reference to the anon_vma.
543 : */
544 0 : put_anon_vma(rmap_item->anon_vma);
545 :
546 0 : mmap_read_lock(mm);
547 0 : vma = find_mergeable_vma(mm, addr);
548 0 : if (vma)
549 0 : break_ksm(vma, addr);
550 0 : mmap_read_unlock(mm);
551 0 : }
552 :
553 0 : static struct page *get_mergeable_page(struct rmap_item *rmap_item)
554 : {
555 0 : struct mm_struct *mm = rmap_item->mm;
556 0 : unsigned long addr = rmap_item->address;
557 0 : struct vm_area_struct *vma;
558 0 : struct page *page;
559 :
560 0 : mmap_read_lock(mm);
561 0 : vma = find_mergeable_vma(mm, addr);
562 0 : if (!vma)
563 0 : goto out;
564 :
565 0 : page = follow_page(vma, addr, FOLL_GET);
566 0 : if (IS_ERR_OR_NULL(page))
567 0 : goto out;
568 0 : if (PageAnon(page)) {
569 0 : flush_anon_page(vma, page, addr);
570 0 : flush_dcache_page(page);
571 : } else {
572 0 : put_page(page);
573 : out:
574 : page = NULL;
575 : }
576 0 : mmap_read_unlock(mm);
577 0 : return page;
578 : }
579 :
580 : /*
581 : * This helper is used for getting right index into array of tree roots.
582 : * When merge_across_nodes knob is set to 1, there are only two rb-trees for
583 : * stable and unstable pages from all nodes with roots in index 0. Otherwise,
584 : * every node has its own stable and unstable tree.
585 : */
586 0 : static inline int get_kpfn_nid(unsigned long kpfn)
587 : {
588 0 : return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
589 : }
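
/*
 * Illustrative use, mirroring stable_tree_search() below:
 *
 *	struct rb_root *root = root_stable_tree +
 *			       get_kpfn_nid(page_to_pfn(page));
 *
 * While merge_across_nodes is 1 this always selects root_stable_tree[0];
 * otherwise each NUMA node gets its own root.
 */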
590 :
591 0 : static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
592 : struct rb_root *root)
593 : {
594 0 : struct stable_node *chain = alloc_stable_node();
595 0 : VM_BUG_ON(is_stable_node_chain(dup));
596 0 : if (likely(chain)) {
597 0 : INIT_HLIST_HEAD(&chain->hlist);
598 0 : chain->chain_prune_time = jiffies;
599 0 : chain->rmap_hlist_len = STABLE_NODE_CHAIN;
600 : #if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
601 0 : chain->nid = NUMA_NO_NODE; /* debug */
602 : #endif
603 0 : ksm_stable_node_chains++;
604 :
605 : /*
606 : * Put the stable node chain in the first dimension of
607 : * the stable tree and at the same time remove the old
608 : * stable node.
609 : */
610 0 : rb_replace_node(&dup->node, &chain->node, root);
611 :
612 : /*
613 : * Move the old stable node to the second dimension
614 : * queued in the hlist_dup. The invariant is that all
615 : * dup stable_nodes in the chain->hlist point to pages
616 : * that are write protected and have the exact same
617 : * content.
618 : */
619 0 : stable_node_chain_add_dup(dup, chain);
620 : }
621 0 : return chain;
622 : }
623 :
624 0 : static inline void free_stable_node_chain(struct stable_node *chain,
625 : struct rb_root *root)
626 : {
627 0 : rb_erase(&chain->node, root);
628 0 : free_stable_node(chain);
629 0 : ksm_stable_node_chains--;
630 0 : }
631 :
632 0 : static void remove_node_from_stable_tree(struct stable_node *stable_node)
633 : {
634 0 : struct rmap_item *rmap_item;
635 :
636 : /* check it's not STABLE_NODE_CHAIN or negative */
637 0 : BUG_ON(stable_node->rmap_hlist_len < 0);
638 :
639 0 : hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
640 0 : if (rmap_item->hlist.next)
641 0 : ksm_pages_sharing--;
642 : else
643 0 : ksm_pages_shared--;
644 0 : VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
645 0 : stable_node->rmap_hlist_len--;
646 0 : put_anon_vma(rmap_item->anon_vma);
647 0 : rmap_item->address &= PAGE_MASK;
648 0 : cond_resched();
649 : }
650 :
651 : /*
652 : * We need the second aligned pointer of the migrate_nodes
653 : * list_head to stay clear from the rb_parent_color union
654 : * (aligned and different from any node) and also different
655 : * from &migrate_nodes. This will verify that future list.h changes
656 : * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
657 : */
658 : #if defined(GCC_VERSION) && GCC_VERSION >= 40903
659 0 : BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
660 0 : BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
661 : #endif
662 :
663 0 : if (stable_node->head == &migrate_nodes)
664 0 : list_del(&stable_node->list);
665 : else
666 0 : stable_node_dup_del(stable_node);
667 0 : free_stable_node(stable_node);
668 0 : }
669 :
670 : enum get_ksm_page_flags {
671 : GET_KSM_PAGE_NOLOCK,
672 : GET_KSM_PAGE_LOCK,
673 : GET_KSM_PAGE_TRYLOCK
674 : };
675 :
676 : /*
677 : * get_ksm_page: checks if the page indicated by the stable node
678 : * is still its ksm page, despite having held no reference to it.
679 : * In which case we can trust the content of the page, and it
680 : * returns the gotten page; but if the page has now been zapped,
681 : * remove the stale node from the stable tree and return NULL.
682 : * But beware, the stable node's page might be being migrated.
683 : *
684 : * You would expect the stable_node to hold a reference to the ksm page.
685 : * But if it increments the page's count, swapping out has to wait for
686 : * ksmd to come around again before it can free the page, which may take
687 : * seconds or even minutes: much too unresponsive. So instead we use a
688 : * "keyhole reference": access to the ksm page from the stable node peeps
689 : * out through its keyhole to see if that page still holds the right key,
690 : * pointing back to this stable node. This relies on freeing a PageAnon
691 : * page to reset its page->mapping to NULL, and relies on no other use of
692 : * a page to put something that might look like our key in page->mapping
693 : * while that page is on its way to being freed; an anomaly to bear in mind.
694 : */
695 0 : static struct page *get_ksm_page(struct stable_node *stable_node,
696 : enum get_ksm_page_flags flags)
697 : {
698 0 : struct page *page;
699 0 : void *expected_mapping;
700 0 : unsigned long kpfn;
701 :
702 0 : expected_mapping = (void *)((unsigned long)stable_node |
703 : PAGE_MAPPING_KSM);
704 0 : again:
705 0 : kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
706 0 : page = pfn_to_page(kpfn);
707 0 : if (READ_ONCE(page->mapping) != expected_mapping)
708 0 : goto stale;
709 :
710 : /*
711 : * We cannot do anything with the page while its refcount is 0.
712 : * Usually 0 means free, or tail of a higher-order page: in which
713 : * case this node is no longer referenced, and should be freed;
714 : * however, it might mean that the page is under page_ref_freeze().
715 : * The __remove_mapping() case is easy, again the node is now stale;
716 : * the same holds in the reuse_ksm_page() case; but if the page is swapcache
717 : * in migrate_page_move_mapping(), it might still be our page,
718 : * in which case it's essential to keep the node.
719 : */
720 0 : while (!get_page_unless_zero(page)) {
721 : /*
722 : * Another check for page->mapping != expected_mapping would
723 : * work here too. We have chosen the !PageSwapCache test to
724 : * optimize the common case, when the page is or is about to
725 : * be freed: PageSwapCache is cleared (under spin_lock_irq)
726 : * in the ref_freeze section of __remove_mapping(); but Anon
727 : * page->mapping reset to NULL later, in free_pages_prepare().
728 : */
729 0 : if (!PageSwapCache(page))
730 0 : goto stale;
731 : cpu_relax();
732 : }
733 :
734 0 : if (READ_ONCE(page->mapping) != expected_mapping) {
735 0 : put_page(page);
736 0 : goto stale;
737 : }
738 :
739 0 : if (flags == GET_KSM_PAGE_TRYLOCK) {
740 0 : if (!trylock_page(page)) {
741 0 : put_page(page);
742 0 : return ERR_PTR(-EBUSY);
743 : }
744 0 : } else if (flags == GET_KSM_PAGE_LOCK)
745 0 : lock_page(page);
746 :
747 0 : if (flags != GET_KSM_PAGE_NOLOCK) {
748 0 : if (READ_ONCE(page->mapping) != expected_mapping) {
749 0 : unlock_page(page);
750 0 : put_page(page);
751 0 : goto stale;
752 : }
753 : }
754 : return page;
755 :
756 0 : stale:
757 : /*
758 : * We come here from above when page->mapping or !PageSwapCache
759 : * suggests that the node is stale; but it might be under migration.
760 : * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
761 : * before checking whether node->kpfn has been changed.
762 : */
763 0 : smp_rmb();
764 0 : if (READ_ONCE(stable_node->kpfn) != kpfn)
765 0 : goto again;
766 0 : remove_node_from_stable_tree(stable_node);
767 0 : return NULL;
768 : }
769 :
770 : /*
771 : * Removing rmap_item from stable or unstable tree.
772 : * This function will clean the information from the stable/unstable tree.
773 : */
774 0 : static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
775 : {
776 0 : if (rmap_item->address & STABLE_FLAG) {
777 0 : struct stable_node *stable_node;
778 0 : struct page *page;
779 :
780 0 : stable_node = rmap_item->head;
781 0 : page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
782 0 : if (!page)
783 0 : goto out;
784 :
785 0 : hlist_del(&rmap_item->hlist);
786 0 : unlock_page(page);
787 0 : put_page(page);
788 :
789 0 : if (!hlist_empty(&stable_node->hlist))
790 0 : ksm_pages_sharing--;
791 : else
792 0 : ksm_pages_shared--;
793 0 : VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
794 0 : stable_node->rmap_hlist_len--;
795 :
796 0 : put_anon_vma(rmap_item->anon_vma);
797 0 : rmap_item->address &= PAGE_MASK;
798 :
799 0 : } else if (rmap_item->address & UNSTABLE_FLAG) {
800 0 : unsigned char age;
801 : /*
802 : * Usually ksmd can and must skip the rb_erase, because
803 : * root_unstable_tree was already reset to RB_ROOT.
804 : * But be careful when an mm is exiting: do the rb_erase
805 : * if this rmap_item was inserted by this scan, rather
806 : * than left over from before.
807 : */
808 0 : age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
809 0 : BUG_ON(age > 1);
810 0 : if (!age)
811 0 : rb_erase(&rmap_item->node,
812 0 : root_unstable_tree + NUMA(rmap_item->nid));
813 0 : ksm_pages_unshared--;
814 0 : rmap_item->address &= PAGE_MASK;
815 : }
816 0 : out:
817 0 : cond_resched(); /* we're called from many long loops */
818 0 : }
819 :
820 0 : static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
821 : struct rmap_item **rmap_list)
822 : {
823 0 : while (*rmap_list) {
824 0 : struct rmap_item *rmap_item = *rmap_list;
825 0 : *rmap_list = rmap_item->rmap_list;
826 0 : remove_rmap_item_from_tree(rmap_item);
827 0 : free_rmap_item(rmap_item);
828 : }
829 0 : }
830 :
831 : /*
832 : * Though it's very tempting to unmerge rmap_items from stable tree rather
833 : * than check every pte of a given vma, the locking doesn't quite work for
834 : * that - an rmap_item is assigned to the stable tree after inserting ksm
835 : * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
836 : * rmap_items from parent to child at fork time (so as not to waste time
837 : * if exit comes before the next scan reaches it).
838 : *
839 : * Similarly, although we'd like to remove rmap_items (so updating counts
840 : * and freeing memory) when unmerging an area, it's easier to leave that
841 : * to the next pass of ksmd - consider, for example, how ksmd might be
842 : * in cmp_and_merge_page on one of the rmap_items we would be removing.
843 : */
844 0 : static int unmerge_ksm_pages(struct vm_area_struct *vma,
845 : unsigned long start, unsigned long end)
846 : {
847 0 : unsigned long addr;
848 0 : int err = 0;
849 :
850 0 : for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
851 0 : if (ksm_test_exit(vma->vm_mm))
852 : break;
853 0 : if (signal_pending(current))
854 : err = -ERESTARTSYS;
855 : else
856 0 : err = break_ksm(vma, addr);
857 : }
858 0 : return err;
859 : }
860 :
861 0 : static inline struct stable_node *page_stable_node(struct page *page)
862 : {
863 0 : return PageKsm(page) ? page_rmapping(page) : NULL;
864 : }
865 :
866 0 : static inline void set_page_stable_node(struct page *page,
867 : struct stable_node *stable_node)
868 : {
869 0 : page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
870 0 : }
871 :
872 : #ifdef CONFIG_SYSFS
873 : /*
874 : * Only called through the sysfs control interface:
875 : */
876 0 : static int remove_stable_node(struct stable_node *stable_node)
877 : {
878 0 : struct page *page;
879 0 : int err;
880 :
881 0 : page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
882 0 : if (!page) {
883 : /*
884 : * get_ksm_page did remove_node_from_stable_tree itself.
885 : */
886 : return 0;
887 : }
888 :
889 : /*
890 : * Page could be still mapped if this races with __mmput() running in
891 : * between ksm_exit() and exit_mmap(). Just refuse to let
892 : * merge_across_nodes/max_page_sharing be switched.
893 : */
894 0 : err = -EBUSY;
895 0 : if (!page_mapped(page)) {
896 : /*
897 : * The stable node did not yet appear stale to get_ksm_page(),
898 : * since that allows for an unmapped ksm page to be recognized
899 : * right up until it is freed; but the node is safe to remove.
900 : * This page might be in a pagevec waiting to be freed,
901 : * or it might be PageSwapCache (perhaps under writeback),
902 : * or it might have been removed from swapcache a moment ago.
903 : */
904 0 : set_page_stable_node(page, NULL);
905 0 : remove_node_from_stable_tree(stable_node);
906 0 : err = 0;
907 : }
908 :
909 0 : unlock_page(page);
910 0 : put_page(page);
911 0 : return err;
912 : }
913 :
914 0 : static int remove_stable_node_chain(struct stable_node *stable_node,
915 : struct rb_root *root)
916 : {
917 0 : struct stable_node *dup;
918 0 : struct hlist_node *hlist_safe;
919 :
920 0 : if (!is_stable_node_chain(stable_node)) {
921 0 : VM_BUG_ON(is_stable_node_dup(stable_node));
922 0 : if (remove_stable_node(stable_node))
923 : return true;
924 : else
925 0 : return false;
926 : }
927 :
928 0 : hlist_for_each_entry_safe(dup, hlist_safe,
929 : &stable_node->hlist, hlist_dup) {
930 0 : VM_BUG_ON(!is_stable_node_dup(dup));
931 0 : if (remove_stable_node(dup))
932 : return true;
933 : }
934 0 : BUG_ON(!hlist_empty(&stable_node->hlist));
935 0 : free_stable_node_chain(stable_node, root);
936 0 : return false;
937 : }
938 :
939 0 : static int remove_all_stable_nodes(void)
940 : {
941 0 : struct stable_node *stable_node, *next;
942 0 : int nid;
943 0 : int err = 0;
944 :
945 0 : for (nid = 0; nid < ksm_nr_node_ids; nid++) {
946 0 : while (root_stable_tree[nid].rb_node) {
947 0 : stable_node = rb_entry(root_stable_tree[nid].rb_node,
948 : struct stable_node, node);
949 0 : if (remove_stable_node_chain(stable_node,
950 : root_stable_tree + nid)) {
951 : err = -EBUSY;
952 : break; /* proceed to next nid */
953 : }
954 0 : cond_resched();
955 : }
956 : }
957 0 : list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
958 0 : if (remove_stable_node(stable_node))
959 0 : err = -EBUSY;
960 0 : cond_resched();
961 : }
962 0 : return err;
963 : }
964 :
965 0 : static int unmerge_and_remove_all_rmap_items(void)
966 : {
967 0 : struct mm_slot *mm_slot;
968 0 : struct mm_struct *mm;
969 0 : struct vm_area_struct *vma;
970 0 : int err = 0;
971 :
972 0 : spin_lock(&ksm_mmlist_lock);
973 0 : ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
974 : struct mm_slot, mm_list);
975 0 : spin_unlock(&ksm_mmlist_lock);
976 :
977 0 : for (mm_slot = ksm_scan.mm_slot;
978 0 : mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
979 0 : mm = mm_slot->mm;
980 0 : mmap_read_lock(mm);
981 0 : for (vma = mm->mmap; vma; vma = vma->vm_next) {
982 0 : if (ksm_test_exit(mm))
983 : break;
984 0 : if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
985 0 : continue;
986 0 : err = unmerge_ksm_pages(vma,
987 : vma->vm_start, vma->vm_end);
988 0 : if (err)
989 0 : goto error;
990 : }
991 :
992 0 : remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
993 0 : mmap_read_unlock(mm);
994 :
995 0 : spin_lock(&ksm_mmlist_lock);
996 0 : ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
997 : struct mm_slot, mm_list);
998 0 : if (ksm_test_exit(mm)) {
999 0 : hash_del(&mm_slot->link);
1000 0 : list_del(&mm_slot->mm_list);
1001 0 : spin_unlock(&ksm_mmlist_lock);
1002 :
1003 0 : free_mm_slot(mm_slot);
1004 0 : clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1005 0 : mmdrop(mm);
1006 : } else
1007 0 : spin_unlock(&ksm_mmlist_lock);
1008 : }
1009 :
1010 : /* Clean up stable nodes, but don't worry if some are still busy */
1011 0 : remove_all_stable_nodes();
1012 0 : ksm_scan.seqnr = 0;
1013 0 : return 0;
1014 :
1015 0 : error:
1016 0 : mmap_read_unlock(mm);
1017 0 : spin_lock(&ksm_mmlist_lock);
1018 0 : ksm_scan.mm_slot = &ksm_mm_head;
1019 0 : spin_unlock(&ksm_mmlist_lock);
1020 0 : return err;
1021 : }
1022 : #endif /* CONFIG_SYSFS */
1023 :
1024 1 : static u32 calc_checksum(struct page *page)
1025 : {
1026 1 : u32 checksum;
1027 1 : void *addr = kmap_atomic(page);
1028 1 : checksum = xxhash(addr, PAGE_SIZE, 0);
1029 1 : kunmap_atomic(addr);
1030 1 : return checksum;
1031 : }
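
/*
 * Illustrative userspace analogue of calc_checksum(), not part of ksm.c;
 * it assumes the xxHash library and a 4KiB page. Note the kernel's
 * xxhash() picks the 32- or 64-bit variant by word size, so this XXH32
 * version is not guaranteed to be bit-identical on 64-bit kernels; it
 * only mirrors the idea of a cheap whole-page hash with seed 0.
 */
#if 0 /* example only, never compiled with the kernel */
#include <xxhash.h>

static unsigned int checksum_page(const void *page_addr)
{
	return XXH32(page_addr, 4096, 0);
}
#endif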
1032 :
1033 0 : static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1034 : pte_t *orig_pte)
1035 : {
1036 0 : struct mm_struct *mm = vma->vm_mm;
1037 0 : struct page_vma_mapped_walk pvmw = {
1038 : .page = page,
1039 : .vma = vma,
1040 : };
1041 0 : int swapped;
1042 0 : int err = -EFAULT;
1043 0 : struct mmu_notifier_range range;
1044 :
1045 0 : pvmw.address = page_address_in_vma(page, vma);
1046 0 : if (pvmw.address == -EFAULT)
1047 0 : goto out;
1048 :
1049 0 : BUG_ON(PageTransCompound(page));
1050 :
1051 0 : mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
1052 : pvmw.address,
1053 : pvmw.address + PAGE_SIZE);
1054 0 : mmu_notifier_invalidate_range_start(&range);
1055 :
1056 0 : if (!page_vma_mapped_walk(&pvmw))
1057 0 : goto out_mn;
1058 0 : if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1059 0 : goto out_unlock;
1060 :
1061 0 : if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1062 0 : (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1063 0 : mm_tlb_flush_pending(mm)) {
1064 0 : pte_t entry;
1065 :
1066 0 : swapped = PageSwapCache(page);
1067 0 : flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1068 : /*
1069 : * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
1070 : * take any lock, therefore the check that we are going to make,
1071 : * comparing the page count against the map count, is racy and
1072 : * O_DIRECT can happen right after the check.
1073 : * So we clear the pte and flush the TLB before the check;
1074 : * this assures us that no O_DIRECT can happen after the check
1075 : * or in the middle of the check.
1076 : *
1077 : * No need to notify as we are downgrading page table to read
1078 : * only not changing it to point to a new page.
1079 : *
1080 : * See Documentation/vm/mmu_notifier.rst
1081 : */
1082 0 : entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1083 : /*
1084 : * Check that no O_DIRECT or similar I/O is in progress on the
1085 : * page
1086 : */
1087 0 : if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1088 0 : set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1089 0 : goto out_unlock;
1090 : }
1091 0 : if (pte_dirty(entry))
1092 0 : set_page_dirty(page);
1093 :
1094 0 : if (pte_protnone(entry))
1095 : entry = pte_mkclean(pte_clear_savedwrite(entry));
1096 : else
1097 0 : entry = pte_mkclean(pte_wrprotect(entry));
1098 0 : set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
1099 : }
1100 0 : *orig_pte = *pvmw.pte;
1101 0 : err = 0;
1102 :
1103 0 : out_unlock:
1104 0 : page_vma_mapped_walk_done(&pvmw);
1105 : out_mn:
1106 0 : mmu_notifier_invalidate_range_end(&range);
1107 0 : out:
1108 0 : return err;
1109 : }
1110 :
1111 : /**
1112 : * replace_page - replace page in vma by new ksm page
1113 : * @vma: vma that holds the pte pointing to page
1114 : * @page: the page we are replacing by kpage
1115 : * @kpage: the ksm page we replace page by
1116 : * @orig_pte: the original value of the pte
1117 : *
1118 : * Returns 0 on success, -EFAULT on failure.
1119 : */
1120 0 : static int replace_page(struct vm_area_struct *vma, struct page *page,
1121 : struct page *kpage, pte_t orig_pte)
1122 : {
1123 0 : struct mm_struct *mm = vma->vm_mm;
1124 0 : pmd_t *pmd;
1125 0 : pte_t *ptep;
1126 0 : pte_t newpte;
1127 0 : spinlock_t *ptl;
1128 0 : unsigned long addr;
1129 0 : int err = -EFAULT;
1130 0 : struct mmu_notifier_range range;
1131 :
1132 0 : addr = page_address_in_vma(page, vma);
1133 0 : if (addr == -EFAULT)
1134 0 : goto out;
1135 :
1136 0 : pmd = mm_find_pmd(mm, addr);
1137 0 : if (!pmd)
1138 0 : goto out;
1139 :
1140 0 : mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
1141 : addr + PAGE_SIZE);
1142 0 : mmu_notifier_invalidate_range_start(&range);
1143 :
1144 0 : ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1145 0 : if (!pte_same(*ptep, orig_pte)) {
1146 0 : pte_unmap_unlock(ptep, ptl);
1147 0 : goto out_mn;
1148 : }
1149 :
1150 : /*
1151 : * No need to check ksm_use_zero_pages here: we can only have a
1152 : * zero_page here if ksm_use_zero_pages was enabled already.
1153 : */
1154 0 : if (!is_zero_pfn(page_to_pfn(kpage))) {
1155 0 : get_page(kpage);
1156 0 : page_add_anon_rmap(kpage, vma, addr, false);
1157 0 : newpte = mk_pte(kpage, vma->vm_page_prot);
1158 : } else {
1159 0 : newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
1160 : vma->vm_page_prot));
1161 : /*
1162 : * We're replacing an anonymous page with a zero page, which is
1163 : * not anonymous. We need to do proper accounting otherwise we
1164 : * will get wrong values in /proc, and a BUG message in dmesg
1165 : * when tearing down the mm.
1166 : */
1167 0 : dec_mm_counter(mm, MM_ANONPAGES);
1168 : }
1169 :
1170 0 : flush_cache_page(vma, addr, pte_pfn(*ptep));
1171 : /*
1172 : * No need to notify as we are replacing a read only page with another
1173 : * read only page with the same content.
1174 : *
1175 : * See Documentation/vm/mmu_notifier.rst
1176 : */
1177 0 : ptep_clear_flush(vma, addr, ptep);
1178 0 : set_pte_at_notify(mm, addr, ptep, newpte);
1179 :
1180 0 : page_remove_rmap(page, false);
1181 0 : if (!page_mapped(page))
1182 0 : try_to_free_swap(page);
1183 0 : put_page(page);
1184 :
1185 0 : pte_unmap_unlock(ptep, ptl);
1186 0 : err = 0;
1187 : out_mn:
1188 0 : mmu_notifier_invalidate_range_end(&range);
1189 0 : out:
1190 0 : return err;
1191 : }
1192 :
1193 : /*
1194 : * try_to_merge_one_page - take two pages and merge them into one
1195 : * @vma: the vma that holds the pte pointing to page
1196 : * @page: the PageAnon page that we want to replace with kpage
1197 : * @kpage: the PageKsm page that we want to map instead of page,
1198 : * or NULL the first time when we want to use page as kpage.
1199 : *
1200 : * This function returns 0 if the pages were merged, -EFAULT otherwise.
1201 : */
1202 0 : static int try_to_merge_one_page(struct vm_area_struct *vma,
1203 : struct page *page, struct page *kpage)
1204 : {
1205 0 : pte_t orig_pte = __pte(0);
1206 0 : int err = -EFAULT;
1207 :
1208 0 : if (page == kpage) /* ksm page forked */
1209 : return 0;
1210 :
1211 0 : if (!PageAnon(page))
1212 0 : goto out;
1213 :
1214 : /*
1215 : * We need the page lock to read a stable PageSwapCache in
1216 : * write_protect_page(). We use trylock_page() instead of
1217 : * lock_page() because we don't want to wait here - we
1218 : * prefer to continue scanning and merging different pages,
1219 : * then come back to this page when it is unlocked.
1220 : */
1221 0 : if (!trylock_page(page))
1222 0 : goto out;
1223 :
1224 0 : if (PageTransCompound(page)) {
1225 0 : if (split_huge_page(page))
1226 0 : goto out_unlock;
1227 : }
1228 :
1229 : /*
1230 : * If this anonymous page is mapped only here, its pte may need
1231 : * to be write-protected. If it's mapped elsewhere, all of its
1232 : * ptes are necessarily already write-protected. But in either
1233 : * case, we need to lock and check page_count is not raised.
1234 : */
1235 0 : if (write_protect_page(vma, page, &orig_pte) == 0) {
1236 0 : if (!kpage) {
1237 : /*
1238 : * While we hold page lock, upgrade page from
1239 : * PageAnon+anon_vma to PageKsm+NULL stable_node:
1240 : * stable_tree_insert() will update stable_node.
1241 : */
1242 0 : set_page_stable_node(page, NULL);
1243 0 : mark_page_accessed(page);
1244 : /*
1245 : * Page reclaim just frees a clean page with no dirty
1246 : * ptes: make sure that the ksm page would be swapped.
1247 : */
1248 0 : if (!PageDirty(page))
1249 0 : SetPageDirty(page);
1250 : err = 0;
1251 0 : } else if (pages_identical(page, kpage))
1252 0 : err = replace_page(vma, page, kpage, orig_pte);
1253 : }
1254 :
1255 0 : if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1256 0 : munlock_vma_page(page);
1257 0 : if (!PageMlocked(kpage)) {
1258 0 : unlock_page(page);
1259 0 : lock_page(kpage);
1260 0 : mlock_vma_page(kpage);
1261 0 : page = kpage; /* for final unlock */
1262 : }
1263 : }
1264 :
1265 0 : out_unlock:
1266 0 : unlock_page(page);
1267 : out:
1268 : return err;
1269 : }
1270 :
1271 : /*
1272 : * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1273 : * but no new kernel page is allocated: kpage must already be a ksm page.
1274 : *
1275 : * This function returns 0 if the pages were merged, -EFAULT otherwise.
1276 : */
1277 0 : static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1278 : struct page *page, struct page *kpage)
1279 : {
1280 0 : struct mm_struct *mm = rmap_item->mm;
1281 0 : struct vm_area_struct *vma;
1282 0 : int err = -EFAULT;
1283 :
1284 0 : mmap_read_lock(mm);
1285 0 : vma = find_mergeable_vma(mm, rmap_item->address);
1286 0 : if (!vma)
1287 0 : goto out;
1288 :
1289 0 : err = try_to_merge_one_page(vma, page, kpage);
1290 0 : if (err)
1291 0 : goto out;
1292 :
1293 : /* Unstable nid is in union with stable anon_vma: remove first */
1294 0 : remove_rmap_item_from_tree(rmap_item);
1295 :
1296 : /* Must get reference to anon_vma while still holding mmap_lock */
1297 0 : rmap_item->anon_vma = vma->anon_vma;
1298 0 : get_anon_vma(vma->anon_vma);
1299 0 : out:
1300 0 : mmap_read_unlock(mm);
1301 0 : return err;
1302 : }
1303 :
1304 : /*
1305 : * try_to_merge_two_pages - take two identical pages and prepare them
1306 : * to be merged into one page.
1307 : *
1308 : * This function returns the kpage if we successfully merged two identical
1309 : * pages into one ksm page, NULL otherwise.
1310 : *
1311 : * Note that this function upgrades page to ksm page: if one of the pages
1312 : * is already a ksm page, try_to_merge_with_ksm_page should be used.
1313 : */
1314 0 : static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1315 : struct page *page,
1316 : struct rmap_item *tree_rmap_item,
1317 : struct page *tree_page)
1318 : {
1319 0 : int err;
1320 :
1321 0 : err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1322 0 : if (!err) {
1323 0 : err = try_to_merge_with_ksm_page(tree_rmap_item,
1324 : tree_page, page);
1325 : /*
1326 : * If that fails, we have a ksm page with only one pte
1327 : * pointing to it: so break it.
1328 : */
1329 0 : if (err)
1330 0 : break_cow(rmap_item);
1331 : }
1332 0 : return err ? NULL : page;
1333 : }
1334 :
1335 : static __always_inline
1336 0 : bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1337 : {
1338 0 : VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1339 : /*
1340 : * Check that at least one mapping still exists, otherwise
1341 : * there's not much point in merging and sharing with this
1342 : * stable_node, as the underlying tree_page of the other
1343 : * sharer is going to be freed soon.
1344 : */
1345 0 : return stable_node->rmap_hlist_len &&
1346 0 : stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1347 : }
1348 :
1349 : static __always_inline
1350 0 : bool is_page_sharing_candidate(struct stable_node *stable_node)
1351 : {
1352 0 : return __is_page_sharing_candidate(stable_node, 0);
1353 : }
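
/*
 * Worked example: with the default ksm_max_page_sharing of 256, a dup
 * whose rmap_hlist_len is 255 still passes
 * __is_page_sharing_candidate(dup, 0) (255 + 0 < 256) and may take one
 * more sharer, but __is_page_sharing_candidate(dup, 1) already fails
 * (255 + 1 < 256 is false), so it is skipped when room for an extra
 * future merge is required.
 */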
1354 :
1355 0 : static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1356 : struct stable_node **_stable_node,
1357 : struct rb_root *root,
1358 : bool prune_stale_stable_nodes)
1359 : {
1360 0 : struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1361 0 : struct hlist_node *hlist_safe;
1362 0 : struct page *_tree_page, *tree_page = NULL;
1363 0 : int nr = 0;
1364 0 : int found_rmap_hlist_len;
1365 :
1366 0 : if (!prune_stale_stable_nodes ||
1367 0 : time_before(jiffies, stable_node->chain_prune_time +
1368 : msecs_to_jiffies(
1369 : ksm_stable_node_chains_prune_millisecs)))
1370 : prune_stale_stable_nodes = false;
1371 : else
1372 0 : stable_node->chain_prune_time = jiffies;
1373 :
1374 0 : hlist_for_each_entry_safe(dup, hlist_safe,
1375 : &stable_node->hlist, hlist_dup) {
1376 0 : cond_resched();
1377 : /*
1378 : * We must walk all stable_node_dup to prune the stale
1379 : * stable nodes during lookup.
1380 : *
1381 : * get_ksm_page can drop the nodes from the
1382 : * stable_node->hlist if they point to freed pages
1383 : * (that's why we do a _safe walk). The "dup"
1384 : * stable_node parameter itself will be freed from
1385 : * under us if it returns NULL.
1386 : */
1387 0 : _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
1388 0 : if (!_tree_page)
1389 0 : continue;
1390 0 : nr += 1;
1391 0 : if (is_page_sharing_candidate(dup)) {
1392 0 : if (!found ||
1393 : dup->rmap_hlist_len > found_rmap_hlist_len) {
1394 0 : if (found)
1395 0 : put_page(tree_page);
1396 0 : found = dup;
1397 0 : found_rmap_hlist_len = found->rmap_hlist_len;
1398 0 : tree_page = _tree_page;
1399 :
1400 : /* skip put_page for found dup */
1401 0 : if (!prune_stale_stable_nodes)
1402 : break;
1403 0 : continue;
1404 : }
1405 : }
1406 0 : put_page(_tree_page);
1407 : }
1408 :
1409 0 : if (found) {
1410 : /*
1411 : * nr is counting all dups in the chain only if
1412 : * prune_stale_stable_nodes is true, otherwise we may
1413 : * break the loop at nr == 1 even if there are
1414 : * multiple entries.
1415 : */
1416 0 : if (prune_stale_stable_nodes && nr == 1) {
1417 : /*
1418 : * If there's not just one entry it would
1419 : * corrupt memory; better to BUG_ON. In ksmd
1420 : * context, with no lock held, a BUG_ON is
1421 : * not even fatal to the system.
1422 : */
1423 0 : BUG_ON(stable_node->hlist.first->next);
1424 :
1425 : /*
1426 : * There's just one entry and it is below the
1427 : * deduplication limit so drop the chain.
1428 : */
1429 0 : rb_replace_node(&stable_node->node, &found->node,
1430 : root);
1431 0 : free_stable_node(stable_node);
1432 0 : ksm_stable_node_chains--;
1433 0 : ksm_stable_node_dups--;
1434 : /*
1435 : * NOTE: the caller depends on the stable_node
1436 : * to be equal to stable_node_dup if the chain
1437 : * was collapsed.
1438 : */
1439 0 : *_stable_node = found;
1440 : /*
1441 : * Just for robustness, as stable_node is
1442 : * otherwise left as a stale pointer, the
1443 : * compiler shall optimize it away at build
1444 : * time.
1445 : */
1446 0 : stable_node = NULL;
1447 0 : } else if (stable_node->hlist.first != &found->hlist_dup &&
1448 0 : __is_page_sharing_candidate(found, 1)) {
1449 : /*
1450 : * If the found stable_node dup can accept one
1451 : * more future merge (in addition to the one
1452 : * that is underway) and is not at the head of
1453 : * the chain, put it there so next search will
1454 : * be quicker in the !prune_stale_stable_nodes
1455 : * case.
1456 : *
1457 : * NOTE: it would be inaccurate to use nr > 1
1458 : * instead of checking the hlist.first pointer
1459 : * directly, because in the
1460 : * prune_stale_stable_nodes case "nr" isn't
1461 : * the position of the found dup in the chain,
1462 : * but the total number of dups in the chain.
1463 : */
1464 0 : hlist_del(&found->hlist_dup);
1465 0 : hlist_add_head(&found->hlist_dup,
1466 : &stable_node->hlist);
1467 : }
1468 : }
1469 :
1470 0 : *_stable_node_dup = found;
1471 0 : return tree_page;
1472 : }
1473 :
1474 0 : static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1475 : struct rb_root *root)
1476 : {
1477 0 : if (!is_stable_node_chain(stable_node))
1478 : return stable_node;
1479 0 : if (hlist_empty(&stable_node->hlist)) {
1480 0 : free_stable_node_chain(stable_node, root);
1481 0 : return NULL;
1482 : }
1483 0 : return hlist_entry(stable_node->hlist.first,
1484 : typeof(*stable_node), hlist_dup);
1485 : }
1486 :
1487 : /*
1488 : * Like for get_ksm_page, this function can free the *_stable_node and
1489 : * *_stable_node_dup if the returned tree_page is NULL.
1490 : *
1491 : * It can also free and overwrite *_stable_node with the found
1492 : * stable_node_dup if the chain is collapsed (in which case
1493 : * *_stable_node will be equal to *_stable_node_dup, as if the chain
1494 : * never existed). It's up to the caller to verify tree_page is not
1495 : * NULL before dereferencing *_stable_node or *_stable_node_dup.
1496 : *
1497 : * *_stable_node_dup is really a second output parameter of this
1498 : * function and will be overwritten in all cases, the caller doesn't
1499 : * need to initialize it.
1500 : */
1501 0 : static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1502 : struct stable_node **_stable_node,
1503 : struct rb_root *root,
1504 : bool prune_stale_stable_nodes)
1505 : {
1506 0 : struct stable_node *stable_node = *_stable_node;
1507 0 : if (!is_stable_node_chain(stable_node)) {
1508 0 : if (is_page_sharing_candidate(stable_node)) {
1509 0 : *_stable_node_dup = stable_node;
1510 0 : return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1511 : }
1512 : /*
1513 : * _stable_node_dup set to NULL means the stable_node
1514 : * reached the ksm_max_page_sharing limit.
1515 : */
1516 0 : *_stable_node_dup = NULL;
1517 0 : return NULL;
1518 : }
1519 0 : return stable_node_dup(_stable_node_dup, _stable_node, root,
1520 : prune_stale_stable_nodes);
1521 : }
1522 :
1523 0 : static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1524 : struct stable_node **s_n,
1525 : struct rb_root *root)
1526 : {
1527 0 : return __stable_node_chain(s_n_d, s_n, root, true);
1528 : }
1529 :
1530 0 : static __always_inline struct page *chain(struct stable_node **s_n_d,
1531 : struct stable_node *s_n,
1532 : struct rb_root *root)
1533 : {
1534 0 : struct stable_node *old_stable_node = s_n;
1535 0 : struct page *tree_page;
1536 :
1537 0 : tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
1538 : /* not pruning dups so s_n cannot have changed */
1539 0 : VM_BUG_ON(s_n != old_stable_node);
1540 0 : return tree_page;
1541 : }
1542 :
1543 : /*
1544 : * stable_tree_search - search for page inside the stable tree
1545 : *
1546 : * This function checks if there is a page inside the stable tree
1547 : * with identical content to the page that we are scanning right now.
1548 : *
1549 : * This function returns the page held by the stable tree node of identical
1550 : * content if found, NULL otherwise.
1551 : */
1552 0 : static struct page *stable_tree_search(struct page *page)
1553 : {
1554 0 : int nid;
1555 0 : struct rb_root *root;
1556 0 : struct rb_node **new;
1557 0 : struct rb_node *parent;
1558 0 : struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1559 0 : struct stable_node *page_node;
1560 :
1561 0 : page_node = page_stable_node(page);
1562 0 : if (page_node && page_node->head != &migrate_nodes) {
1563 : /* ksm page forked */
1564 0 : get_page(page);
1565 0 : return page;
1566 : }
1567 :
1568 0 : nid = get_kpfn_nid(page_to_pfn(page));
1569 0 : root = root_stable_tree + nid;
1570 : again:
1571 0 : new = &root->rb_node;
1572 0 : parent = NULL;
1573 :
1574 0 : while (*new) {
1575 0 : struct page *tree_page;
1576 0 : int ret;
1577 :
1578 0 : cond_resched();
1579 0 : stable_node = rb_entry(*new, struct stable_node, node);
1580 0 : stable_node_any = NULL;
1581 0 : tree_page = chain_prune(&stable_node_dup, &stable_node, root);
1582 : /*
1583 : * NOTE: stable_node may have been freed by
1584 : * chain_prune() if the returned stable_node_dup is
1585 : * not NULL. stable_node_dup may have been inserted in
1586 : * the rbtree instead as a regular stable_node (in
1587 : * order to collapse the stable_node chain if a single
1588 : * stable_node dup was found in it). In such case the
1589 : * stable_node is overwritten by the callee to point
1590 : * to the stable_node_dup that was collapsed in the
1591 : * stable rbtree and stable_node will be equal to
1592 : * stable_node_dup, as if the chain never existed.
1593 : */
1594 0 : if (!stable_node_dup) {
1595 : /*
1596 : * Either all stable_node dups were full in
1597 : * this stable_node chain, or this chain was
1598 : * empty and should be rb_erased.
1599 : */
1600 0 : stable_node_any = stable_node_dup_any(stable_node,
1601 : root);
1602 0 : if (!stable_node_any) {
1603 : /* rb_erase just ran */
1604 0 : goto again;
1605 : }
1606 : /*
1607 : * Take the page of any of the stable_node dups in
1608 : * this stable_node chain to let the tree walk
1609 : * continue. All KSM pages belonging to the
1610 : * stable_node dups in a stable_node chain
1611 : * have the same content and they're
1612 : * write protected at all times. Any will work
1613 : * fine to continue the walk.
1614 : */
1615 0 : tree_page = get_ksm_page(stable_node_any,
1616 : GET_KSM_PAGE_NOLOCK);
1617 : }
1618 0 : VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1619 0 : if (!tree_page) {
1620 : /*
1621 : * If we walked over a stale stable_node,
1622 : * get_ksm_page() will call rb_erase() and it
1623 : * may rebalance the tree from under us. So
1624 : * restart the search from scratch. Returning
1625 : * NULL would be safe too, but we'd generate
1626 : * false negative insertions just because some
1627 : * stable_node was stale.
1628 : */
1629 0 : goto again;
1630 : }
1631 :
1632 0 : ret = memcmp_pages(page, tree_page);
1633 0 : put_page(tree_page);
1634 :
1635 0 : parent = *new;
1636 0 : if (ret < 0)
1637 0 : new = &parent->rb_left;
1638 0 : else if (ret > 0)
1639 0 : new = &parent->rb_right;
1640 : else {
1641 0 : if (page_node) {
1642 0 : VM_BUG_ON(page_node->head != &migrate_nodes);
1643 : /*
1644 : * Test if the migrated page should be merged
1645 : * into a stable node dup. If the mapcount is
1646 : * 1 we can migrate it with another KSM page
1647 : * without adding it to the chain.
1648 : */
1649 0 : if (page_mapcount(page) > 1)
1650 0 : goto chain_append;
1651 : }
1652 :
1653 0 : if (!stable_node_dup) {
1654 : /*
1655 : * If the stable_node is a chain and
1656 : * we got a payload match in memcmp
1657 : * but we cannot merge the scanned
1658 : * page in any of the existing
1659 : * stable_node dups because they're
1660 : * all full, we need to wait for the
1661 : * scanned page to find itself a match
1662 : * in the unstable tree to create a
1663 : * brand new KSM page to add later to
1664 : * the dups of this stable_node.
1665 : */
1666 : return NULL;
1667 : }
1668 :
1669 : /*
1670 : * Lock and unlock the stable_node's page (which
1671 : * might already have been migrated) so that page
1672 : * migration is sure to notice its raised count.
1673 : * It would be more elegant to return stable_node
1674 : * than kpage, but that involves more changes.
1675 : */
1676 0 : tree_page = get_ksm_page(stable_node_dup,
1677 : GET_KSM_PAGE_TRYLOCK);
1678 :
1679 0 : if (PTR_ERR(tree_page) == -EBUSY)
1680 0 : return ERR_PTR(-EBUSY);
1681 :
1682 0 : if (unlikely(!tree_page))
1683 : /*
1684 : * The tree may have been rebalanced,
1685 : * so re-evaluate parent and new.
1686 : */
1687 0 : goto again;
1688 0 : unlock_page(tree_page);
1689 :
1690 0 : if (get_kpfn_nid(stable_node_dup->kpfn) !=
1691 0 : NUMA(stable_node_dup->nid)) {
1692 0 : put_page(tree_page);
1693 0 : goto replace;
1694 : }
1695 : return tree_page;
1696 : }
1697 : }
1698 :
1699 0 : if (!page_node)
1700 : return NULL;
1701 :
1702 0 : list_del(&page_node->list);
1703 0 : DO_NUMA(page_node->nid = nid);
1704 0 : rb_link_node(&page_node->node, parent, new);
1705 0 : rb_insert_color(&page_node->node, root);
1706 0 : out:
1707 0 : if (is_page_sharing_candidate(page_node)) {
1708 0 : get_page(page);
1709 0 : return page;
1710 : } else
1711 : return NULL;
1712 :
1713 0 : replace:
1714 : /*
1715 : * If stable_node was a chain and chain_prune collapsed it,
1716 : * stable_node has been updated to be the new regular
1717 : * stable_node. A collapse of the chain is indistinguishable
1718 : * from the case there was no chain in the stable
1719 : * rbtree. Otherwise stable_node is the chain and
1720 : * stable_node_dup is the dup to replace.
1721 : */
1722 0 : if (stable_node_dup == stable_node) {
1723 0 : VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1724 0 : VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1725 : /* there is no chain */
1726 0 : if (page_node) {
1727 0 : VM_BUG_ON(page_node->head != &migrate_nodes);
1728 0 : list_del(&page_node->list);
1729 0 : DO_NUMA(page_node->nid = nid);
1730 0 : rb_replace_node(&stable_node_dup->node,
1731 : &page_node->node,
1732 : root);
1733 0 : if (is_page_sharing_candidate(page_node))
1734 0 : get_page(page);
1735 : else
1736 : page = NULL;
1737 : } else {
1738 0 : rb_erase(&stable_node_dup->node, root);
1739 0 : page = NULL;
1740 : }
1741 : } else {
1742 0 : VM_BUG_ON(!is_stable_node_chain(stable_node));
1743 0 : __stable_node_dup_del(stable_node_dup);
1744 0 : if (page_node) {
1745 0 : VM_BUG_ON(page_node->head != &migrate_nodes);
1746 0 : list_del(&page_node->list);
1747 0 : DO_NUMA(page_node->nid = nid);
1748 0 : stable_node_chain_add_dup(page_node, stable_node);
1749 0 : if (is_page_sharing_candidate(page_node))
1750 0 : get_page(page);
1751 : else
1752 : page = NULL;
1753 : } else {
1754 : page = NULL;
1755 : }
1756 : }
1757 0 : stable_node_dup->head = &migrate_nodes;
1758 0 : list_add(&stable_node_dup->list, stable_node_dup->head);
1759 0 : return page;
1760 :
1761 0 : chain_append:
1762 : /* stable_node_dup could be NULL if it reached the limit */
1763 0 : if (!stable_node_dup)
1764 0 : stable_node_dup = stable_node_any;
1765 : /*
1766 : * If stable_node was a chain and chain_prune collapsed it,
1767 : * stable_node has been updated to be the new regular
1768 : * stable_node. A collapse of the chain is indistinguishable
1769 : * from the case there was no chain in the stable
1770 : * rbtree. Otherwise stable_node is the chain and
1771 : * stable_node_dup is the dup to append to.
1772 : */
1773 0 : if (stable_node_dup == stable_node) {
1774 0 : VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1775 0 : VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1776 : /* chain is missing so create it */
1777 0 : stable_node = alloc_stable_node_chain(stable_node_dup,
1778 : root);
1779 0 : if (!stable_node)
1780 : return NULL;
1781 : }
1782 : /*
1783 : * Add this stable_node dup that was
1784 : * migrated to the stable_node chain
1785 : * of the current nid for this page
1786 : * content.
1787 : */
1788 0 : VM_BUG_ON(!is_stable_node_chain(stable_node));
1789 0 : VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
1790 0 : VM_BUG_ON(page_node->head != &migrate_nodes);
1791 0 : list_del(&page_node->list);
1792 0 : DO_NUMA(page_node->nid = nid);
1793 0 : stable_node_chain_add_dup(page_node, stable_node);
1794 0 : goto out;
1795 : }
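/*
 * Illustrative sketch (editorial addition, not kernel code): a minimal
 * userspace model of the content-ordered walk that stable_tree_search()
 * performs above. The toy_node type and PAGE_SZ are assumptions of this
 * toy; the point is only that memcmp() on raw page content gives a
 * total order, so identical pages always converge on the same node.
 */
#include <string.h>

#define PAGE_SZ 4096

struct toy_node {
	unsigned char content[PAGE_SZ];
	struct toy_node *left, *right;
};

/* Walk left/right on the sign of memcmp(), like the rbtree walk above. */
static struct toy_node *toy_search(struct toy_node *root,
				   const unsigned char *page)
{
	while (root) {
		int ret = memcmp(page, root->content, PAGE_SZ);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;	/* identical content found */
	}
	return NULL;
}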
1796 :
1797 : /*
1798 : * stable_tree_insert - insert stable tree node pointing to new ksm page
1799 : * into the stable tree.
1800 : *
1801 : * This function returns the stable tree node just allocated on success,
1802 : * NULL otherwise.
1803 : */
1804 0 : static struct stable_node *stable_tree_insert(struct page *kpage)
1805 : {
1806 0 : int nid;
1807 0 : unsigned long kpfn;
1808 0 : struct rb_root *root;
1809 0 : struct rb_node **new;
1810 0 : struct rb_node *parent;
1811 0 : struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1812 0 : bool need_chain = false;
1813 :
1814 0 : kpfn = page_to_pfn(kpage);
1815 0 : nid = get_kpfn_nid(kpfn);
1816 0 : root = root_stable_tree + nid;
1817 : again:
1818 0 : parent = NULL;
1819 0 : new = &root->rb_node;
1820 :
1821 0 : while (*new) {
1822 0 : struct page *tree_page;
1823 0 : int ret;
1824 :
1825 0 : cond_resched();
1826 0 : stable_node = rb_entry(*new, struct stable_node, node);
1827 0 : stable_node_any = NULL;
1828 0 : tree_page = chain(&stable_node_dup, stable_node, root);
1829 0 : if (!stable_node_dup) {
1830 : /*
1831 : * Either all stable_node dups were full in
1832 : * this stable_node chain, or this chain was
1833 : * empty and should be rb_erased.
1834 : */
1835 0 : stable_node_any = stable_node_dup_any(stable_node,
1836 : root);
1837 0 : if (!stable_node_any) {
1838 : /* rb_erase just run */
1839 0 : goto again;
1840 : }
1841 : /*
1842 : * Take the page of any of the stable_node dups in
1843 : * this stable_node chain to let the tree walk
1844 : * continue. All KSM pages belonging to the
1845 : * stable_node dups in a stable_node chain
1846 : * have the same content and they're
1847 : * write protected at all times. Any will work
1848 : * fine to continue the walk.
1849 : */
1850 0 : tree_page = get_ksm_page(stable_node_any,
1851 : GET_KSM_PAGE_NOLOCK);
1852 : }
1853 0 : VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1854 0 : if (!tree_page) {
1855 : /*
1856 : * If we walked over a stale stable_node,
1857 : * get_ksm_page() will call rb_erase() and it
1858 : * may rebalance the tree from under us. So
1859 : * restart the search from scratch. Returning
1860 : * NULL would be safe too, but we'd generate
1861 : * false negative insertions just because some
1862 : * stable_node was stale.
1863 : */
1864 0 : goto again;
1865 : }
1866 :
1867 0 : ret = memcmp_pages(kpage, tree_page);
1868 0 : put_page(tree_page);
1869 :
1870 0 : parent = *new;
1871 0 : if (ret < 0)
1872 0 : new = &parent->rb_left;
1873 0 : else if (ret > 0)
1874 0 : new = &parent->rb_right;
1875 : else {
1876 : need_chain = true;
1877 : break;
1878 : }
1879 : }
1880 :
1881 0 : stable_node_dup = alloc_stable_node();
1882 0 : if (!stable_node_dup)
1883 : return NULL;
1884 :
1885 0 : INIT_HLIST_HEAD(&stable_node_dup->hlist);
1886 0 : stable_node_dup->kpfn = kpfn;
1887 0 : set_page_stable_node(kpage, stable_node_dup);
1888 0 : stable_node_dup->rmap_hlist_len = 0;
1889 0 : DO_NUMA(stable_node_dup->nid = nid);
1890 0 : if (!need_chain) {
1891 0 : rb_link_node(&stable_node_dup->node, parent, new);
1892 0 : rb_insert_color(&stable_node_dup->node, root);
1893 : } else {
1894 0 : if (!is_stable_node_chain(stable_node)) {
1895 0 : struct stable_node *orig = stable_node;
1896 : /* chain is missing so create it */
1897 0 : stable_node = alloc_stable_node_chain(orig, root);
1898 0 : if (!stable_node) {
1899 0 : free_stable_node(stable_node_dup);
1900 0 : return NULL;
1901 : }
1902 : }
1903 0 : stable_node_chain_add_dup(stable_node_dup, stable_node);
1904 : }
1905 :
1906 0 : return stable_node_dup;
1907 : }
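/*
 * Illustrative sketch (editorial addition, not kernel code): the
 * duplicate-key problem that stable_tree_insert() solves with chains.
 * An rbtree can hold only one node per content key, so a second KSM
 * page with equal content must hang off the existing node. The
 * next_dup field below is a toy stand-in for the hlist of "dups".
 */
#include <stddef.h>
#include <string.h>

#define TOY_PAGE_SZ 4096

struct toy_dup_node {
	unsigned char content[TOY_PAGE_SZ];
	struct toy_dup_node *left, *right;
	struct toy_dup_node *next_dup;	/* stand-in for hlist_dup */
};

static void toy_insert(struct toy_dup_node **link, struct toy_dup_node *new)
{
	while (*link) {
		int ret = memcmp(new->content, (*link)->content, TOY_PAGE_SZ);

		if (ret < 0)
			link = &(*link)->left;
		else if (ret > 0)
			link = &(*link)->right;
		else {
			/* equal content: chain the new copy as a "dup" */
			new->next_dup = (*link)->next_dup;
			(*link)->next_dup = new;
			return;
		}
	}
	new->left = new->right = NULL;
	new->next_dup = NULL;
	*link = new;
}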
1908 :
1909 : /*
1910 : * unstable_tree_search_insert - search for identical page,
1911 : * else insert rmap_item into the unstable tree.
1912 : *
1913 : * This function searches for a page in the unstable tree identical to the
1914 : * page currently being scanned; and if no identical page is found in the
1915 : * tree, we insert rmap_item as a new object into the unstable tree.
1916 : *
1917 : * This function returns pointer to rmap_item found to be identical
1918 : * to the currently scanned page, NULL otherwise.
1919 : *
1920 : * This function does both searching and inserting, because they share
1921 : * the same walking algorithm in an rbtree.
1922 : */
1923 : static
1924 0 : struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1925 : struct page *page,
1926 : struct page **tree_pagep)
1927 : {
1928 0 : struct rb_node **new;
1929 0 : struct rb_root *root;
1930 0 : struct rb_node *parent = NULL;
1931 0 : int nid;
1932 :
1933 0 : nid = get_kpfn_nid(page_to_pfn(page));
1934 0 : root = root_unstable_tree + nid;
1935 0 : new = &root->rb_node;
1936 :
1937 0 : while (*new) {
1938 0 : struct rmap_item *tree_rmap_item;
1939 0 : struct page *tree_page;
1940 0 : int ret;
1941 :
1942 0 : cond_resched();
1943 0 : tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1944 0 : tree_page = get_mergeable_page(tree_rmap_item);
1945 0 : if (!tree_page)
1946 : return NULL;
1947 :
1948 : /*
1949 : * Don't substitute a ksm page for a forked page.
1950 : */
1951 0 : if (page == tree_page) {
1952 0 : put_page(tree_page);
1953 0 : return NULL;
1954 : }
1955 :
1956 0 : ret = memcmp_pages(page, tree_page);
1957 :
1958 0 : parent = *new;
1959 0 : if (ret < 0) {
1960 0 : put_page(tree_page);
1961 0 : new = &parent->rb_left;
1962 0 : } else if (ret > 0) {
1963 0 : put_page(tree_page);
1964 0 : new = &parent->rb_right;
1965 0 : } else if (!ksm_merge_across_nodes &&
1966 0 : page_to_nid(tree_page) != nid) {
1967 : /*
1968 : * If tree_page has been migrated to another NUMA node,
1969 : * it will be flushed out and put in the right unstable
1970 : * tree next time: only merge with it if merge_across_nodes is set.
1971 : */
1972 0 : put_page(tree_page);
1973 0 : return NULL;
1974 : } else {
1975 0 : *tree_pagep = tree_page;
1976 0 : return tree_rmap_item;
1977 : }
1978 : }
1979 :
1980 0 : rmap_item->address |= UNSTABLE_FLAG;
1981 0 : rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1982 0 : DO_NUMA(rmap_item->nid = nid);
1983 0 : rb_link_node(&rmap_item->node, parent, new);
1984 0 : rb_insert_color(&rmap_item->node, root);
1985 :
1986 0 : ksm_pages_unshared++;
1987 0 : return NULL;
1988 : }
1989 :
1990 : /*
1991 : * stable_tree_append - add another rmap_item to the linked list of
1992 : * rmap_items hanging off a given node of the stable tree, all sharing
1993 : * the same ksm page.
1994 : */
1995 0 : static void stable_tree_append(struct rmap_item *rmap_item,
1996 : struct stable_node *stable_node,
1997 : bool max_page_sharing_bypass)
1998 : {
1999 : /*
2000 : * rmap won't find this mapping if we don't insert the
2001 : * rmap_item in the right stable_node
2002 : * duplicate. page_migration could break later if rmap breaks,
2003 : * so we might as well crash here. Strictly we only need to check
2004 : * rmap_hlist_len == STABLE_NODE_CHAIN, but checking for any
2005 : * negative value costs nothing: an underflow detected here first
2006 : * (rather than when decreasing rmap_hlist_len) would be a sign
2007 : * of memory corruption in the stable_node.
2008 : */
2009 0 : BUG_ON(stable_node->rmap_hlist_len < 0);
2010 :
2011 0 : stable_node->rmap_hlist_len++;
2012 0 : if (!max_page_sharing_bypass)
2013 : /* possibly non-fatal but unexpected overflow; only warn */
2014 0 : WARN_ON_ONCE(stable_node->rmap_hlist_len >
2015 : ksm_max_page_sharing);
2016 :
2017 0 : rmap_item->head = stable_node;
2018 0 : rmap_item->address |= STABLE_FLAG;
2019 0 : hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2020 :
2021 0 : if (rmap_item->hlist.next)
2022 0 : ksm_pages_sharing++;
2023 : else
2024 0 : ksm_pages_shared++;
2025 0 : }
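/*
 * Worked example (editorial addition): for one ksm page with N mappings,
 * the hlist logic above yields ksm_pages_shared == 1 (the first
 * rmap_item is added to an empty hlist, so hlist.next is NULL) and
 * ksm_pages_sharing == N - 1 (every later rmap_item sees a non-empty
 * hlist). A hypothetical model of that bookkeeping:
 */
static void toy_account(unsigned long nmaps,
			unsigned long *shared, unsigned long *sharing)
{
	if (!nmaps)
		return;
	*shared += 1;		/* one ksm page backing all mappings */
	*sharing += nmaps - 1;	/* the additional mappings it absorbs */
}
/* e.g. 500 identical pages merged into one: shared=1, sharing=499 */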
2026 :
2027 : /*
2028 : * cmp_and_merge_page - first see if page can be merged into the stable tree;
2029 : * if not, compare checksum to previous and if it's the same, see if page can
2030 : * be inserted into the unstable tree, or merged with a page already there and
2031 : * both transferred to the stable tree.
2032 : *
2033 : * @page: the page that we are searching an identical page for.
2034 : * @rmap_item: the reverse mapping into the virtual address of this page
2035 : */
2036 0 : static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2037 : {
2038 0 : struct mm_struct *mm = rmap_item->mm;
2039 0 : struct rmap_item *tree_rmap_item;
2040 0 : struct page *tree_page = NULL;
2041 0 : struct stable_node *stable_node;
2042 0 : struct page *kpage;
2043 0 : unsigned int checksum;
2044 0 : int err;
2045 0 : bool max_page_sharing_bypass = false;
2046 :
2047 0 : stable_node = page_stable_node(page);
2048 0 : if (stable_node) {
2049 0 : if (stable_node->head != &migrate_nodes &&
2050 0 : get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2051 0 : NUMA(stable_node->nid)) {
2052 0 : stable_node_dup_del(stable_node);
2053 0 : stable_node->head = &migrate_nodes;
2054 0 : list_add(&stable_node->list, stable_node->head);
2055 : }
2056 0 : if (stable_node->head != &migrate_nodes &&
2057 0 : rmap_item->head == stable_node)
2058 0 : return;
2059 : /*
2060 : * If it's a KSM fork, allow it to go over the sharing limit
2061 : * without warnings.
2062 : */
2063 0 : if (!is_page_sharing_candidate(stable_node))
2064 0 : max_page_sharing_bypass = true;
2065 : }
2066 :
2067 : /* We first start with searching the page inside the stable tree */
2068 0 : kpage = stable_tree_search(page);
2069 0 : if (kpage == page && rmap_item->head == stable_node) {
2070 0 : put_page(kpage);
2071 0 : return;
2072 : }
2073 :
2074 0 : remove_rmap_item_from_tree(rmap_item);
2075 :
2076 0 : if (kpage) {
2077 0 : if (PTR_ERR(kpage) == -EBUSY)
2078 : return;
2079 :
2080 0 : err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2081 0 : if (!err) {
2082 : /*
2083 : * The page was successfully merged:
2084 : * add its rmap_item to the stable tree.
2085 : */
2086 0 : lock_page(kpage);
2087 0 : stable_tree_append(rmap_item, page_stable_node(kpage),
2088 : max_page_sharing_bypass);
2089 0 : unlock_page(kpage);
2090 : }
2091 0 : put_page(kpage);
2092 0 : return;
2093 : }
2094 :
2095 : /*
2096 : * If the hash value of the page has changed from the last time
2097 : * we calculated it, this page is changing frequently: therefore we
2098 : * don't want to insert it in the unstable tree, and we don't want
2099 : * to waste our time searching for something identical to it there.
2100 : */
2101 0 : checksum = calc_checksum(page);
2102 0 : if (rmap_item->oldchecksum != checksum) {
2103 0 : rmap_item->oldchecksum = checksum;
2104 0 : return;
2105 : }
2106 :
2107 : /*
2108 : * Same checksum as an empty page. We attempt to merge it with the
2109 : * appropriate zero page if the user enabled this via sysfs.
2110 : */
2111 0 : if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2112 0 : struct vm_area_struct *vma;
2113 :
2114 0 : mmap_read_lock(mm);
2115 0 : vma = find_mergeable_vma(mm, rmap_item->address);
2116 0 : if (vma) {
2117 0 : err = try_to_merge_one_page(vma, page,
2118 0 : ZERO_PAGE(rmap_item->address));
2119 : } else {
2120 : /*
2121 : * If the vma is out of date, we do not need to
2122 : * continue.
2123 : */
2124 : err = 0;
2125 : }
2126 0 : mmap_read_unlock(mm);
2127 : /*
2128 : * In case of failure, the page was not really empty, so we
2129 : * need to continue. Otherwise we're done.
2130 : */
2131 0 : if (!err)
2132 : return;
2133 : }
2134 0 : tree_rmap_item =
2135 0 : unstable_tree_search_insert(rmap_item, page, &tree_page);
2136 0 : if (tree_rmap_item) {
2137 0 : bool split;
2138 :
2139 0 : kpage = try_to_merge_two_pages(rmap_item, page,
2140 : tree_rmap_item, tree_page);
2141 : /*
2142 : * If both pages we tried to merge belong to the same compound
2143 : * page, then we actually ended up increasing the reference
2144 : * count of the same compound page twice, and split_huge_page
2145 : * failed.
2146 : * Here we set a flag if that happened, and we use it later to
2147 : * try split_huge_page again. Since we call put_page right
2148 : * afterwards, the reference count will be correct and
2149 : * split_huge_page should succeed.
2150 : */
2151 0 : split = PageTransCompound(page)
2152 0 : && compound_head(page) == compound_head(tree_page);
2153 0 : put_page(tree_page);
2154 0 : if (kpage) {
2155 : /*
2156 : * The pages were successfully merged: insert new
2157 : * node in the stable tree and add both rmap_items.
2158 : */
2159 0 : lock_page(kpage);
2160 0 : stable_node = stable_tree_insert(kpage);
2161 0 : if (stable_node) {
2162 0 : stable_tree_append(tree_rmap_item, stable_node,
2163 : false);
2164 0 : stable_tree_append(rmap_item, stable_node,
2165 : false);
2166 : }
2167 0 : unlock_page(kpage);
2168 :
2169 : /*
2170 : * If we fail to insert the page into the stable tree,
2171 : * we will have 2 virtual addresses that are pointing
2172 : * to a ksm page left outside the stable tree,
2173 : * in which case we need to break_cow on both.
2174 : */
2175 0 : if (!stable_node) {
2176 0 : break_cow(tree_rmap_item);
2177 0 : break_cow(rmap_item);
2178 : }
2179 0 : } else if (split) {
2180 : /*
2181 : * We are here if we tried to merge two pages and
2182 : * failed because they both belonged to the same
2183 : * compound page. We will split the page now, but no
2184 : * merging will take place.
2185 : * We do not want to add the cost of a full lock; if
2186 : * the page is locked, it is better to skip it and
2187 : * perhaps try again later.
2188 : */
2189 0 : if (!trylock_page(page))
2190 : return;
2191 0 : split_huge_page(page);
2192 0 : unlock_page(page);
2193 : }
2194 : }
2195 : }
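/*
 * Illustrative sketch (editorial addition, not kernel code) of the
 * volatility gate used above: a page is only worth inserting into the
 * unstable tree once its checksum has stayed the same across two
 * consecutive scans. toy_checksum() is a hypothetical stand-in for
 * calc_checksum() (presumably xxhash-based, per the include at the
 * top of this file).
 */
#include <stddef.h>

static unsigned int toy_checksum(const unsigned char *p, size_t len)
{
	unsigned int sum = 0;

	while (len--)
		sum = sum * 31 + *p++;
	return sum;
}

/* Returns 1 once the page content is stable across two scans. */
static int toy_checksum_gate(unsigned int *oldchecksum,
			     const unsigned char *page, size_t len)
{
	unsigned int checksum = toy_checksum(page, len);

	if (*oldchecksum != checksum) {
		*oldchecksum = checksum;	/* still volatile: retry later */
		return 0;
	}
	return 1;
}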
2196 :
2197 0 : static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
2198 : struct rmap_item **rmap_list,
2199 : unsigned long addr)
2200 : {
2201 0 : struct rmap_item *rmap_item;
2202 :
2203 0 : while (*rmap_list) {
2204 0 : rmap_item = *rmap_list;
2205 0 : if ((rmap_item->address & PAGE_MASK) == addr)
2206 0 : return rmap_item;
2207 0 : if (rmap_item->address > addr)
2208 : break;
2209 0 : *rmap_list = rmap_item->rmap_list;
2210 0 : remove_rmap_item_from_tree(rmap_item);
2211 0 : free_rmap_item(rmap_item);
2212 : }
2213 :
2214 0 : rmap_item = alloc_rmap_item();
2215 0 : if (rmap_item) {
2216 : /* It has already been zeroed */
2217 0 : rmap_item->mm = mm_slot->mm;
2218 0 : rmap_item->address = addr;
2219 0 : rmap_item->rmap_list = *rmap_list;
2220 0 : *rmap_list = rmap_item;
2221 : }
2222 : return rmap_item;
2223 : }
2224 :
2225 0 : static struct rmap_item *scan_get_next_rmap_item(struct page **page)
2226 : {
2227 0 : struct mm_struct *mm;
2228 0 : struct mm_slot *slot;
2229 0 : struct vm_area_struct *vma;
2230 0 : struct rmap_item *rmap_item;
2231 0 : int nid;
2232 :
2233 0 : if (list_empty(&ksm_mm_head.mm_list))
2234 : return NULL;
2235 :
2236 0 : slot = ksm_scan.mm_slot;
2237 0 : if (slot == &ksm_mm_head) {
2238 : /*
2239 : * A number of pages can hang around indefinitely on per-cpu
2240 : * pagevecs, their raised page count preventing write_protect_page
2241 : * from merging them. Though it doesn't really matter much,
2242 : * it is puzzling to see some stuck in pages_volatile until
2243 : * other activity jostles them out, and they also prevented
2244 : * LTP's KSM test from succeeding deterministically; so drain
2245 : * them here (here rather than on entry to ksm_do_scan(),
2246 : * so we don't IPI too often when pages_to_scan is set low).
2247 : */
2248 0 : lru_add_drain_all();
2249 :
2250 : /*
2251 : * Whereas stale stable_nodes on the stable_tree itself
2252 : * get pruned in the regular course of stable_tree_search(),
2253 : * those moved out to the migrate_nodes list can accumulate:
2254 : * so prune them once before each full scan.
2255 : */
2256 0 : if (!ksm_merge_across_nodes) {
2257 0 : struct stable_node *stable_node, *next;
2258 0 : struct page *page;
2259 :
2260 0 : list_for_each_entry_safe(stable_node, next,
2261 : &migrate_nodes, list) {
2262 0 : page = get_ksm_page(stable_node,
2263 : GET_KSM_PAGE_NOLOCK);
2264 0 : if (page)
2265 0 : put_page(page);
2266 0 : cond_resched();
2267 : }
2268 : }
2269 :
2270 0 : for (nid = 0; nid < ksm_nr_node_ids; nid++)
2271 0 : root_unstable_tree[nid] = RB_ROOT;
2272 :
2273 0 : spin_lock(&ksm_mmlist_lock);
2274 0 : slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
2275 0 : ksm_scan.mm_slot = slot;
2276 0 : spin_unlock(&ksm_mmlist_lock);
2277 : /*
2278 : * Although we tested list_empty() above, a racing __ksm_exit
2279 : * of the last mm on the list may have removed it since then.
2280 : */
2281 0 : if (slot == &ksm_mm_head)
2282 : return NULL;
2283 0 : next_mm:
2284 0 : ksm_scan.address = 0;
2285 0 : ksm_scan.rmap_list = &slot->rmap_list;
2286 : }
2287 :
2288 0 : mm = slot->mm;
2289 0 : mmap_read_lock(mm);
2290 0 : if (ksm_test_exit(mm))
2291 : vma = NULL;
2292 : else
2293 0 : vma = find_vma(mm, ksm_scan.address);
2294 :
2295 0 : for (; vma; vma = vma->vm_next) {
2296 0 : if (!(vma->vm_flags & VM_MERGEABLE))
2297 0 : continue;
2298 0 : if (ksm_scan.address < vma->vm_start)
2299 0 : ksm_scan.address = vma->vm_start;
2300 0 : if (!vma->anon_vma)
2301 0 : ksm_scan.address = vma->vm_end;
2302 :
2303 0 : while (ksm_scan.address < vma->vm_end) {
2304 0 : if (ksm_test_exit(mm))
2305 : break;
2306 0 : *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2307 0 : if (IS_ERR_OR_NULL(*page)) {
2308 0 : ksm_scan.address += PAGE_SIZE;
2309 0 : cond_resched();
2310 0 : continue;
2311 : }
2312 0 : if (PageAnon(*page)) {
2313 0 : flush_anon_page(vma, *page, ksm_scan.address);
2314 0 : flush_dcache_page(*page);
2315 0 : rmap_item = get_next_rmap_item(slot,
2316 : ksm_scan.rmap_list, ksm_scan.address);
2317 0 : if (rmap_item) {
2318 0 : ksm_scan.rmap_list =
2319 0 : &rmap_item->rmap_list;
2320 0 : ksm_scan.address += PAGE_SIZE;
2321 : } else
2322 0 : put_page(*page);
2323 0 : mmap_read_unlock(mm);
2324 0 : return rmap_item;
2325 : }
2326 0 : put_page(*page);
2327 0 : ksm_scan.address += PAGE_SIZE;
2328 0 : cond_resched();
2329 : }
2330 : }
2331 :
2332 0 : if (ksm_test_exit(mm)) {
2333 0 : ksm_scan.address = 0;
2334 0 : ksm_scan.rmap_list = &slot->rmap_list;
2335 : }
2336 : /*
2337 : * Nuke all the rmap_items that are above this current rmap:
2338 : * because there were no VM_MERGEABLE vmas with such addresses.
2339 : */
2340 0 : remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
2341 :
2342 0 : spin_lock(&ksm_mmlist_lock);
2343 0 : ksm_scan.mm_slot = list_entry(slot->mm_list.next,
2344 : struct mm_slot, mm_list);
2345 0 : if (ksm_scan.address == 0) {
2346 : /*
2347 : * We've completed a full scan of all vmas, holding mmap_lock
2348 : * throughout, and found no VM_MERGEABLE: so do the same as
2349 : * __ksm_exit does to remove this mm from all our lists now.
2350 : * This applies either when cleaning up after __ksm_exit
2351 : * (but beware: we can reach here even before __ksm_exit),
2352 : * or when all VM_MERGEABLE areas have been unmapped (and
2353 : * mmap_lock then protects against race with MADV_MERGEABLE).
2354 : */
2355 0 : hash_del(&slot->link);
2356 0 : list_del(&slot->mm_list);
2357 0 : spin_unlock(&ksm_mmlist_lock);
2358 :
2359 0 : free_mm_slot(slot);
2360 0 : clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2361 0 : mmap_read_unlock(mm);
2362 0 : mmdrop(mm);
2363 : } else {
2364 0 : mmap_read_unlock(mm);
2365 : /*
2366 : * mmap_read_unlock(mm) must come first: once
2367 : * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
2368 : * already have been freed under us by __ksm_exit()
2369 : * because the "mm_slot" is still hashed and
2370 : * ksm_scan.mm_slot doesn't point to it anymore.
2371 : */
2372 0 : spin_unlock(&ksm_mmlist_lock);
2373 : }
2374 :
2375 : /* Repeat until we've completed scanning the whole list */
2376 0 : slot = ksm_scan.mm_slot;
2377 0 : if (slot != &ksm_mm_head)
2378 0 : goto next_mm;
2379 :
2380 0 : ksm_scan.seqnr++;
2381 0 : return NULL;
2382 : }
2383 :
2384 : /**
2385 : * ksm_do_scan - the ksm scanner main worker function.
2386 : * @scan_npages: number of pages we want to scan before we return.
2387 : */
2388 0 : static void ksm_do_scan(unsigned int scan_npages)
2389 : {
2390 0 : struct rmap_item *rmap_item;
2391 0 : struct page *page;
2392 :
2393 0 : while (scan_npages-- && likely(!freezing(current))) {
2394 0 : cond_resched();
2395 0 : rmap_item = scan_get_next_rmap_item(&page);
2396 0 : if (!rmap_item)
2397 0 : return;
2398 0 : cmp_and_merge_page(page, rmap_item);
2399 0 : put_page(page);
2400 : }
2401 : }
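/*
 * Editorial note with a worked example: ksmd throughput is governed by
 * the two sysfs knobs used above and below. A hypothetical helper for
 * the approximate upper bound (ignoring the cost of the scan itself):
 */
static unsigned long toy_pages_per_sec(unsigned int pages_to_scan,
				       unsigned int sleep_millisecs)
{
	if (!sleep_millisecs)
		sleep_millisecs = 1;	/* guard against division by zero */
	return (unsigned long)pages_to_scan * 1000 / sleep_millisecs;
}
/* e.g. pages_to_scan=100, sleep_millisecs=20 -> ~5000 pages scanned/sec */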
2402 :
2403 4 : static int ksmd_should_run(void)
2404 : {
2405 0 : return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
2406 : }
2407 :
2408 1 : static int ksm_scan_thread(void *nothing)
2409 : {
2410 1 : unsigned int sleep_ms;
2411 :
2412 1 : set_freezable();
2413 1 : set_user_nice(current, 5);
2414 :
2415 1 : while (!kthread_should_stop()) {
2416 1 : mutex_lock(&ksm_thread_mutex);
2417 1 : wait_while_offlining();
2418 1 : if (ksmd_should_run())
2419 0 : ksm_do_scan(ksm_thread_pages_to_scan);
2420 1 : mutex_unlock(&ksm_thread_mutex);
2421 :
2422 1 : try_to_freeze();
2423 :
2424 1 : if (ksmd_should_run()) {
2425 0 : sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2426 0 : wait_event_interruptible_timeout(ksm_iter_wait,
2427 : sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2428 : msecs_to_jiffies(sleep_ms));
2429 : } else {
2430 1 : wait_event_freezable(ksm_thread_wait,
2431 : ksmd_should_run() || kthread_should_stop());
2432 : }
2433 : }
2434 0 : return 0;
2435 : }
2436 :
2437 0 : int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2438 : unsigned long end, int advice, unsigned long *vm_flags)
2439 : {
2440 0 : struct mm_struct *mm = vma->vm_mm;
2441 0 : int err;
2442 :
2443 0 : switch (advice) {
2444 0 : case MADV_MERGEABLE:
2445 : /*
2446 : * Be somewhat over-protective for now!
2447 : */
2448 0 : if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
2449 : VM_PFNMAP | VM_IO | VM_DONTEXPAND |
2450 : VM_HUGETLB | VM_MIXEDMAP))
2451 : return 0; /* just ignore the advice */
2452 :
2453 0 : if (vma_is_dax(vma))
2454 : return 0;
2455 :
2456 : #ifdef VM_SAO
2457 : if (*vm_flags & VM_SAO)
2458 : return 0;
2459 : #endif
2460 : #ifdef VM_SPARC_ADI
2461 : if (*vm_flags & VM_SPARC_ADI)
2462 : return 0;
2463 : #endif
2464 :
2465 0 : if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2466 0 : err = __ksm_enter(mm);
2467 0 : if (err)
2468 : return err;
2469 : }
2470 :
2471 0 : *vm_flags |= VM_MERGEABLE;
2472 0 : break;
2473 :
2474 0 : case MADV_UNMERGEABLE:
2475 0 : if (!(*vm_flags & VM_MERGEABLE))
2476 : return 0; /* just ignore the advice */
2477 :
2478 0 : if (vma->anon_vma) {
2479 0 : err = unmerge_ksm_pages(vma, start, end);
2480 0 : if (err)
2481 : return err;
2482 : }
2483 :
2484 0 : *vm_flags &= ~VM_MERGEABLE;
2485 0 : break;
2486 : }
2487 :
2488 : return 0;
2489 : }
2490 : EXPORT_SYMBOL_GPL(ksm_madvise);
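/*
 * Usage sketch (editorial addition, userspace): opting an anonymous
 * mapping into KSM with madvise(MADV_MERGEABLE), which lands in
 * ksm_madvise() above. Minimal error handling; needs CONFIG_KSM=y.
 */
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64MB anonymous mapping */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0x5a, len);		/* identical pages: all mergeable */
	if (madvise(buf, len, MADV_MERGEABLE))
		return 1;		/* EINVAL if KSM is not configured */
	sleep(60);			/* give ksmd time to merge */
	return 0;
}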
2491 :
2492 0 : int __ksm_enter(struct mm_struct *mm)
2493 : {
2494 0 : struct mm_slot *mm_slot;
2495 0 : int needs_wakeup;
2496 :
2497 0 : mm_slot = alloc_mm_slot();
2498 0 : if (!mm_slot)
2499 : return -ENOMEM;
2500 :
2501 : /* Check ksm_run too? Would need tighter locking */
2502 0 : needs_wakeup = list_empty(&ksm_mm_head.mm_list);
2503 :
2504 0 : spin_lock(&ksm_mmlist_lock);
2505 0 : insert_to_mm_slots_hash(mm, mm_slot);
2506 : /*
2507 : * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2508 : * insert just behind the scanning cursor, to let the area settle
2509 : * down a little; when fork is followed by immediate exec, we don't
2510 : * want ksmd to waste time setting up and tearing down an rmap_list.
2511 : *
2512 : * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2513 : * scanning cursor, otherwise KSM pages in newly forked mms will be
2514 : * missed: then we might as well insert at the end of the list.
2515 : */
2516 0 : if (ksm_run & KSM_RUN_UNMERGE)
2517 0 : list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
2518 : else
2519 0 : list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
2520 0 : spin_unlock(&ksm_mmlist_lock);
2521 :
2522 0 : set_bit(MMF_VM_MERGEABLE, &mm->flags);
2523 0 : mmgrab(mm);
2524 :
2525 0 : if (needs_wakeup)
2526 0 : wake_up_interruptible(&ksm_thread_wait);
2527 :
2528 : return 0;
2529 : }
2530 :
2531 0 : void __ksm_exit(struct mm_struct *mm)
2532 : {
2533 0 : struct mm_slot *mm_slot;
2534 0 : int easy_to_free = 0;
2535 :
2536 : /*
2537 : * This process is exiting: if it's straightforward (as is the
2538 : * case when ksmd was never running), free mm_slot immediately.
2539 : * But if it's at the cursor or has rmap_items linked to it, use
2540 : * mmap_lock to synchronize with any break_cows before pagetables
2541 : * are freed, and leave the mm_slot on the list for ksmd to free.
2542 : * Beware: ksm may already have noticed it exiting and freed the slot.
2543 : */
2544 :
2545 0 : spin_lock(&ksm_mmlist_lock);
2546 0 : mm_slot = get_mm_slot(mm);
2547 0 : if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2548 0 : if (!mm_slot->rmap_list) {
2549 0 : hash_del(&mm_slot->link);
2550 0 : list_del(&mm_slot->mm_list);
2551 0 : easy_to_free = 1;
2552 : } else {
2553 0 : list_move(&mm_slot->mm_list,
2554 : &ksm_scan.mm_slot->mm_list);
2555 : }
2556 : }
2557 0 : spin_unlock(&ksm_mmlist_lock);
2558 :
2559 0 : if (easy_to_free) {
2560 0 : free_mm_slot(mm_slot);
2561 0 : clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2562 0 : mmdrop(mm);
2563 0 : } else if (mm_slot) {
2564 0 : mmap_write_lock(mm);
2565 0 : mmap_write_unlock(mm);
2566 : }
2567 0 : }
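/*
 * Illustrative sketch (editorial addition, userspace pthreads toy) of
 * the empty mmap_write_lock()/mmap_write_unlock() pair above: taking
 * and immediately dropping the lock in write mode waits out every
 * reader (here, any break_cow in flight) without holding anything
 * afterwards.
 */
#include <pthread.h>

static void toy_synchronize_with_readers(pthread_rwlock_t *lock)
{
	pthread_rwlock_wrlock(lock);	/* blocks until readers drain */
	pthread_rwlock_unlock(lock);	/* nothing to do while held */
}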
2568 :
2569 0 : struct page *ksm_might_need_to_copy(struct page *page,
2570 : struct vm_area_struct *vma, unsigned long address)
2571 : {
2572 0 : struct anon_vma *anon_vma = page_anon_vma(page);
2573 0 : struct page *new_page;
2574 :
2575 0 : if (PageKsm(page)) {
2576 0 : if (page_stable_node(page) &&
2577 0 : !(ksm_run & KSM_RUN_UNMERGE))
2578 : return page; /* no need to copy it */
2579 0 : } else if (!anon_vma) {
2580 : return page; /* no need to copy it */
2581 0 : } else if (anon_vma->root == vma->anon_vma->root &&
2582 0 : page->index == linear_page_index(vma, address)) {
2583 : return page; /* still no need to copy it */
2584 : }
2585 0 : if (!PageUptodate(page))
2586 : return page; /* let do_swap_page report the error */
2587 :
2588 0 : new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2589 0 : if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
2590 : put_page(new_page);
2591 : new_page = NULL;
2592 : }
2593 0 : if (new_page) {
2594 0 : copy_user_highpage(new_page, page, address, vma);
2595 :
2596 0 : SetPageDirty(new_page);
2597 0 : __SetPageUptodate(new_page);
2598 0 : __SetPageLocked(new_page);
2599 : }
2600 :
2601 : return new_page;
2602 : }
2603 :
2604 0 : void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2605 : {
2606 0 : struct stable_node *stable_node;
2607 0 : struct rmap_item *rmap_item;
2608 0 : int search_new_forks = 0;
2609 :
2610 0 : VM_BUG_ON_PAGE(!PageKsm(page), page);
2611 :
2612 : /*
2613 : * Rely on the page lock to protect against concurrent modifications
2614 : * to that page's node of the stable tree.
2615 : */
2616 0 : VM_BUG_ON_PAGE(!PageLocked(page), page);
2617 :
2618 0 : stable_node = page_stable_node(page);
2619 0 : if (!stable_node)
2620 : return;
2621 0 : again:
2622 0 : hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2623 0 : struct anon_vma *anon_vma = rmap_item->anon_vma;
2624 0 : struct anon_vma_chain *vmac;
2625 0 : struct vm_area_struct *vma;
2626 :
2627 0 : cond_resched();
2628 0 : anon_vma_lock_read(anon_vma);
2629 0 : anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
2630 : 0, ULONG_MAX) {
2631 0 : unsigned long addr;
2632 :
2633 0 : cond_resched();
2634 0 : vma = vmac->vma;
2635 :
2636 : /* Ignore the stable/unstable/seqnr flags */
2637 0 : addr = rmap_item->address & ~KSM_FLAG_MASK;
2638 :
2639 0 : if (addr < vma->vm_start || addr >= vma->vm_end)
2640 0 : continue;
2641 : /*
2642 : * Initially we examine only the vma which covers this
2643 : * rmap_item; but later, if there is still work to do,
2644 : * we examine covering vmas in other mms: in case they
2645 : * were forked from the original since ksmd passed.
2646 : */
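			/*
			 * The equality test below visits only the
			 * rmap_item's own mm on the first pass
			 * (search_new_forks == 0) and only other,
			 * forked mms on the second.
			 */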
2647 0 : if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2648 0 : continue;
2649 :
2650 0 : if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2651 0 : continue;
2652 :
2653 0 : if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
2654 0 : anon_vma_unlock_read(anon_vma);
2655 0 : return;
2656 : }
2657 0 : if (rwc->done && rwc->done(page)) {
2658 0 : anon_vma_unlock_read(anon_vma);
2659 0 : return;
2660 : }
2661 : }
2662 0 : anon_vma_unlock_read(anon_vma);
2663 : }
2664 0 : if (!search_new_forks++)
2665 0 : goto again;
2666 : }
2667 :
2668 : #ifdef CONFIG_MIGRATION
2669 0 : void ksm_migrate_page(struct page *newpage, struct page *oldpage)
2670 : {
2671 0 : struct stable_node *stable_node;
2672 :
2673 0 : VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
2674 0 : VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
2675 0 : VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
2676 :
2677 0 : stable_node = page_stable_node(newpage);
2678 0 : if (stable_node) {
2679 0 : VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
2680 0 : stable_node->kpfn = page_to_pfn(newpage);
2681 : /*
2682 : * newpage->mapping was set in advance; now we need smp_wmb()
2683 : * to make sure that the new stable_node->kpfn is visible
2684 : * to get_ksm_page() before it can see that oldpage->mapping
2685 : * has gone stale (or that PageSwapCache has been cleared).
2686 : */
2687 0 : smp_wmb();
2688 0 : set_page_stable_node(oldpage, NULL);
2689 : }
2690 0 : }
2691 : #endif /* CONFIG_MIGRATION */
2692 :
2693 : #ifdef CONFIG_MEMORY_HOTREMOVE
2694 : static void wait_while_offlining(void)
2695 : {
2696 : while (ksm_run & KSM_RUN_OFFLINE) {
2697 : mutex_unlock(&ksm_thread_mutex);
2698 : wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
2699 : TASK_UNINTERRUPTIBLE);
2700 : mutex_lock(&ksm_thread_mutex);
2701 : }
2702 : }
2703 :
2704 : static bool stable_node_dup_remove_range(struct stable_node *stable_node,
2705 : unsigned long start_pfn,
2706 : unsigned long end_pfn)
2707 : {
2708 : if (stable_node->kpfn >= start_pfn &&
2709 : stable_node->kpfn < end_pfn) {
2710 : /*
2711 : * Don't get_ksm_page, page has already gone:
2712 : * which is why we keep kpfn instead of page*
2713 : */
2714 : remove_node_from_stable_tree(stable_node);
2715 : return true;
2716 : }
2717 : return false;
2718 : }
2719 :
2720 : static bool stable_node_chain_remove_range(struct stable_node *stable_node,
2721 : unsigned long start_pfn,
2722 : unsigned long end_pfn,
2723 : struct rb_root *root)
2724 : {
2725 : struct stable_node *dup;
2726 : struct hlist_node *hlist_safe;
2727 :
2728 : if (!is_stable_node_chain(stable_node)) {
2729 : VM_BUG_ON(is_stable_node_dup(stable_node));
2730 : return stable_node_dup_remove_range(stable_node, start_pfn,
2731 : end_pfn);
2732 : }
2733 :
2734 : hlist_for_each_entry_safe(dup, hlist_safe,
2735 : &stable_node->hlist, hlist_dup) {
2736 : VM_BUG_ON(!is_stable_node_dup(dup));
2737 : stable_node_dup_remove_range(dup, start_pfn, end_pfn);
2738 : }
2739 : if (hlist_empty(&stable_node->hlist)) {
2740 : free_stable_node_chain(stable_node, root);
2741 : return true; /* notify caller that tree was rebalanced */
2742 : } else
2743 : return false;
2744 : }
2745 :
2746 : static void ksm_check_stable_tree(unsigned long start_pfn,
2747 : unsigned long end_pfn)
2748 : {
2749 : struct stable_node *stable_node, *next;
2750 : struct rb_node *node;
2751 : int nid;
2752 :
2753 : for (nid = 0; nid < ksm_nr_node_ids; nid++) {
2754 : node = rb_first(root_stable_tree + nid);
2755 : while (node) {
2756 : stable_node = rb_entry(node, struct stable_node, node);
2757 : if (stable_node_chain_remove_range(stable_node,
2758 : start_pfn, end_pfn,
2759 : root_stable_tree +
2760 : nid))
2761 : node = rb_first(root_stable_tree + nid);
2762 : else
2763 : node = rb_next(node);
2764 : cond_resched();
2765 : }
2766 : }
2767 : list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2768 : if (stable_node->kpfn >= start_pfn &&
2769 : stable_node->kpfn < end_pfn)
2770 : remove_node_from_stable_tree(stable_node);
2771 : cond_resched();
2772 : }
2773 : }
2774 :
2775 : static int ksm_memory_callback(struct notifier_block *self,
2776 : unsigned long action, void *arg)
2777 : {
2778 : struct memory_notify *mn = arg;
2779 :
2780 : switch (action) {
2781 : case MEM_GOING_OFFLINE:
2782 : /*
2783 : * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2784 : * and remove_all_stable_nodes() while memory is going offline:
2785 : * it is unsafe for them to touch the stable tree at this time.
2786 : * But unmerge_ksm_pages(), rmap lookups and other entry points
2787 : * which do not need the ksm_thread_mutex are all safe.
2788 : */
2789 : mutex_lock(&ksm_thread_mutex);
2790 : ksm_run |= KSM_RUN_OFFLINE;
2791 : mutex_unlock(&ksm_thread_mutex);
2792 : break;
2793 :
2794 : case MEM_OFFLINE:
2795 : /*
2796 : * Most of the work is done by page migration; but there might
2797 : * be a few stable_nodes left over, still pointing to struct
2798 : * pages which have been offlined: prune those from the tree,
2799 : * otherwise get_ksm_page() might later try to access a
2800 : * non-existent struct page.
2801 : */
2802 : ksm_check_stable_tree(mn->start_pfn,
2803 : mn->start_pfn + mn->nr_pages);
2804 : fallthrough;
2805 : case MEM_CANCEL_OFFLINE:
2806 : mutex_lock(&ksm_thread_mutex);
2807 : ksm_run &= ~KSM_RUN_OFFLINE;
2808 : mutex_unlock(&ksm_thread_mutex);
2809 :
2810 : smp_mb(); /* wake_up_bit advises this */
2811 : wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
2812 : break;
2813 : }
2814 : return NOTIFY_OK;
2815 : }
2816 : #else
2817 1 : static void wait_while_offlining(void)
2818 : {
2819 1 : }
2820 : #endif /* CONFIG_MEMORY_HOTREMOVE */
2821 :
2822 : #ifdef CONFIG_SYSFS
2823 : /*
2824 : * This all compiles without CONFIG_SYSFS, but is a waste of space.
2825 : */
2826 :
2827 : #define KSM_ATTR_RO(_name) \
2828 : static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2829 : #define KSM_ATTR(_name) \
2830 : static struct kobj_attribute _name##_attr = \
2831 : __ATTR(_name, 0644, _name##_show, _name##_store)
2832 :
2833 0 : static ssize_t sleep_millisecs_show(struct kobject *kobj,
2834 : struct kobj_attribute *attr, char *buf)
2835 : {
2836 0 : return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
2837 : }
2838 :
2839 0 : static ssize_t sleep_millisecs_store(struct kobject *kobj,
2840 : struct kobj_attribute *attr,
2841 : const char *buf, size_t count)
2842 : {
2843 0 : unsigned int msecs;
2844 0 : int err;
2845 :
2846 0 : err = kstrtouint(buf, 10, &msecs);
2847 0 : if (err)
2848 : return -EINVAL;
2849 :
2850 0 : ksm_thread_sleep_millisecs = msecs;
2851 0 : wake_up_interruptible(&ksm_iter_wait);
2852 :
2853 0 : return count;
2854 : }
2855 : KSM_ATTR(sleep_millisecs);
2856 :
2857 0 : static ssize_t pages_to_scan_show(struct kobject *kobj,
2858 : struct kobj_attribute *attr, char *buf)
2859 : {
2860 0 : return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
2861 : }
2862 :
2863 0 : static ssize_t pages_to_scan_store(struct kobject *kobj,
2864 : struct kobj_attribute *attr,
2865 : const char *buf, size_t count)
2866 : {
2867 0 : unsigned int nr_pages;
2868 0 : int err;
2869 :
2870 0 : err = kstrtouint(buf, 10, &nr_pages);
2871 0 : if (err)
2872 : return -EINVAL;
2873 :
2874 0 : ksm_thread_pages_to_scan = nr_pages;
2875 :
2876 0 : return count;
2877 : }
2878 : KSM_ATTR(pages_to_scan);
2879 :
2880 0 : static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
2881 : char *buf)
2882 : {
2883 0 : return sysfs_emit(buf, "%lu\n", ksm_run);
2884 : }
2885 :
2886 0 : static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
2887 : const char *buf, size_t count)
2888 : {
2889 0 : unsigned int flags;
2890 0 : int err;
2891 :
2892 0 : err = kstrtouint(buf, 10, &flags);
2893 0 : if (err)
2894 : return -EINVAL;
2895 0 : if (flags > KSM_RUN_UNMERGE)
2896 : return -EINVAL;
2897 :
2898 : /*
2899 : * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
2900 : * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2901 : * breaking COW to free the pages_shared (but leaves mm_slots
2902 : * on the list for when ksmd may be set running again).
2903 : */
2904 :
2905 0 : mutex_lock(&ksm_thread_mutex);
2906 0 : wait_while_offlining();
2907 0 : if (ksm_run != flags) {
2908 0 : ksm_run = flags;
2909 0 : if (flags & KSM_RUN_UNMERGE) {
2910 0 : set_current_oom_origin();
2911 0 : err = unmerge_and_remove_all_rmap_items();
2912 0 : clear_current_oom_origin();
2913 0 : if (err) {
2914 0 : ksm_run = KSM_RUN_STOP;
2915 0 : count = err;
2916 : }
2917 : }
2918 : }
2919 0 : mutex_unlock(&ksm_thread_mutex);
2920 :
2921 0 : if (flags & KSM_RUN_MERGE)
2922 0 : wake_up_interruptible(&ksm_thread_wait);
2923 :
2924 0 : return count;
2925 : }
2926 : KSM_ATTR(run);
2927 :
2928 : #ifdef CONFIG_NUMA
2929 0 : static ssize_t merge_across_nodes_show(struct kobject *kobj,
2930 : struct kobj_attribute *attr, char *buf)
2931 : {
2932 0 : return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
2933 : }
2934 :
2935 0 : static ssize_t merge_across_nodes_store(struct kobject *kobj,
2936 : struct kobj_attribute *attr,
2937 : const char *buf, size_t count)
2938 : {
2939 0 : int err;
2940 0 : unsigned long knob;
2941 :
2942 0 : err = kstrtoul(buf, 10, &knob);
2943 0 : if (err)
2944 0 : return err;
2945 0 : if (knob > 1)
2946 : return -EINVAL;
2947 :
2948 0 : mutex_lock(&ksm_thread_mutex);
2949 0 : wait_while_offlining();
2950 0 : if (ksm_merge_across_nodes != knob) {
2951 0 : if (ksm_pages_shared || remove_all_stable_nodes())
2952 : err = -EBUSY;
2953 0 : else if (root_stable_tree == one_stable_tree) {
2954 0 : struct rb_root *buf;
2955 : /*
2956 : * This is the first time that we switch away from the
2957 : * default of merging across nodes: must now allocate
2958 : * a buffer to hold as many roots as may be needed.
2959 : * Allocate stable and unstable together:
2960 : * MAXSMP NODES_SHIFT 10 will use 16kB.
2961 : */
2962 0 : buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
2963 : GFP_KERNEL);
2964 : /* Let us assume that RB_ROOT is NULL is zero */
2965 0 : if (!buf)
2966 : err = -ENOMEM;
2967 : else {
2968 0 : root_stable_tree = buf;
2969 0 : root_unstable_tree = buf + nr_node_ids;
2970 : /* Stable tree is empty but not the unstable */
2971 0 : root_unstable_tree[0] = one_unstable_tree[0];
2972 : }
2973 : }
2974 0 : if (!err) {
2975 0 : ksm_merge_across_nodes = knob;
2976 0 : ksm_nr_node_ids = knob ? 1 : nr_node_ids;
2977 : }
2978 : }
2979 0 : mutex_unlock(&ksm_thread_mutex);
2980 :
2981 0 : return err ? err : count;
2982 : }
2983 : KSM_ATTR(merge_across_nodes);
2984 : #endif
2985 :
2986 0 : static ssize_t use_zero_pages_show(struct kobject *kobj,
2987 : struct kobj_attribute *attr, char *buf)
2988 : {
2989 0 : return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
2990 : }
2991 0 : static ssize_t use_zero_pages_store(struct kobject *kobj,
2992 : struct kobj_attribute *attr,
2993 : const char *buf, size_t count)
2994 : {
2995 0 : int err;
2996 0 : bool value;
2997 :
2998 0 : err = kstrtobool(buf, &value);
2999 0 : if (err)
3000 : return -EINVAL;
3001 :
3002 0 : ksm_use_zero_pages = value;
3003 :
3004 0 : return count;
3005 : }
3006 : KSM_ATTR(use_zero_pages);
3007 :
3008 0 : static ssize_t max_page_sharing_show(struct kobject *kobj,
3009 : struct kobj_attribute *attr, char *buf)
3010 : {
3011 0 : return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3012 : }
3013 :
3014 0 : static ssize_t max_page_sharing_store(struct kobject *kobj,
3015 : struct kobj_attribute *attr,
3016 : const char *buf, size_t count)
3017 : {
3018 0 : int err;
3019 0 : int knob;
3020 :
3021 0 : err = kstrtoint(buf, 10, &knob);
3022 0 : if (err)
3023 0 : return err;
3024 : /*
3025 : * When a KSM page is created it is shared by 2 mappings. The
3026 : * knob being signed, the check below also implicitly rejects
3027 : * negative values.
3028 : */
3029 0 : if (knob < 2)
3030 : return -EINVAL;
3031 :
3032 0 : if (READ_ONCE(ksm_max_page_sharing) == knob)
3033 0 : return count;
3034 :
3035 0 : mutex_lock(&ksm_thread_mutex);
3036 0 : wait_while_offlining();
3037 0 : if (ksm_max_page_sharing != knob) {
3038 0 : if (ksm_pages_shared || remove_all_stable_nodes())
3039 : err = -EBUSY;
3040 : else
3041 0 : ksm_max_page_sharing = knob;
3042 : }
3043 0 : mutex_unlock(&ksm_thread_mutex);
3044 :
3045 0 : return err ? err : count;
3046 : }
3047 : KSM_ATTR(max_page_sharing);
3048 :
3049 0 : static ssize_t pages_shared_show(struct kobject *kobj,
3050 : struct kobj_attribute *attr, char *buf)
3051 : {
3052 0 : return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3053 : }
3054 : KSM_ATTR_RO(pages_shared);
3055 :
3056 0 : static ssize_t pages_sharing_show(struct kobject *kobj,
3057 : struct kobj_attribute *attr, char *buf)
3058 : {
3059 0 : return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3060 : }
3061 : KSM_ATTR_RO(pages_sharing);
3062 :
3063 0 : static ssize_t pages_unshared_show(struct kobject *kobj,
3064 : struct kobj_attribute *attr, char *buf)
3065 : {
3066 0 : return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3067 : }
3068 : KSM_ATTR_RO(pages_unshared);
3069 :
3070 0 : static ssize_t pages_volatile_show(struct kobject *kobj,
3071 : struct kobj_attribute *attr, char *buf)
3072 : {
3073 0 : long ksm_pages_volatile;
3074 :
3075 0 : ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3076 0 : - ksm_pages_sharing - ksm_pages_unshared;
3077 : /*
3078 : * It was not worth any locking to calculate that statistic,
3079 : * but it might therefore sometimes be negative: conceal that.
3080 : */
3081 0 : if (ksm_pages_volatile < 0)
3082 : ksm_pages_volatile = 0;
3083 0 : return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3084 : }
3085 : KSM_ATTR_RO(pages_volatile);
3086 :
3087 0 : static ssize_t stable_node_dups_show(struct kobject *kobj,
3088 : struct kobj_attribute *attr, char *buf)
3089 : {
3090 0 : return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
3091 : }
3092 : KSM_ATTR_RO(stable_node_dups);
3093 :
3094 0 : static ssize_t stable_node_chains_show(struct kobject *kobj,
3095 : struct kobj_attribute *attr, char *buf)
3096 : {
3097 0 : return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
3098 : }
3099 : KSM_ATTR_RO(stable_node_chains);
3100 :
3101 : static ssize_t
3102 0 : stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3103 : struct kobj_attribute *attr,
3104 : char *buf)
3105 : {
3106 0 : return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3107 : }
3108 :
3109 : static ssize_t
3110 0 : stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3111 : struct kobj_attribute *attr,
3112 : const char *buf, size_t count)
3113 : {
3114 0 : unsigned long msecs;
3115 0 : int err;
3116 :
3117 0 : err = kstrtoul(buf, 10, &msecs);
3118 0 : if (err || msecs > UINT_MAX)
3119 : return -EINVAL;
3120 :
3121 0 : ksm_stable_node_chains_prune_millisecs = msecs;
3122 :
3123 0 : return count;
3124 : }
3125 : KSM_ATTR(stable_node_chains_prune_millisecs);
3126 :
3127 0 : static ssize_t full_scans_show(struct kobject *kobj,
3128 : struct kobj_attribute *attr, char *buf)
3129 : {
3130 0 : return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3131 : }
3132 : KSM_ATTR_RO(full_scans);
3133 :
3134 : static struct attribute *ksm_attrs[] = {
3135 : &sleep_millisecs_attr.attr,
3136 : &pages_to_scan_attr.attr,
3137 : &run_attr.attr,
3138 : &pages_shared_attr.attr,
3139 : &pages_sharing_attr.attr,
3140 : &pages_unshared_attr.attr,
3141 : &pages_volatile_attr.attr,
3142 : &full_scans_attr.attr,
3143 : #ifdef CONFIG_NUMA
3144 : &merge_across_nodes_attr.attr,
3145 : #endif
3146 : &max_page_sharing_attr.attr,
3147 : &stable_node_chains_attr.attr,
3148 : &stable_node_dups_attr.attr,
3149 : &stable_node_chains_prune_millisecs_attr.attr,
3150 : &use_zero_pages_attr.attr,
3151 : NULL,
3152 : };
3153 :
3154 : static const struct attribute_group ksm_attr_group = {
3155 : .attrs = ksm_attrs,
3156 : .name = "ksm",
3157 : };
3158 : #endif /* CONFIG_SYSFS */
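/*
 * Usage sketch (editorial addition, userspace): driving the knobs above
 * through /sys/kernel/mm/ksm, the C equivalent of
 * "echo 1 > /sys/kernel/mm/ksm/run" plus reading back pages_sharing.
 * Needs root and CONFIG_SYSFS=y.
 */
#include <stdio.h>

int main(void)
{
	unsigned long sharing = 0;
	FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* KSM_RUN_MERGE: start ksmd */
	fclose(f);

	f = fopen("/sys/kernel/mm/ksm/pages_sharing", "r");
	if (f) {
		if (fscanf(f, "%lu", &sharing) == 1)
			printf("pages_sharing: %lu\n", sharing);
		fclose(f);
	}
	return 0;
}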
3159 :
3160 1 : static int __init ksm_init(void)
3161 : {
3162 1 : struct task_struct *ksm_thread;
3163 1 : int err;
3164 :
3165 : /* The correct value depends on page size and endianness */
3166 2 : zero_checksum = calc_checksum(ZERO_PAGE(0));
3167 : /* Default to false for backwards compatibility */
3168 1 : ksm_use_zero_pages = false;
3169 :
3170 1 : err = ksm_slab_init();
3171 1 : if (err)
3172 0 : goto out;
3173 :
3174 1 : ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
3175 1 : if (IS_ERR(ksm_thread)) {
3176 0 : pr_err("ksm: creating kthread failed\n");
3177 0 : err = PTR_ERR(ksm_thread);
3178 0 : goto out_free;
3179 : }
3180 :
3181 : #ifdef CONFIG_SYSFS
3182 1 : err = sysfs_create_group(mm_kobj, &ksm_attr_group);
3183 1 : if (err) {
3184 0 : pr_err("ksm: register sysfs failed\n");
3185 0 : kthread_stop(ksm_thread);
3186 0 : goto out_free;
3187 : }
3188 : #else
3189 : ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
3190 :
3191 : #endif /* CONFIG_SYSFS */
3192 :
3193 : #ifdef CONFIG_MEMORY_HOTREMOVE
3194 : /* There is no significance to this priority 100 */
3195 : hotplug_memory_notifier(ksm_memory_callback, 100);
3196 : #endif
3197 : return 0;
3198 :
3199 0 : out_free:
3200 0 : ksm_slab_free();
3201 : out:
3202 : return err;
3203 : }
3204 : subsys_initcall(ksm_init);