// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 * Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more frequently but, hopefully,
 * are used less frequently than the thrashing set:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *      NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *      (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
 *
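 * A worked example with made-up numbers: say the counter read
 * E = 5000 when a page was evicted and R = 5400 when it refaults,
 * giving a refault distance of 400. With 600 pages on the active
 * list, 400 <= 600 holds and the page is activated by this
 * simplified rule; with only 300 active pages, it would be put back
 * on the inactive list instead.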
 *
 * Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 * Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 * Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the memcg and node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;
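
/*
 * For orientation, pack_shadow() below lays out the shadow entry
 * like this, from most to least significant bits:
 *
 *      [ eviction timestamp | memcg id | node id | workingset bit ]
 *
 * with one more bit consumed by the xarray for the value tag.
 */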

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
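
/*
 * Note that unpack_shadow() inverts pack_shadow() exactly, except
 * that the low bucket_order bits of the eviction timestamp were
 * shaved off on packing: the unpacked value is the original rounded
 * down to a multiple of 1UL << bucket_order.
 */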

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}
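
/*
 * As an illustration, take a hypothetical hierarchy root/A/B: aging
 * nr_pages in B's lruvec advances the nonresident_age of B, then of
 * A, then of the root cgroup, mirroring the virtual LRU nesting
 * described above.
 */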

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page's memory cgroup pointer */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
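
/*
 * The shadow entry returned above is installed by the reclaim side
 * (see __remove_mapping() in mm/vmscan.c) in the page cache slot the
 * page vacated, where a later lookup will find it and hand it to
 * workingset_refault().
 */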

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;
	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;
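
	/*
	 * To illustrate the wraparound behavior with made-up numbers,
	 * imagine an 8-bit counter: eviction = 250, refault = 10
	 * after the counter wrapped. (10 - 250) & 0xff == 16, the
	 * true distance, so the subtraction stays correct as long as
	 * the counter has not lapped the entry.
	 */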

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}
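
	/*
	 * To summarize the accumulation above: a file refault competes
	 * with NR_ACTIVE_FILE (plus NR_ACTIVE_ANON and NR_INACTIVE_ANON
	 * when swap is available); an anon refault competes with
	 * NR_ACTIVE_FILE and NR_INACTIVE_FILE (plus NR_ACTIVE_ANON when
	 * swap is available).
	 */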
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		lru_note_cost_page(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}
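
/*
 * To recap the counters above: every refault of a shadow entry bumps
 * WORKINGSET_REFAULT; refaults within the workingset size also bump
 * WORKINGSET_ACTIVATE; and those that were additionally active before
 * eviction bump WORKINGSET_RESTORE as well.
 */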

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_kmem_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);
	if (!nodes)
		return SHRINK_EMPTY;

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

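	/*
	 * With the default XA_CHUNK_SHIFT of 6 (64 slots per node),
	 * this allows one shadow node per 8 pages - the worst-case
	 * density of 1/8th described above.
	 */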
	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
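
	/*
	 * For instance, on a hypothetical 64-bit config with
	 * NODES_SHIFT == 10, EVICTION_SHIFT is 2 + 10 + 16 = 28,
	 * leaving 36 timestamp bits - enough to timestamp 2^36
	 * pages, so bucket_order typically remains 0.
	 */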
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
		timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);