LCOV - code coverage report
Current view: top level - mm - compaction.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                   Hit     Total    Coverage
Lines:              68      1132       6.0 %
Functions:          10        62      16.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * linux/mm/compaction.c
       4             :  *
       5             :  * Memory compaction for the reduction of external fragmentation. Note that
       6             :  * this heavily depends upon page migration to do all the real heavy
       7             :  * lifting
       8             :  *
       9             :  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
      10             :  */
      11             : #include <linux/cpu.h>
      12             : #include <linux/swap.h>
      13             : #include <linux/migrate.h>
      14             : #include <linux/compaction.h>
      15             : #include <linux/mm_inline.h>
      16             : #include <linux/sched/signal.h>
      17             : #include <linux/backing-dev.h>
      18             : #include <linux/sysctl.h>
      19             : #include <linux/sysfs.h>
      20             : #include <linux/page-isolation.h>
      21             : #include <linux/kasan.h>
      22             : #include <linux/kthread.h>
      23             : #include <linux/freezer.h>
      24             : #include <linux/page_owner.h>
      25             : #include <linux/psi.h>
      26             : #include "internal.h"
      27             : 
      28             : #ifdef CONFIG_COMPACTION
      29           0 : static inline void count_compact_event(enum vm_event_item item)
      30             : {
      31           0 :         count_vm_event(item);
      32             : }
      33             : 
      34           0 : static inline void count_compact_events(enum vm_event_item item, long delta)
      35             : {
      36           0 :         count_vm_events(item, delta);
      37           0 : }
      38             : #else
      39             : #define count_compact_event(item) do { } while (0)
      40             : #define count_compact_events(item, delta) do { } while (0)
      41             : #endif
      42             : 
      43             : #if defined CONFIG_COMPACTION || defined CONFIG_CMA
      44             : 
      45             : #define CREATE_TRACE_POINTS
      46             : #include <trace/events/compaction.h>
      47             : 
      48             : #define block_start_pfn(pfn, order)     round_down(pfn, 1UL << (order))
      49             : #define block_end_pfn(pfn, order)       ALIGN((pfn) + 1, 1UL << (order))
      50             : #define pageblock_start_pfn(pfn)        block_start_pfn(pfn, pageblock_order)
      51             : #define pageblock_end_pfn(pfn)          block_end_pfn(pfn, pageblock_order)
      52             : 
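/*
 * Editor's note: illustrative sketch, not part of mm/compaction.c. The helpers
 * above round a PFN to its enclosing pageblock. Assuming 4K pages and
 * pageblock_order == 9 (512-page, 2MB pageblocks), for pfn == 1000:
 *
 *   block_start_pfn(1000, 9) == round_down(1000, 512) == 512
 *   block_end_pfn(1000, 9)   == ALIGN(1001, 512)      == 1024
 *
 * so the enclosing pageblock spans PFNs [512, 1024).
 */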
      53             : /*
      54             :  * Fragmentation score check interval for proactive compaction purposes.
      55             :  */
      56             : static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
      57             : 
      58             : /*
      59             :  * Page order with-respect-to which proactive compaction
      60             :  * calculates external fragmentation, which is used as
      61             :  * the "fragmentation score" of a node/zone.
      62             :  */
      63             : #if defined CONFIG_TRANSPARENT_HUGEPAGE
      64             : #define COMPACTION_HPAGE_ORDER  HPAGE_PMD_ORDER
      65             : #elif defined CONFIG_HUGETLBFS
      66             : #define COMPACTION_HPAGE_ORDER  HUGETLB_PAGE_ORDER
      67             : #else
      68             : #define COMPACTION_HPAGE_ORDER  (PMD_SHIFT - PAGE_SHIFT)
      69             : #endif
      70             : 
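/*
 * Editor's note: illustrative example, not part of mm/compaction.c. On x86-64
 * with 4K pages, all three branches above resolve to order 9 (2MB), i.e.
 * HPAGE_PMD_ORDER == HUGETLB_PAGE_ORDER == PMD_SHIFT - PAGE_SHIFT == 9, so the
 * fragmentation score is computed with respect to huge-page-sized allocations.
 */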
      71           0 : static unsigned long release_freepages(struct list_head *freelist)
      72             : {
      73           0 :         struct page *page, *next;
      74           0 :         unsigned long high_pfn = 0;
      75             : 
      76           0 :         list_for_each_entry_safe(page, next, freelist, lru) {
      77           0 :                 unsigned long pfn = page_to_pfn(page);
      78           0 :                 list_del(&page->lru);
      79           0 :                 __free_page(page);
      80           0 :                 if (pfn > high_pfn)
      81             :                         high_pfn = pfn;
      82             :         }
      83             : 
      84           0 :         return high_pfn;
      85             : }
      86             : 
      87           0 : static void split_map_pages(struct list_head *list)
      88             : {
      89           0 :         unsigned int i, order, nr_pages;
      90           0 :         struct page *page, *next;
      91           0 :         LIST_HEAD(tmp_list);
      92             : 
      93           0 :         list_for_each_entry_safe(page, next, list, lru) {
      94           0 :                 list_del(&page->lru);
      95             : 
      96           0 :                 order = page_private(page);
      97           0 :                 nr_pages = 1 << order;
      98             : 
      99           0 :                 post_alloc_hook(page, order, __GFP_MOVABLE);
     100           0 :                 if (order)
     101           0 :                         split_page(page, order);
     102             : 
     103           0 :                 for (i = 0; i < nr_pages; i++) {
     104           0 :                         list_add(&page->lru, &tmp_list);
     105           0 :                         page++;
     106             :                 }
     107             :         }
     108             : 
     109           0 :         list_splice(&tmp_list, list);
     110           0 : }
     111             : 
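/*
 * Editor's note: illustrative example, not part of mm/compaction.c.
 * isolate_freepages_block() below records each free page's buddy order in
 * page_private(); split_map_pages() then expands every entry into order-0
 * pages, e.g. a single order-3 entry becomes eight consecutive single pages
 * on the rebuilt list, after being prepared once by post_alloc_hook().
 */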
     112             : #ifdef CONFIG_COMPACTION
     113             : 
     114           0 : int PageMovable(struct page *page)
     115             : {
     116           0 :         struct address_space *mapping;
     117             : 
     118           0 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     119           0 :         if (!__PageMovable(page))
     120             :                 return 0;
     121             : 
     122           0 :         mapping = page_mapping(page);
     123           0 :         if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
     124           0 :                 return 1;
     125             : 
     126             :         return 0;
     127             : }
     128             : EXPORT_SYMBOL(PageMovable);
     129             : 
     130           0 : void __SetPageMovable(struct page *page, struct address_space *mapping)
     131             : {
     132           0 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     133           0 :         VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
     134           0 :         page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
     135           0 : }
     136             : EXPORT_SYMBOL(__SetPageMovable);
     137             : 
     138           0 : void __ClearPageMovable(struct page *page)
     139             : {
     140           0 :         VM_BUG_ON_PAGE(!PageMovable(page), page);
     141             :         /*
      142             :          * Clear the registered address_space value while keeping the
      143             :          * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page released
      144             :          * by the driver after isolation; migration then doesn't try to put it back.
     145             :          */
     146           0 :         page->mapping = (void *)((unsigned long)page->mapping &
     147             :                                 PAGE_MAPPING_MOVABLE);
     148           0 : }
     149             : EXPORT_SYMBOL(__ClearPageMovable);
     150             : 
     151             : /* Do not skip compaction more than 64 times */
     152             : #define COMPACT_MAX_DEFER_SHIFT 6
     153             : 
     154             : /*
      155             :  * Compaction is deferred when compaction fails to result in a page
      156             :  * allocation success. The next 1 << compact_defer_shift compactions are
      157             :  * skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
     158             :  */
     159           0 : static void defer_compaction(struct zone *zone, int order)
     160             : {
     161           0 :         zone->compact_considered = 0;
     162           0 :         zone->compact_defer_shift++;
     163             : 
     164           0 :         if (order < zone->compact_order_failed)
     165           0 :                 zone->compact_order_failed = order;
     166             : 
     167           0 :         if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
     168           0 :                 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
     169             : 
     170           0 :         trace_mm_compaction_defer_compaction(zone, order);
     171           0 : }
     172             : 
     173             : /* Returns true if compaction should be skipped this time */
     174           0 : static bool compaction_deferred(struct zone *zone, int order)
     175             : {
     176           0 :         unsigned long defer_limit = 1UL << zone->compact_defer_shift;
     177             : 
     178           0 :         if (order < zone->compact_order_failed)
     179             :                 return false;
     180             : 
     181             :         /* Avoid possible overflow */
     182           0 :         if (++zone->compact_considered >= defer_limit) {
     183           0 :                 zone->compact_considered = defer_limit;
     184           0 :                 return false;
     185             :         }
     186             : 
     187           0 :         trace_mm_compaction_deferred(zone, order);
     188             : 
     189           0 :         return true;
     190             : }
     191             : 
     192             : /*
     193             :  * Update defer tracking counters after successful compaction of given order,
     194             :  * which means an allocation either succeeded (alloc_success == true) or is
     195             :  * expected to succeed.
     196             :  */
     197           0 : void compaction_defer_reset(struct zone *zone, int order,
     198             :                 bool alloc_success)
     199             : {
     200           0 :         if (alloc_success) {
     201           0 :                 zone->compact_considered = 0;
     202           0 :                 zone->compact_defer_shift = 0;
     203             :         }
     204           0 :         if (order >= zone->compact_order_failed)
     205           0 :                 zone->compact_order_failed = order + 1;
     206             : 
     207           0 :         trace_mm_compaction_defer_reset(zone, order);
     208           0 : }
     209             : 
     210             : /* Returns true if restarting compaction after many failures */
     211           0 : static bool compaction_restarting(struct zone *zone, int order)
     212             : {
     213           0 :         if (order < zone->compact_order_failed)
     214             :                 return false;
     215             : 
     216           0 :         return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
     217           0 :                 zone->compact_considered >= 1UL << zone->compact_defer_shift;
     218             : }
     219             : 
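/*
 * Editor's note: illustrative summary, not part of mm/compaction.c. The defer
 * helpers above implement an exponential backoff on failed compaction:
 *
 *   defer_compaction():       compact_defer_shift++ (capped at
 *                             COMPACT_MAX_DEFER_SHIFT == 6), counter reset to 0
 *   compaction_deferred():    skip while ++counter < (1 << compact_defer_shift)
 *   compaction_defer_reset(): on allocation success, shift and counter both
 *                             return to 0
 *   compaction_restarting():  true once the backoff is saturated and the
 *                             deferral window is used up, so a caller can
 *                             afford to reset skip hints and try a full scan
 */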
     220             : /* Returns true if the pageblock should be scanned for pages to isolate. */
     221           0 : static inline bool isolation_suitable(struct compact_control *cc,
     222             :                                         struct page *page)
     223             : {
     224           0 :         if (cc->ignore_skip_hint)
     225             :                 return true;
     226             : 
     227           0 :         return !get_pageblock_skip(page);
     228             : }
     229             : 
     230           0 : static void reset_cached_positions(struct zone *zone)
     231             : {
     232           0 :         zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
     233           0 :         zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
     234           0 :         zone->compact_cached_free_pfn =
     235           0 :                                 pageblock_start_pfn(zone_end_pfn(zone) - 1);
     236             : }
     237             : 
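/*
 * Editor's note: illustrative note, not part of mm/compaction.c. The cached
 * positions above encode the usual two-scanner layout: the migrate scanner
 * restarts at the bottom of the zone while the free scanner restarts at the
 * zone's last pageblock, and the two walk toward each other during compaction.
 */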
     238             : /*
     239             :  * Compound pages of >= pageblock_order should consistently be skipped until
     240             :  * released. It is always pointless to compact pages of such order (if they are
     241             :  * migratable), and the pageblocks they occupy cannot contain any free pages.
     242             :  */
     243           0 : static bool pageblock_skip_persistent(struct page *page)
     244             : {
     245           0 :         if (!PageCompound(page))
     246             :                 return false;
     247             : 
     248           0 :         page = compound_head(page);
     249             : 
     250           0 :         if (compound_order(page) >= pageblock_order)
     251           0 :                 return true;
     252             : 
     253             :         return false;
     254             : }
     255             : 
     256             : static bool
     257           0 : __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
     258             :                                                         bool check_target)
     259             : {
     260           0 :         struct page *page = pfn_to_online_page(pfn);
     261           0 :         struct page *block_page;
     262           0 :         struct page *end_page;
     263           0 :         unsigned long block_pfn;
     264             : 
     265           0 :         if (!page)
     266             :                 return false;
     267           0 :         if (zone != page_zone(page))
     268             :                 return false;
     269           0 :         if (pageblock_skip_persistent(page))
     270             :                 return false;
     271             : 
     272             :         /*
     273             :          * If skip is already cleared do no further checking once the
     274             :          * restart points have been set.
     275             :          */
     276           0 :         if (check_source && check_target && !get_pageblock_skip(page))
     277             :                 return true;
     278             : 
     279             :         /*
     280             :          * If clearing skip for the target scanner, do not select a
     281             :          * non-movable pageblock as the starting point.
     282             :          */
     283           0 :         if (!check_source && check_target &&
     284           0 :             get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
     285             :                 return false;
     286             : 
     287             :         /* Ensure the start of the pageblock or zone is online and valid */
     288           0 :         block_pfn = pageblock_start_pfn(pfn);
     289           0 :         block_pfn = max(block_pfn, zone->zone_start_pfn);
     290           0 :         block_page = pfn_to_online_page(block_pfn);
     291           0 :         if (block_page) {
     292           0 :                 page = block_page;
     293           0 :                 pfn = block_pfn;
     294             :         }
     295             : 
     296             :         /* Ensure the end of the pageblock or zone is online and valid */
     297           0 :         block_pfn = pageblock_end_pfn(pfn) - 1;
     298           0 :         block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
     299           0 :         end_page = pfn_to_online_page(block_pfn);
     300           0 :         if (!end_page)
     301             :                 return false;
     302             : 
     303             :         /*
     304             :          * Only clear the hint if a sample indicates there is either a
     305             :          * free page or an LRU page in the block. One or other condition
     306             :          * is necessary for the block to be a migration source/target.
     307             :          */
     308           0 :         do {
     309           0 :                 if (pfn_valid_within(pfn)) {
     310           0 :                         if (check_source && PageLRU(page)) {
     311           0 :                                 clear_pageblock_skip(page);
     312           0 :                                 return true;
     313             :                         }
     314             : 
     315           0 :                         if (check_target && PageBuddy(page)) {
     316           0 :                                 clear_pageblock_skip(page);
     317           0 :                                 return true;
     318             :                         }
     319             :                 }
     320             : 
     321           0 :                 page += (1 << PAGE_ALLOC_COSTLY_ORDER);
     322           0 :                 pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
     323           0 :         } while (page <= end_page);
     324             : 
     325             :         return false;
     326             : }
     327             : 
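/*
 * Editor's note: illustrative detail, not part of mm/compaction.c. The
 * sampling loop above probes one page every 1 << PAGE_ALLOC_COSTLY_ORDER
 * PFNs (every 8th page with PAGE_ALLOC_COSTLY_ORDER == 3), so a 512-page
 * pageblock is sampled at 64 positions instead of being walked in full when
 * deciding whether to clear its skip hint.
 */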
     328             : /*
     329             :  * This function is called to clear all cached information on pageblocks that
     330             :  * should be skipped for page isolation when the migrate and free page scanner
     331             :  * meet.
     332             :  */
     333           0 : static void __reset_isolation_suitable(struct zone *zone)
     334             : {
     335           0 :         unsigned long migrate_pfn = zone->zone_start_pfn;
     336           0 :         unsigned long free_pfn = zone_end_pfn(zone) - 1;
     337           0 :         unsigned long reset_migrate = free_pfn;
     338           0 :         unsigned long reset_free = migrate_pfn;
     339           0 :         bool source_set = false;
     340           0 :         bool free_set = false;
     341             : 
     342           0 :         if (!zone->compact_blockskip_flush)
     343             :                 return;
     344             : 
     345           0 :         zone->compact_blockskip_flush = false;
     346             : 
     347             :         /*
     348             :          * Walk the zone and update pageblock skip information. Source looks
      349             :          * for PageLRU while target looks for PageBuddy. Once a scanner's
      350             :          * restart point is found, both PageBuddy and PageLRU are checked as
      351             :          * the pageblock is suitable as both source and target.
     352             :          */
     353           0 :         for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
     354           0 :                                         free_pfn -= pageblock_nr_pages) {
     355           0 :                 cond_resched();
     356             : 
     357             :                 /* Update the migrate PFN */
     358           0 :                 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
     359             :                     migrate_pfn < reset_migrate) {
     360           0 :                         source_set = true;
     361           0 :                         reset_migrate = migrate_pfn;
     362           0 :                         zone->compact_init_migrate_pfn = reset_migrate;
     363           0 :                         zone->compact_cached_migrate_pfn[0] = reset_migrate;
     364           0 :                         zone->compact_cached_migrate_pfn[1] = reset_migrate;
     365             :                 }
     366             : 
     367             :                 /* Update the free PFN */
     368           0 :                 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
     369             :                     free_pfn > reset_free) {
     370           0 :                         free_set = true;
     371           0 :                         reset_free = free_pfn;
     372           0 :                         zone->compact_init_free_pfn = reset_free;
     373           0 :                         zone->compact_cached_free_pfn = reset_free;
     374             :                 }
     375             :         }
     376             : 
     377             :         /* Leave no distance if no suitable block was reset */
     378           0 :         if (reset_migrate >= reset_free) {
     379           0 :                 zone->compact_cached_migrate_pfn[0] = migrate_pfn;
     380           0 :                 zone->compact_cached_migrate_pfn[1] = migrate_pfn;
     381           0 :                 zone->compact_cached_free_pfn = free_pfn;
     382             :         }
     383             : }
     384             : 
     385           1 : void reset_isolation_suitable(pg_data_t *pgdat)
     386             : {
     387           1 :         int zoneid;
     388             : 
     389           4 :         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
     390           3 :                 struct zone *zone = &pgdat->node_zones[zoneid];
     391           3 :                 if (!populated_zone(zone))
     392           2 :                         continue;
     393             : 
     394             :                 /* Only flush if a full compaction finished recently */
     395           1 :                 if (zone->compact_blockskip_flush)
     396           0 :                         __reset_isolation_suitable(zone);
     397             :         }
     398           1 : }
     399             : 
     400             : /*
     401             :  * Sets the pageblock skip bit if it was clear. Note that this is a hint as
      402             :  * locks are not required for readers/writers. Returns true if it was already set.
     403             :  */
     404           0 : static bool test_and_set_skip(struct compact_control *cc, struct page *page,
     405             :                                                         unsigned long pfn)
     406             : {
     407           0 :         bool skip;
     408             : 
     409             :         /* Do no update if skip hint is being ignored */
     410           0 :         if (cc->ignore_skip_hint)
     411             :                 return false;
     412             : 
     413           0 :         if (!IS_ALIGNED(pfn, pageblock_nr_pages))
     414             :                 return false;
     415             : 
     416           0 :         skip = get_pageblock_skip(page);
     417           0 :         if (!skip && !cc->no_set_skip_hint)
     418           0 :                 set_pageblock_skip(page);
     419             : 
     420             :         return skip;
     421             : }
     422             : 
     423           0 : static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
     424             : {
     425           0 :         struct zone *zone = cc->zone;
     426             : 
     427           0 :         pfn = pageblock_end_pfn(pfn);
     428             : 
     429             :         /* Set for isolation rather than compaction */
     430           0 :         if (cc->no_set_skip_hint)
     431             :                 return;
     432             : 
     433           0 :         if (pfn > zone->compact_cached_migrate_pfn[0])
     434           0 :                 zone->compact_cached_migrate_pfn[0] = pfn;
     435           0 :         if (cc->mode != MIGRATE_ASYNC &&
     436           0 :             pfn > zone->compact_cached_migrate_pfn[1])
     437           0 :                 zone->compact_cached_migrate_pfn[1] = pfn;
     438             : }
     439             : 
     440             : /*
     441             :  * If no pages were isolated then mark this pageblock to be skipped in the
     442             :  * future. The information is later cleared by __reset_isolation_suitable().
     443             :  */
     444           0 : static void update_pageblock_skip(struct compact_control *cc,
     445             :                         struct page *page, unsigned long pfn)
     446             : {
     447           0 :         struct zone *zone = cc->zone;
     448             : 
     449           0 :         if (cc->no_set_skip_hint)
     450             :                 return;
     451             : 
     452           0 :         if (!page)
     453             :                 return;
     454             : 
     455           0 :         set_pageblock_skip(page);
     456             : 
     457             :         /* Update where async and sync compaction should restart */
     458           0 :         if (pfn < zone->compact_cached_free_pfn)
     459           0 :                 zone->compact_cached_free_pfn = pfn;
     460             : }
     461             : #else
     462             : static inline bool isolation_suitable(struct compact_control *cc,
     463             :                                         struct page *page)
     464             : {
     465             :         return true;
     466             : }
     467             : 
     468             : static inline bool pageblock_skip_persistent(struct page *page)
     469             : {
     470             :         return false;
     471             : }
     472             : 
     473             : static inline void update_pageblock_skip(struct compact_control *cc,
     474             :                         struct page *page, unsigned long pfn)
     475             : {
     476             : }
     477             : 
     478             : static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
     479             : {
     480             : }
     481             : 
     482             : static bool test_and_set_skip(struct compact_control *cc, struct page *page,
     483             :                                                         unsigned long pfn)
     484             : {
     485             :         return false;
     486             : }
     487             : #endif /* CONFIG_COMPACTION */
     488             : 
     489             : /*
     490             :  * Compaction requires the taking of some coarse locks that are potentially
     491             :  * very heavily contended. For async compaction, trylock and record if the
     492             :  * lock is contended. The lock will still be acquired but compaction will
     493             :  * abort when the current block is finished regardless of success rate.
     494             :  * Sync compaction acquires the lock.
     495             :  *
     496             :  * Always returns true which makes it easier to track lock state in callers.
     497             :  */
     498           0 : static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
     499             :                                                 struct compact_control *cc)
     500             :         __acquires(lock)
     501             : {
     502             :         /* Track if the lock is contended in async mode */
     503           0 :         if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
     504           0 :                 if (spin_trylock_irqsave(lock, *flags))
     505             :                         return true;
     506             : 
     507           0 :                 cc->contended = true;
     508             :         }
     509             : 
     510           0 :         spin_lock_irqsave(lock, *flags);
     511           0 :         return true;
     512             : }
     513             : 
     514             : /*
     515             :  * Compaction requires the taking of some coarse locks that are potentially
     516             :  * very heavily contended. The lock should be periodically unlocked to avoid
     517             :  * having disabled IRQs for a long time, even when there is nobody waiting on
      518             :  * the lock. It might also be that allowing the IRQs will result in
      519             :  * need_resched() becoming true; if so, the function reschedules via
      520             :  * cond_resched() before returning.
      521             :  * Compaction aborts if a fatal signal is pending.
      522             :  * In all cases, if the lock was held it is dropped and not regained here.
      523             :  *
      524             :  * Returns true if compaction should abort due to a fatal signal
      525             :  *              pending; cc->contended is set in that case
      526             :  * Returns false when compaction can continue (possibly after having
      527             :  *              rescheduled)
     528             :  */
     529           0 : static bool compact_unlock_should_abort(spinlock_t *lock,
     530             :                 unsigned long flags, bool *locked, struct compact_control *cc)
     531             : {
     532           0 :         if (*locked) {
     533           0 :                 spin_unlock_irqrestore(lock, flags);
     534           0 :                 *locked = false;
     535             :         }
     536             : 
     537           0 :         if (fatal_signal_pending(current)) {
     538           0 :                 cc->contended = true;
     539           0 :                 return true;
     540             :         }
     541             : 
     542           0 :         cond_resched();
     543             : 
     544           0 :         return false;
     545             : }
     546             : 
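/*
 * Editor's note: illustrative sketch, not part of mm/compaction.c. The two
 * lock helpers above are used by the scanners roughly as follows: the lock is
 * taken lazily only when a page is about to be isolated, and dropped every
 * SWAP_CLUSTER_MAX PFNs so IRQs are not left disabled for long:
 *
 *   if (!(pfn % SWAP_CLUSTER_MAX) &&
 *       compact_unlock_should_abort(&zone->lock, flags, &locked, cc))
 *           break;                  // fatal signal pending: stop the scan
 *   ...
 *   if (!locked)
 *           locked = compact_lock_irqsave(&zone->lock, &flags, cc);
 */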
     547             : /*
      548             :  * Isolate free pages onto a private freelist. If @strict is true, abort and
      549             :  * return 0 on any invalid PFN or non-free page inside the pageblock
     550             :  * (even though it may still end up isolating some pages).
     551             :  */
     552           0 : static unsigned long isolate_freepages_block(struct compact_control *cc,
     553             :                                 unsigned long *start_pfn,
     554             :                                 unsigned long end_pfn,
     555             :                                 struct list_head *freelist,
     556             :                                 unsigned int stride,
     557             :                                 bool strict)
     558             : {
     559           0 :         int nr_scanned = 0, total_isolated = 0;
     560           0 :         struct page *cursor;
     561           0 :         unsigned long flags = 0;
     562           0 :         bool locked = false;
     563           0 :         unsigned long blockpfn = *start_pfn;
     564           0 :         unsigned int order;
     565             : 
     566             :         /* Strict mode is for isolation, speed is secondary */
     567           0 :         if (strict)
     568           0 :                 stride = 1;
     569             : 
     570           0 :         cursor = pfn_to_page(blockpfn);
     571             : 
     572             :         /* Isolate free pages. */
     573           0 :         for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
     574           0 :                 int isolated;
     575           0 :                 struct page *page = cursor;
     576             : 
     577             :                 /*
     578             :                  * Periodically drop the lock (if held) regardless of its
     579             :                  * contention, to give chance to IRQs. Abort if fatal signal
     580             :                  * pending or async compaction detects need_resched()
     581             :                  */
     582           0 :                 if (!(blockpfn % SWAP_CLUSTER_MAX)
     583           0 :                     && compact_unlock_should_abort(&cc->zone->lock, flags,
     584             :                                                                 &locked, cc))
     585             :                         break;
     586             : 
     587           0 :                 nr_scanned++;
     588           0 :                 if (!pfn_valid_within(blockpfn))
     589             :                         goto isolate_fail;
     590             : 
     591             :                 /*
     592             :                  * For compound pages such as THP and hugetlbfs, we can save
     593             :                  * potentially a lot of iterations if we skip them at once.
     594             :                  * The check is racy, but we can consider only valid values
     595             :                  * and the only danger is skipping too much.
     596             :                  */
     597           0 :                 if (PageCompound(page)) {
     598           0 :                         const unsigned int order = compound_order(page);
     599             : 
     600           0 :                         if (likely(order < MAX_ORDER)) {
     601           0 :                                 blockpfn += (1UL << order) - 1;
     602           0 :                                 cursor += (1UL << order) - 1;
     603             :                         }
     604           0 :                         goto isolate_fail;
     605             :                 }
     606             : 
     607           0 :                 if (!PageBuddy(page))
     608           0 :                         goto isolate_fail;
     609             : 
     610             :                 /*
     611             :                  * If we already hold the lock, we can skip some rechecking.
     612             :                  * Note that if we hold the lock now, checked_pageblock was
     613             :                  * already set in some previous iteration (or strict is true),
     614             :                  * so it is correct to skip the suitable migration target
     615             :                  * recheck as well.
     616             :                  */
     617           0 :                 if (!locked) {
     618           0 :                         locked = compact_lock_irqsave(&cc->zone->lock,
     619             :                                                                 &flags, cc);
     620             : 
     621             :                         /* Recheck this is a buddy page under lock */
     622           0 :                         if (!PageBuddy(page))
     623           0 :                                 goto isolate_fail;
     624             :                 }
     625             : 
     626             :                 /* Found a free page, will break it into order-0 pages */
     627           0 :                 order = buddy_order(page);
     628           0 :                 isolated = __isolate_free_page(page, order);
     629           0 :                 if (!isolated)
     630             :                         break;
     631           0 :                 set_page_private(page, order);
     632             : 
     633           0 :                 total_isolated += isolated;
     634           0 :                 cc->nr_freepages += isolated;
     635           0 :                 list_add_tail(&page->lru, freelist);
     636             : 
     637           0 :                 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
     638           0 :                         blockpfn += isolated;
     639           0 :                         break;
     640             :                 }
     641             :                 /* Advance to the end of split page */
     642           0 :                 blockpfn += isolated - 1;
     643           0 :                 cursor += isolated - 1;
     644           0 :                 continue;
     645             : 
     646           0 : isolate_fail:
     647           0 :                 if (strict)
     648             :                         break;
     649             :                 else
     650           0 :                         continue;
     651             : 
     652             :         }
     653             : 
     654           0 :         if (locked)
     655           0 :                 spin_unlock_irqrestore(&cc->zone->lock, flags);
     656             : 
     657             :         /*
     658             :          * There is a tiny chance that we have read bogus compound_order(),
     659             :          * so be careful to not go outside of the pageblock.
     660             :          */
     661           0 :         if (unlikely(blockpfn > end_pfn))
     662           0 :                 blockpfn = end_pfn;
     663             : 
     664           0 :         trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
     665             :                                         nr_scanned, total_isolated);
     666             : 
     667             :         /* Record how far we have got within the block */
     668           0 :         *start_pfn = blockpfn;
     669             : 
     670             :         /*
     671             :          * If strict isolation is requested by CMA then check that all the
     672             :          * pages requested were isolated. If there were any failures, 0 is
     673             :          * returned and CMA will fail.
     674             :          */
     675           0 :         if (strict && blockpfn < end_pfn)
     676           0 :                 total_isolated = 0;
     677             : 
     678           0 :         cc->total_free_scanned += nr_scanned;
     679           0 :         if (total_isolated)
     680           0 :                 count_compact_events(COMPACTISOLATED, total_isolated);
     681           0 :         return total_isolated;
     682             : }
     683             : 
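/*
 * Editor's note: illustrative summary, not part of mm/compaction.c. In strict
 * mode (used by isolate_freepages_range() below for CMA), any hole or
 * non-free page makes the whole call return 0 even if some pages were already
 * moved to the freelist; the caller is then responsible for giving them back
 * via release_freepages().
 */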
     684             : /**
     685             :  * isolate_freepages_range() - isolate free pages.
     686             :  * @cc:        Compaction control structure.
     687             :  * @start_pfn: The first PFN to start isolating.
     688             :  * @end_pfn:   The one-past-last PFN.
     689             :  *
      690             :  * Non-free pages, invalid PFNs, or zone boundaries within the
      691             :  * [start_pfn, end_pfn) range are considered errors and cause the
      692             :  * function to undo its actions and return zero.
      693             :  *
      694             :  * Otherwise, the function returns the one-past-the-last PFN of the
      695             :  * isolated pages (which may be greater than end_pfn if the end fell in
      696             :  * the middle of a free page).
     697             :  */
     698             : unsigned long
     699           0 : isolate_freepages_range(struct compact_control *cc,
     700             :                         unsigned long start_pfn, unsigned long end_pfn)
     701             : {
     702           0 :         unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
     703           0 :         LIST_HEAD(freelist);
     704             : 
     705           0 :         pfn = start_pfn;
     706           0 :         block_start_pfn = pageblock_start_pfn(pfn);
     707           0 :         if (block_start_pfn < cc->zone->zone_start_pfn)
     708             :                 block_start_pfn = cc->zone->zone_start_pfn;
     709           0 :         block_end_pfn = pageblock_end_pfn(pfn);
     710             : 
     711           0 :         for (; pfn < end_pfn; pfn += isolated,
     712           0 :                                 block_start_pfn = block_end_pfn,
     713           0 :                                 block_end_pfn += pageblock_nr_pages) {
     714             :                 /* Protect pfn from changing by isolate_freepages_block */
     715           0 :                 unsigned long isolate_start_pfn = pfn;
     716             : 
     717           0 :                 block_end_pfn = min(block_end_pfn, end_pfn);
     718             : 
     719             :                 /*
     720             :                  * pfn could pass the block_end_pfn if isolated freepage
     721             :                  * is more than pageblock order. In this case, we adjust
     722             :                  * scanning range to right one.
     723             :                  */
     724           0 :                 if (pfn >= block_end_pfn) {
     725           0 :                         block_start_pfn = pageblock_start_pfn(pfn);
     726           0 :                         block_end_pfn = pageblock_end_pfn(pfn);
     727           0 :                         block_end_pfn = min(block_end_pfn, end_pfn);
     728             :                 }
     729             : 
     730           0 :                 if (!pageblock_pfn_to_page(block_start_pfn,
     731             :                                         block_end_pfn, cc->zone))
     732             :                         break;
     733             : 
     734           0 :                 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
     735             :                                         block_end_pfn, &freelist, 0, true);
     736             : 
     737             :                 /*
     738             :                  * In strict mode, isolate_freepages_block() returns 0 if
     739             :                  * there are any holes in the block (ie. invalid PFNs or
     740             :                  * non-free pages).
     741             :                  */
     742           0 :                 if (!isolated)
     743             :                         break;
     744             : 
     745             :                 /*
     746             :                  * If we managed to isolate pages, it is always (1 << n) *
     747             :                  * pageblock_nr_pages for some non-negative n.  (Max order
     748             :                  * page may span two pageblocks).
     749             :                  */
     750             :         }
     751             : 
     752             :         /* __isolate_free_page() does not map the pages */
     753           0 :         split_map_pages(&freelist);
     754             : 
     755           0 :         if (pfn < end_pfn) {
     756             :                 /* Loop terminated early, cleanup. */
     757           0 :                 release_freepages(&freelist);
     758           0 :                 return 0;
     759             :         }
     760             : 
     761             :         /* We don't use freelists for anything. */
     762             :         return pfn;
     763             : }
     764             : 
     765             : /* Similar to reclaim, but different enough that they don't share logic */
     766           0 : static bool too_many_isolated(pg_data_t *pgdat)
     767             : {
     768           0 :         unsigned long active, inactive, isolated;
     769             : 
     770           0 :         inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
     771           0 :                         node_page_state(pgdat, NR_INACTIVE_ANON);
     772           0 :         active = node_page_state(pgdat, NR_ACTIVE_FILE) +
     773           0 :                         node_page_state(pgdat, NR_ACTIVE_ANON);
     774           0 :         isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
     775           0 :                         node_page_state(pgdat, NR_ISOLATED_ANON);
     776             : 
     777           0 :         return isolated > (inactive + active) / 2;
     778             : }
     779             : 
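/*
 * Editor's note: illustrative example, not part of mm/compaction.c. The check
 * above throttles isolation once the number of isolated pages exceeds half of
 * what remains on the node's LRU lists, e.g. with 800 inactive + 200 active
 * file/anon pages, isolation waits as soon as more than 500 pages are isolated.
 */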
     780             : /**
     781             :  * isolate_migratepages_block() - isolate all migrate-able pages within
     782             :  *                                a single pageblock
     783             :  * @cc:         Compaction control structure.
     784             :  * @low_pfn:    The first PFN to isolate
     785             :  * @end_pfn:    The one-past-the-last PFN to isolate, within same pageblock
     786             :  * @isolate_mode: Isolation mode to be used.
     787             :  *
     788             :  * Isolate all pages that can be migrated from the range specified by
     789             :  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
      790             :  * Returns zero if there is a fatal signal pending, otherwise the PFN of the
      791             :  * first page that was not scanned (which may be less than, equal to, or
      792             :  * greater than end_pfn).
     793             :  *
     794             :  * The pages are isolated on cc->migratepages list (not required to be empty),
     795             :  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
     796             :  * is neither read nor updated.
     797             :  */
     798             : static unsigned long
     799           0 : isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
     800             :                         unsigned long end_pfn, isolate_mode_t isolate_mode)
     801             : {
     802           0 :         pg_data_t *pgdat = cc->zone->zone_pgdat;
     803           0 :         unsigned long nr_scanned = 0, nr_isolated = 0;
     804           0 :         struct lruvec *lruvec;
     805           0 :         unsigned long flags = 0;
     806           0 :         struct lruvec *locked = NULL;
     807           0 :         struct page *page = NULL, *valid_page = NULL;
     808           0 :         unsigned long start_pfn = low_pfn;
     809           0 :         bool skip_on_failure = false;
     810           0 :         unsigned long next_skip_pfn = 0;
     811           0 :         bool skip_updated = false;
     812             : 
     813             :         /*
     814             :          * Ensure that there are not too many pages isolated from the LRU
     815             :          * list by either parallel reclaimers or compaction. If there are,
     816             :          * delay for some time until fewer pages are isolated
     817             :          */
     818           0 :         while (unlikely(too_many_isolated(pgdat))) {
     819             :                 /* stop isolation if there are still pages not migrated */
     820           0 :                 if (cc->nr_migratepages)
     821             :                         return 0;
     822             : 
     823             :                 /* async migration should just abort */
     824           0 :                 if (cc->mode == MIGRATE_ASYNC)
     825             :                         return 0;
     826             : 
     827           0 :                 congestion_wait(BLK_RW_ASYNC, HZ/10);
     828             : 
     829           0 :                 if (fatal_signal_pending(current))
     830             :                         return 0;
     831             :         }
     832             : 
     833           0 :         cond_resched();
     834             : 
     835           0 :         if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
     836           0 :                 skip_on_failure = true;
     837           0 :                 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
     838             :         }
     839             : 
     840             :         /* Time to isolate some pages for migration */
     841           0 :         for (; low_pfn < end_pfn; low_pfn++) {
     842             : 
     843           0 :                 if (skip_on_failure && low_pfn >= next_skip_pfn) {
     844             :                         /*
     845             :                          * We have isolated all migration candidates in the
     846             :                          * previous order-aligned block, and did not skip it due
     847             :                          * to failure. We should migrate the pages now and
     848             :                          * hopefully succeed compaction.
     849             :                          */
     850           0 :                         if (nr_isolated)
     851             :                                 break;
     852             : 
     853             :                         /*
     854             :                          * We failed to isolate in the previous order-aligned
     855             :                          * block. Set the new boundary to the end of the
     856             :                          * current block. Note we can't simply increase
     857             :                          * next_skip_pfn by 1 << order, as low_pfn might have
     858             :                          * been incremented by a higher number due to skipping
     859             :                          * a compound or a high-order buddy page in the
     860             :                          * previous loop iteration.
     861             :                          */
     862           0 :                         next_skip_pfn = block_end_pfn(low_pfn, cc->order);
     863             :                 }
     864             : 
     865             :                 /*
     866             :                  * Periodically drop the lock (if held) regardless of its
     867             :                  * contention, to give chance to IRQs. Abort completely if
     868             :                  * a fatal signal is pending.
     869             :                  */
     870           0 :                 if (!(low_pfn % SWAP_CLUSTER_MAX)) {
     871           0 :                         if (locked) {
     872           0 :                                 unlock_page_lruvec_irqrestore(locked, flags);
     873           0 :                                 locked = NULL;
     874             :                         }
     875             : 
     876           0 :                         if (fatal_signal_pending(current)) {
     877           0 :                                 cc->contended = true;
     878             : 
     879           0 :                                 low_pfn = 0;
     880           0 :                                 goto fatal_pending;
     881             :                         }
     882             : 
     883           0 :                         cond_resched();
     884             :                 }
     885             : 
     886           0 :                 if (!pfn_valid_within(low_pfn))
     887             :                         goto isolate_fail;
     888           0 :                 nr_scanned++;
     889             : 
     890           0 :                 page = pfn_to_page(low_pfn);
     891             : 
     892             :                 /*
     893             :                  * Check if the pageblock has already been marked skipped.
     894             :                  * Only the aligned PFN is checked as the caller isolates
     895             :                  * COMPACT_CLUSTER_MAX at a time so the second call must
     896             :                  * not falsely conclude that the block should be skipped.
     897             :                  */
     898           0 :                 if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
     899           0 :                         if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
     900           0 :                                 low_pfn = end_pfn;
     901           0 :                                 page = NULL;
     902           0 :                                 goto isolate_abort;
     903             :                         }
     904             :                         valid_page = page;
     905             :                 }
     906             : 
     907             :                 /*
     908             :                  * Skip if free. We read page order here without zone lock
     909             :                  * which is generally unsafe, but the race window is small and
     910             :                  * the worst thing that can happen is that we skip some
     911             :                  * potential isolation targets.
     912             :                  */
     913           0 :                 if (PageBuddy(page)) {
     914           0 :                         unsigned long freepage_order = buddy_order_unsafe(page);
     915             : 
     916             :                         /*
     917             :                          * Without lock, we cannot be sure that what we got is
     918             :                          * a valid page order. Consider only values in the
     919             :                          * valid order range to prevent low_pfn overflow.
     920             :                          */
     921           0 :                         if (freepage_order > 0 && freepage_order < MAX_ORDER)
     922           0 :                                 low_pfn += (1UL << freepage_order) - 1;
     923           0 :                         continue;
     924             :                 }
     925             : 
     926             :                 /*
     927             :                  * Regardless of being on LRU, compound pages such as THP and
     928             :                  * hugetlbfs are not to be compacted unless we are attempting
     929             :                  * an allocation much larger than the huge page size (eg CMA).
     930             :                  * We can potentially save a lot of iterations if we skip them
     931             :                  * at once. The check is racy, but we can consider only valid
     932             :                  * values and the only danger is skipping too much.
     933             :                  */
     934           0 :                 if (PageCompound(page) && !cc->alloc_contig) {
     935           0 :                         const unsigned int order = compound_order(page);
     936             : 
     937           0 :                         if (likely(order < MAX_ORDER))
     938           0 :                                 low_pfn += (1UL << order) - 1;
     939           0 :                         goto isolate_fail;
     940             :                 }
     941             : 
     942             :                 /*
     943             :                  * Check may be lockless but that's ok as we recheck later.
     944             :                  * It's possible to migrate LRU and non-lru movable pages.
     945             :                  * Skip any other type of page
     946             :                  */
     947           0 :                 if (!PageLRU(page)) {
     948             :                         /*
     949             :                          * __PageMovable can return false positive so we need
     950             :                          * to verify it under page_lock.
     951             :                          */
     952           0 :                         if (unlikely(__PageMovable(page)) &&
     953           0 :                                         !PageIsolated(page)) {
     954           0 :                                 if (locked) {
     955           0 :                                         unlock_page_lruvec_irqrestore(locked, flags);
     956           0 :                                         locked = NULL;
     957             :                                 }
     958             : 
     959           0 :                                 if (!isolate_movable_page(page, isolate_mode))
     960           0 :                                         goto isolate_success;
     961             :                         }
     962             : 
     963           0 :                         goto isolate_fail;
     964             :                 }
     965             : 
     966             :                 /*
     967             :                  * Migration will fail if an anonymous page is pinned in memory,
     968             :                  * so avoid taking lru_lock and isolating it unnecessarily in an
     969             :                  * admittedly racy check.
     970             :                  */
     971           0 :                 if (!page_mapping(page) &&
     972           0 :                     page_count(page) > page_mapcount(page))
     973           0 :                         goto isolate_fail;
     974             : 
     975             :                 /*
     976             :                  * Only allow to migrate anonymous pages in GFP_NOFS context
      977             :                  * Only allow migration of anonymous pages in GFP_NOFS context
     978             :                  */
     979           0 :                 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
     980           0 :                         goto isolate_fail;
     981             : 
     982             :                 /*
     983             :                  * Be careful not to clear PageLRU until after we're
     984             :                  * sure the page is not being freed elsewhere -- the
     985             :                  * page release code relies on it.
     986             :                  */
     987           0 :                 if (unlikely(!get_page_unless_zero(page)))
     988           0 :                         goto isolate_fail;
     989             : 
     990           0 :                 if (!__isolate_lru_page_prepare(page, isolate_mode))
     991           0 :                         goto isolate_fail_put;
     992             : 
      993             :                 /* Try to isolate the page */
     994           0 :                 if (!TestClearPageLRU(page))
     995           0 :                         goto isolate_fail_put;
     996             : 
     997           0 :                 lruvec = mem_cgroup_page_lruvec(page, pgdat);
     998             : 
     999             :                 /* If we already hold the lock, we can skip some rechecking */
    1000           0 :                 if (lruvec != locked) {
    1001           0 :                         if (locked)
    1002           0 :                                 unlock_page_lruvec_irqrestore(locked, flags);
    1003             : 
    1004           0 :                         compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
    1005           0 :                         locked = lruvec;
    1006             : 
    1007           0 :                         lruvec_memcg_debug(lruvec, page);
    1008             : 
     1009             :                         /* Try to get exclusive access under lock */
    1010           0 :                         if (!skip_updated) {
    1011           0 :                                 skip_updated = true;
    1012           0 :                                 if (test_and_set_skip(cc, page, low_pfn))
    1013           0 :                                         goto isolate_abort;
    1014             :                         }
    1015             : 
    1016             :                         /*
     1017             :                          * The page became compound since the non-locked check,
     1018             :                          * and it's on the LRU. It can only be a THP so the order
    1019             :                          * is safe to read and it's 0 for tail pages.
    1020             :                          */
    1021           0 :                         if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
    1022           0 :                                 low_pfn += compound_nr(page) - 1;
    1023           0 :                                 SetPageLRU(page);
    1024           0 :                                 goto isolate_fail_put;
    1025             :                         }
    1026             :                 }
    1027             : 
    1028             :                 /* The whole page is taken off the LRU; skip the tail pages. */
    1029           0 :                 if (PageCompound(page))
    1030           0 :                         low_pfn += compound_nr(page) - 1;
    1031             : 
    1032             :                 /* Successfully isolated */
    1033           0 :                 del_page_from_lru_list(page, lruvec);
    1034           0 :                 mod_node_page_state(page_pgdat(page),
    1035           0 :                                 NR_ISOLATED_ANON + page_is_file_lru(page),
    1036           0 :                                 thp_nr_pages(page));
    1037             : 
    1038           0 : isolate_success:
    1039           0 :                 list_add(&page->lru, &cc->migratepages);
    1040           0 :                 cc->nr_migratepages += compound_nr(page);
    1041           0 :                 nr_isolated += compound_nr(page);
    1042             : 
    1043             :                 /*
    1044             :                  * Avoid isolating too much unless this block is being
    1045             :                  * rescanned (e.g. dirty/writeback pages, parallel allocation)
    1046             :                  * or a lock is contended. For contention, isolate quickly to
    1047             :                  * potentially remove one source of contention.
    1048             :                  */
    1049           0 :                 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
    1050           0 :                     !cc->rescan && !cc->contended) {
    1051           0 :                         ++low_pfn;
    1052           0 :                         break;
    1053             :                 }
    1054             : 
    1055           0 :                 continue;
    1056             : 
    1057           0 : isolate_fail_put:
    1058             :                 /* Avoid potential deadlock in freeing page under lru_lock */
    1059           0 :                 if (locked) {
    1060           0 :                         unlock_page_lruvec_irqrestore(locked, flags);
    1061           0 :                         locked = NULL;
    1062             :                 }
    1063           0 :                 put_page(page);
    1064             : 
    1065           0 : isolate_fail:
    1066           0 :                 if (!skip_on_failure)
    1067           0 :                         continue;
    1068             : 
    1069             :                 /*
    1070             :                  * We have isolated some pages, but then failed. Release them
    1071             :                  * instead of migrating, as we cannot form the cc->order buddy
    1072             :                  * page anyway.
    1073             :                  */
    1074           0 :                 if (nr_isolated) {
    1075           0 :                         if (locked) {
    1076           0 :                                 unlock_page_lruvec_irqrestore(locked, flags);
    1077           0 :                                 locked = NULL;
    1078             :                         }
    1079           0 :                         putback_movable_pages(&cc->migratepages);
    1080           0 :                         cc->nr_migratepages = 0;
    1081           0 :                         nr_isolated = 0;
    1082             :                 }
    1083             : 
    1084           0 :                 if (low_pfn < next_skip_pfn) {
    1085           0 :                         low_pfn = next_skip_pfn - 1;
    1086             :                         /*
    1087             :                          * The check near the loop beginning would have updated
    1088             :                          * next_skip_pfn too, but this is a bit simpler.
    1089             :                          */
    1090           0 :                         next_skip_pfn += 1UL << cc->order;
    1091             :                 }
    1092             :         }
    1093             : 
    1094             :         /*
    1095             :          * The PageBuddy() check could have potentially brought us outside
    1096             :          * the range to be scanned.
    1097             :          */
    1098           0 :         if (unlikely(low_pfn > end_pfn))
    1099           0 :                 low_pfn = end_pfn;
    1100             : 
    1101             :         page = NULL;
    1102             : 
    1103           0 : isolate_abort:
    1104           0 :         if (locked)
    1105           0 :                 unlock_page_lruvec_irqrestore(locked, flags);
    1106           0 :         if (page) {
    1107           0 :                 SetPageLRU(page);
    1108           0 :                 put_page(page);
    1109             :         }
    1110             : 
    1111             :         /*
     1112             :          * Update the cached scanner pfn once the pageblock has been scanned.
     1113             :          * Pages will either be migrated, in which case there is no point
     1114             :          * scanning in the near future, or migration failed, in which case the
    1115             :          * failure reason may persist. The block is marked for skipping if
    1116             :          * there were no pages isolated in the block or if the block is
    1117             :          * rescanned twice in a row.
    1118             :          */
    1119           0 :         if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
    1120           0 :                 if (valid_page && !skip_updated)
    1121           0 :                         set_pageblock_skip(valid_page);
    1122           0 :                 update_cached_migrate(cc, low_pfn);
    1123             :         }
    1124             : 
    1125           0 :         trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
    1126             :                                                 nr_scanned, nr_isolated);
    1127             : 
    1128           0 : fatal_pending:
    1129           0 :         cc->total_migrate_scanned += nr_scanned;
    1130           0 :         if (nr_isolated)
    1131           0 :                 count_compact_events(COMPACTISOLATED, nr_isolated);
    1132             : 
    1133             :         return low_pfn;
    1134             : }
    1135             : 
    1136             : /**
    1137             :  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
    1138             :  * @cc:        Compaction control structure.
    1139             :  * @start_pfn: The first PFN to start isolating.
    1140             :  * @end_pfn:   The one-past-last PFN.
    1141             :  *
    1142             :  * Returns zero if isolation fails fatally due to e.g. pending signal.
     1143             :  * Otherwise, the function returns the one-past-the-last PFN of isolated pages
     1144             :  * (which may be greater than end_pfn if the end fell in the middle of a THP page).
    1145             :  */
    1146             : unsigned long
    1147           0 : isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
    1148             :                                                         unsigned long end_pfn)
    1149             : {
    1150           0 :         unsigned long pfn, block_start_pfn, block_end_pfn;
    1151             : 
    1152             :         /* Scan block by block. First and last block may be incomplete */
    1153           0 :         pfn = start_pfn;
    1154           0 :         block_start_pfn = pageblock_start_pfn(pfn);
    1155           0 :         if (block_start_pfn < cc->zone->zone_start_pfn)
    1156             :                 block_start_pfn = cc->zone->zone_start_pfn;
    1157           0 :         block_end_pfn = pageblock_end_pfn(pfn);
    1158             : 
    1159           0 :         for (; pfn < end_pfn; pfn = block_end_pfn,
    1160           0 :                                 block_start_pfn = block_end_pfn,
    1161           0 :                                 block_end_pfn += pageblock_nr_pages) {
    1162             : 
    1163           0 :                 block_end_pfn = min(block_end_pfn, end_pfn);
    1164             : 
    1165           0 :                 if (!pageblock_pfn_to_page(block_start_pfn,
    1166             :                                         block_end_pfn, cc->zone))
    1167           0 :                         continue;
    1168             : 
    1169           0 :                 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
    1170             :                                                         ISOLATE_UNEVICTABLE);
    1171             : 
    1172           0 :                 if (!pfn)
    1173             :                         break;
    1174             : 
    1175           0 :                 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
    1176             :                         break;
    1177             :         }
    1178             : 
    1179           0 :         return pfn;
    1180             : }
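
The pageblock_start_pfn()/pageblock_end_pfn() arithmetic used by the block-by-block loop above is plain power-of-two rounding. A minimal userspace sketch of the same rounding, assuming pageblock_order is 9 (a common configuration, so a pageblock spans 512 pages); pb_start_pfn()/pb_end_pfn() are hypothetical stand-ins, not kernel helpers:

#include <stdio.h>

#define PAGEBLOCK_ORDER    9UL                      /* assumed value */
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER) /* 512 pages per block */

/* Round a pfn down to the first pfn of its pageblock. */
static unsigned long pb_start_pfn(unsigned long pfn)
{
	return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

/* One past the last pfn of the pageblock containing @pfn. */
static unsigned long pb_end_pfn(unsigned long pfn)
{
	return pb_start_pfn(pfn) + PAGEBLOCK_NR_PAGES;
}

int main(void)
{
	unsigned long pfn = 1000;

	/* Prints "pfn 1000: pageblock [512, 1024)". */
	printf("pfn %lu: pageblock [%lu, %lu)\n",
	       pfn, pb_start_pfn(pfn), pb_end_pfn(pfn));
	return 0;
}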
    1181             : 
    1182             : #endif /* CONFIG_COMPACTION || CONFIG_CMA */
    1183             : #ifdef CONFIG_COMPACTION
    1184             : 
    1185           0 : static bool suitable_migration_source(struct compact_control *cc,
    1186             :                                                         struct page *page)
    1187             : {
    1188           0 :         int block_mt;
    1189             : 
    1190           0 :         if (pageblock_skip_persistent(page))
    1191             :                 return false;
    1192             : 
    1193           0 :         if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
    1194             :                 return true;
    1195             : 
    1196           0 :         block_mt = get_pageblock_migratetype(page);
    1197             : 
    1198           0 :         if (cc->migratetype == MIGRATE_MOVABLE)
    1199           0 :                 return is_migrate_movable(block_mt);
    1200             :         else
    1201           0 :                 return block_mt == cc->migratetype;
    1202             : }
    1203             : 
    1204             : /* Returns true if the page is within a block suitable for migration to */
    1205           0 : static bool suitable_migration_target(struct compact_control *cc,
    1206             :                                                         struct page *page)
    1207             : {
    1208             :         /* If the page is a large free page, then disallow migration */
    1209           0 :         if (PageBuddy(page)) {
    1210             :                 /*
    1211             :                  * We are checking page_order without zone->lock taken. But
    1212             :                  * the only small danger is that we skip a potentially suitable
     1213             :                  * pageblock, so it's not worth checking the order for a valid range.
    1214             :                  */
    1215           0 :                 if (buddy_order_unsafe(page) >= pageblock_order)
    1216             :                         return false;
    1217             :         }
    1218             : 
    1219           0 :         if (cc->ignore_block_suitable)
    1220             :                 return true;
    1221             : 
    1222             :         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
    1223           0 :         if (is_migrate_movable(get_pageblock_migratetype(page)))
    1224           0 :                 return true;
    1225             : 
    1226             :         /* Otherwise skip the block */
    1227             :         return false;
    1228             : }
    1229             : 
    1230             : static inline unsigned int
    1231           0 : freelist_scan_limit(struct compact_control *cc)
    1232             : {
    1233           0 :         unsigned short shift = BITS_PER_LONG - 1;
    1234             : 
    1235           0 :         return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
    1236             : }
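
For a feel of how this decays, the limit starts at COMPACT_CLUSTER_MAX + 1 and roughly halves with every fast-search failure. A standalone sketch of the same arithmetic, assuming COMPACT_CLUSTER_MAX is 32 (it is defined in terms of SWAP_CLUSTER_MAX, normally 32) and a 64-bit BITS_PER_LONG:

#include <stdio.h>

#define COMPACT_CLUSTER_MAX 32U                        /* assumed value */
#define SHIFT_MAX           (sizeof(unsigned long) * 8 - 1)

/* Mirror of the scan-limit formula: halve the budget per past failure,
 * but never return less than 1. */
static unsigned int scan_limit(unsigned int fast_search_fail)
{
	unsigned int shift = fast_search_fail < SHIFT_MAX ?
			     fast_search_fail : SHIFT_MAX;

	return (COMPACT_CLUSTER_MAX >> shift) + 1;
}

int main(void)
{
	/* Prints: 33 17 9 5 3 2 1 1 1 */
	for (unsigned int fail = 0; fail <= 8; fail++)
		printf("%u ", scan_limit(fail));
	printf("\n");
	return 0;
}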
    1237             : 
    1238             : /*
    1239             :  * Test whether the free scanner has reached the same or lower pageblock than
    1240             :  * the migration scanner, and compaction should thus terminate.
    1241             :  */
    1242           0 : static inline bool compact_scanners_met(struct compact_control *cc)
    1243             : {
    1244           0 :         return (cc->free_pfn >> pageblock_order)
    1245           0 :                 <= (cc->migrate_pfn >> pageblock_order);
    1246             : }
    1247             : 
    1248             : /*
    1249             :  * Used when scanning for a suitable migration target which scans freelists
     1250             :  * in reverse. Reorders the list so that the unscanned pages are scanned
     1251             :  * first on the next iteration of the free scanner.
    1252             :  */
    1253             : static void
    1254           0 : move_freelist_head(struct list_head *freelist, struct page *freepage)
    1255             : {
    1256           0 :         LIST_HEAD(sublist);
    1257             : 
    1258           0 :         if (!list_is_last(freelist, &freepage->lru)) {
    1259           0 :                 list_cut_before(&sublist, freelist, &freepage->lru);
    1260           0 :                 if (!list_empty(&sublist))
    1261           0 :                         list_splice_tail(&sublist, freelist);
    1262             :         }
    1263           0 : }
    1264             : 
    1265             : /*
    1266             :  * Similar to move_freelist_head except used by the migration scanner
    1267             :  * when scanning forward. It's possible for these list operations to
    1268             :  * move against each other if they search the free list exactly in
    1269             :  * lockstep.
    1270             :  */
    1271             : static void
    1272           0 : move_freelist_tail(struct list_head *freelist, struct page *freepage)
    1273             : {
    1274           0 :         LIST_HEAD(sublist);
    1275             : 
    1276           0 :         if (!list_is_first(freelist, &freepage->lru)) {
    1277           0 :                 list_cut_position(&sublist, freelist, &freepage->lru);
    1278           0 :                 if (!list_empty(&sublist))
    1279           0 :                         list_splice_tail(&sublist, freelist);
    1280             :         }
    1281           0 : }
    1282             : 
    1283             : static void
    1284           0 : fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
    1285             : {
    1286           0 :         unsigned long start_pfn, end_pfn;
    1287           0 :         struct page *page;
    1288             : 
    1289             :         /* Do not search around if there are enough pages already */
    1290           0 :         if (cc->nr_freepages >= cc->nr_migratepages)
    1291           0 :                 return;
    1292             : 
    1293             :         /* Minimise scanning during async compaction */
    1294           0 :         if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
    1295             :                 return;
    1296             : 
    1297             :         /* Pageblock boundaries */
    1298           0 :         start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
    1299           0 :         end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
    1300             : 
    1301           0 :         page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
    1302           0 :         if (!page)
    1303             :                 return;
    1304             : 
    1305             :         /* Scan before */
    1306           0 :         if (start_pfn != pfn) {
    1307           0 :                 isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
    1308           0 :                 if (cc->nr_freepages >= cc->nr_migratepages)
    1309             :                         return;
    1310             :         }
    1311             : 
    1312             :         /* Scan after */
    1313           0 :         start_pfn = pfn + nr_isolated;
    1314           0 :         if (start_pfn < end_pfn)
    1315           0 :                 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
    1316             : 
    1317             :         /* Skip this pageblock in the future as it's full or nearly full */
    1318           0 :         if (cc->nr_freepages < cc->nr_migratepages)
    1319           0 :                 set_pageblock_skip(page);
    1320             : }
    1321             : 
    1322             : /* Search orders in round-robin fashion */
    1323           0 : static int next_search_order(struct compact_control *cc, int order)
    1324             : {
    1325           0 :         order--;
    1326           0 :         if (order < 0)
    1327           0 :                 order = cc->order - 1;
    1328             : 
    1329             :         /* Search wrapped around? */
    1330           0 :         if (order == cc->search_order) {
    1331           0 :                 cc->search_order--;
    1332           0 :                 if (cc->search_order < 0)
    1333           0 :                         cc->search_order = cc->order - 1;
    1334           0 :                 return -1;
    1335             :         }
    1336             : 
    1337             :         return order;
    1338             : }
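
To make the wrap-around concrete, the sketch below walks orders in the same round-robin fashion, assuming cc->order is 5 and the previous search left off at search_order 2; struct ctl and next_order() are simplified stand-ins, and the real loop additionally stops early once a page is found:

#include <stdio.h>

/* Simplified stand-in for the relevant compact_control fields. */
struct ctl {
	int order;        /* requested allocation order */
	int search_order; /* order where the last search left off */
};

/* Same round-robin step as next_search_order(): count down from the
 * starting order, wrap to order - 1, and report -1 after a full cycle. */
static int next_order(struct ctl *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}
	return order;
}

int main(void)
{
	struct ctl cc = { .order = 5, .search_order = 2 };
	int order;

	/* Visits orders 2 1 0 4 3 and then terminates. */
	for (order = cc.search_order; order >= 0; order = next_order(&cc, order))
		printf("%d ", order);
	printf("\n");
	return 0;
}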
    1339             : 
    1340             : static unsigned long
    1341           0 : fast_isolate_freepages(struct compact_control *cc)
    1342             : {
     1343           0 :         unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
    1344           0 :         unsigned int nr_scanned = 0;
    1345           0 :         unsigned long low_pfn, min_pfn, highest = 0;
    1346           0 :         unsigned long nr_isolated = 0;
    1347           0 :         unsigned long distance;
    1348           0 :         struct page *page = NULL;
    1349           0 :         bool scan_start = false;
    1350           0 :         int order;
    1351             : 
    1352             :         /* Full compaction passes in a negative order */
    1353           0 :         if (cc->order <= 0)
    1354           0 :                 return cc->free_pfn;
    1355             : 
    1356             :         /*
    1357             :          * If starting the scan, use a deeper search and use the highest
    1358             :          * PFN found if a suitable one is not found.
    1359             :          */
    1360           0 :         if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
    1361           0 :                 limit = pageblock_nr_pages >> 1;
    1362           0 :                 scan_start = true;
    1363             :         }
    1364             : 
    1365             :         /*
    1366             :          * Preferred point is in the top quarter of the scan space but take
    1367             :          * a pfn from the top half if the search is problematic.
    1368             :          */
    1369           0 :         distance = (cc->free_pfn - cc->migrate_pfn);
    1370           0 :         low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
    1371           0 :         min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
    1372             : 
    1373           0 :         if (WARN_ON_ONCE(min_pfn > low_pfn))
    1374           0 :                 low_pfn = min_pfn;
    1375             : 
    1376             :         /*
    1377             :          * Search starts from the last successful isolation order or the next
    1378             :          * order to search after a previous failure
    1379             :          */
    1380           0 :         cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
    1381             : 
    1382           0 :         for (order = cc->search_order;
    1383           0 :              !page && order >= 0;
    1384           0 :              order = next_search_order(cc, order)) {
    1385           0 :                 struct free_area *area = &cc->zone->free_area[order];
    1386           0 :                 struct list_head *freelist;
    1387           0 :                 struct page *freepage;
    1388           0 :                 unsigned long flags;
    1389           0 :                 unsigned int order_scanned = 0;
    1390           0 :                 unsigned long high_pfn = 0;
    1391             : 
    1392           0 :                 if (!area->nr_free)
    1393           0 :                         continue;
    1394             : 
    1395           0 :                 spin_lock_irqsave(&cc->zone->lock, flags);
    1396           0 :                 freelist = &area->free_list[MIGRATE_MOVABLE];
    1397           0 :                 list_for_each_entry_reverse(freepage, freelist, lru) {
    1398           0 :                         unsigned long pfn;
    1399             : 
    1400           0 :                         order_scanned++;
    1401           0 :                         nr_scanned++;
    1402           0 :                         pfn = page_to_pfn(freepage);
    1403             : 
    1404           0 :                         if (pfn >= highest)
    1405           0 :                                 highest = max(pageblock_start_pfn(pfn),
    1406             :                                               cc->zone->zone_start_pfn);
    1407             : 
    1408           0 :                         if (pfn >= low_pfn) {
    1409           0 :                                 cc->fast_search_fail = 0;
    1410           0 :                                 cc->search_order = order;
    1411           0 :                                 page = freepage;
    1412           0 :                                 break;
    1413             :                         }
    1414             : 
    1415           0 :                         if (pfn >= min_pfn && pfn > high_pfn) {
    1416           0 :                                 high_pfn = pfn;
    1417             : 
    1418             :                                 /* Shorten the scan if a candidate is found */
    1419           0 :                                 limit >>= 1;
    1420             :                         }
    1421             : 
    1422           0 :                         if (order_scanned >= limit)
    1423             :                                 break;
    1424             :                 }
    1425             : 
    1426             :                 /* Use a minimum pfn if a preferred one was not found */
    1427           0 :                 if (!page && high_pfn) {
    1428           0 :                         page = pfn_to_page(high_pfn);
    1429             : 
    1430             :                         /* Update freepage for the list reorder below */
    1431           0 :                         freepage = page;
    1432             :                 }
    1433             : 
     1434             :                 /* Reorder so that a future search skips recent pages */
    1435           0 :                 move_freelist_head(freelist, freepage);
    1436             : 
    1437             :                 /* Isolate the page if available */
    1438           0 :                 if (page) {
    1439           0 :                         if (__isolate_free_page(page, order)) {
    1440           0 :                                 set_page_private(page, order);
    1441           0 :                                 nr_isolated = 1 << order;
    1442           0 :                                 cc->nr_freepages += nr_isolated;
    1443           0 :                                 list_add_tail(&page->lru, &cc->freepages);
    1444           0 :                                 count_compact_events(COMPACTISOLATED, nr_isolated);
    1445             :                         } else {
    1446             :                                 /* If isolation fails, abort the search */
    1447           0 :                                 order = cc->search_order + 1;
    1448           0 :                                 page = NULL;
    1449             :                         }
    1450             :                 }
    1451             : 
    1452           0 :                 spin_unlock_irqrestore(&cc->zone->lock, flags);
    1453             : 
    1454             :                 /*
     1455             :                  * Smaller scan on the next order so the total scan is related
    1456             :                  * to freelist_scan_limit.
    1457             :                  */
    1458           0 :                 if (order_scanned >= limit)
     1459           0 :                         limit = max(1U, limit >> 1);
    1460             :         }
    1461             : 
    1462           0 :         if (!page) {
    1463           0 :                 cc->fast_search_fail++;
    1464           0 :                 if (scan_start) {
    1465             :                         /*
    1466             :                          * Use the highest PFN found above min. If one was
    1467             :                          * not found, be pessimistic for direct compaction
    1468             :                          * and use the min mark.
    1469             :                          */
    1470           0 :                         if (highest) {
    1471           0 :                                 page = pfn_to_page(highest);
    1472           0 :                                 cc->free_pfn = highest;
    1473             :                         } else {
    1474           0 :                                 if (cc->direct_compaction && pfn_valid(min_pfn)) {
    1475           0 :                                         page = pageblock_pfn_to_page(min_pfn,
    1476           0 :                                                 min(pageblock_end_pfn(min_pfn),
    1477             :                                                     zone_end_pfn(cc->zone)),
    1478             :                                                 cc->zone);
    1479           0 :                                         cc->free_pfn = min_pfn;
    1480             :                                 }
    1481             :                         }
    1482             :                 }
    1483             :         }
    1484             : 
    1485           0 :         if (highest && highest >= cc->zone->compact_cached_free_pfn) {
    1486           0 :                 highest -= pageblock_nr_pages;
    1487           0 :                 cc->zone->compact_cached_free_pfn = highest;
    1488             :         }
    1489             : 
    1490           0 :         cc->total_free_scanned += nr_scanned;
    1491           0 :         if (!page)
    1492           0 :                 return cc->free_pfn;
    1493             : 
    1494           0 :         low_pfn = page_to_pfn(page);
    1495           0 :         fast_isolate_around(cc, low_pfn, nr_isolated);
    1496           0 :         return low_pfn;
    1497             : }
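
The search window above is simple arithmetic on the gap between the two scanners: candidates are preferred from the top quarter of the gap and accepted from the top half. A small sketch with assumed scanner positions (the pfn values and the pb_start_pfn() helper are made up for illustration, pageblock of 512 pages):

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL /* assumed: pageblock_order == 9 */

/* Round a pfn down to its pageblock boundary. */
static unsigned long pb_start_pfn(unsigned long pfn)
{
	return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
	/* Assumed scanner positions, purely for the arithmetic. */
	unsigned long migrate_pfn = 0x10000, free_pfn = 0x50000;
	unsigned long distance = free_pfn - migrate_pfn;

	/* Preferred candidates come from the top quarter of the gap ... */
	unsigned long low_pfn = pb_start_pfn(free_pfn - (distance >> 2));
	/* ... but anything in the top half is acceptable as a fallback. */
	unsigned long min_pfn = pb_start_pfn(free_pfn - (distance >> 1));

	/* distance = 0x40000, so low_pfn = 0x40000 and min_pfn = 0x30000. */
	printf("low_pfn=%#lx min_pfn=%#lx\n", low_pfn, min_pfn);
	return 0;
}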
    1498             : 
    1499             : /*
    1500             :  * Based on information in the current compact_control, find blocks
    1501             :  * suitable for isolating free pages from and then isolate them.
    1502             :  */
    1503           0 : static void isolate_freepages(struct compact_control *cc)
    1504             : {
    1505           0 :         struct zone *zone = cc->zone;
    1506           0 :         struct page *page;
    1507           0 :         unsigned long block_start_pfn;  /* start of current pageblock */
    1508           0 :         unsigned long isolate_start_pfn; /* exact pfn we start at */
    1509           0 :         unsigned long block_end_pfn;    /* end of current pageblock */
    1510           0 :         unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
    1511           0 :         struct list_head *freelist = &cc->freepages;
    1512           0 :         unsigned int stride;
    1513             : 
    1514             :         /* Try a small search of the free lists for a candidate */
    1515           0 :         isolate_start_pfn = fast_isolate_freepages(cc);
    1516           0 :         if (cc->nr_freepages)
    1517           0 :                 goto splitmap;
    1518             : 
    1519             :         /*
    1520             :          * Initialise the free scanner. The starting point is where we last
    1521             :          * successfully isolated from, zone-cached value, or the end of the
    1522             :          * zone when isolating for the first time. For looping we also need
    1523             :          * this pfn aligned down to the pageblock boundary, because we do
    1524             :          * block_start_pfn -= pageblock_nr_pages in the for loop.
    1525             :          * For ending point, take care when isolating in last pageblock of a
    1526             :          * zone which ends in the middle of a pageblock.
    1527             :          * The low boundary is the end of the pageblock the migration scanner
    1528             :          * is using.
    1529             :          */
    1530           0 :         isolate_start_pfn = cc->free_pfn;
    1531           0 :         block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
    1532           0 :         block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
    1533             :                                                 zone_end_pfn(zone));
    1534           0 :         low_pfn = pageblock_end_pfn(cc->migrate_pfn);
    1535           0 :         stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
    1536             : 
    1537             :         /*
    1538             :          * Isolate free pages until enough are available to migrate the
    1539             :          * pages on cc->migratepages. We stop searching if the migrate
    1540             :          * and free page scanners meet or enough free pages are isolated.
    1541             :          */
    1542           0 :         for (; block_start_pfn >= low_pfn;
    1543           0 :                                 block_end_pfn = block_start_pfn,
    1544           0 :                                 block_start_pfn -= pageblock_nr_pages,
    1545           0 :                                 isolate_start_pfn = block_start_pfn) {
    1546           0 :                 unsigned long nr_isolated;
    1547             : 
    1548             :                 /*
    1549             :                  * This can iterate a massively long zone without finding any
    1550             :                  * suitable migration targets, so periodically check resched.
    1551             :                  */
    1552           0 :                 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
    1553           0 :                         cond_resched();
    1554             : 
    1555           0 :                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
    1556             :                                                                         zone);
    1557           0 :                 if (!page)
    1558           0 :                         continue;
    1559             : 
    1560             :                 /* Check the block is suitable for migration */
    1561           0 :                 if (!suitable_migration_target(cc, page))
    1562           0 :                         continue;
    1563             : 
    1564             :                 /* If isolation recently failed, do not retry */
    1565           0 :                 if (!isolation_suitable(cc, page))
    1566           0 :                         continue;
    1567             : 
    1568             :                 /* Found a block suitable for isolating free pages from. */
    1569           0 :                 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
    1570             :                                         block_end_pfn, freelist, stride, false);
    1571             : 
    1572             :                 /* Update the skip hint if the full pageblock was scanned */
    1573           0 :                 if (isolate_start_pfn == block_end_pfn)
    1574           0 :                         update_pageblock_skip(cc, page, block_start_pfn);
    1575             : 
    1576             :                 /* Are enough freepages isolated? */
    1577           0 :                 if (cc->nr_freepages >= cc->nr_migratepages) {
    1578           0 :                         if (isolate_start_pfn >= block_end_pfn) {
    1579             :                                 /*
    1580             :                                  * Restart at previous pageblock if more
    1581             :                                  * freepages can be isolated next time.
    1582             :                                  */
    1583           0 :                                 isolate_start_pfn =
    1584           0 :                                         block_start_pfn - pageblock_nr_pages;
    1585             :                         }
    1586             :                         break;
    1587           0 :                 } else if (isolate_start_pfn < block_end_pfn) {
    1588             :                         /*
    1589             :                          * If isolation failed early, do not continue
    1590             :                          * needlessly.
    1591             :                          */
    1592             :                         break;
    1593             :                 }
    1594             : 
    1595             :                 /* Adjust stride depending on isolation */
    1596           0 :                 if (nr_isolated) {
    1597           0 :                         stride = 1;
    1598           0 :                         continue;
    1599             :                 }
    1600           0 :                 stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
    1601             :         }
    1602             : 
    1603             :         /*
    1604             :          * Record where the free scanner will restart next time. Either we
    1605             :          * broke from the loop and set isolate_start_pfn based on the last
    1606             :          * call to isolate_freepages_block(), or we met the migration scanner
    1607             :          * and the loop terminated due to isolate_start_pfn < low_pfn
    1608             :          */
    1609           0 :         cc->free_pfn = isolate_start_pfn;
    1610             : 
    1611           0 : splitmap:
    1612             :         /* __isolate_free_page() does not map the pages */
    1613           0 :         split_map_pages(freelist);
    1614           0 : }
    1615             : 
    1616             : /*
    1617             :  * This is a migrate-callback that "allocates" freepages by taking pages
    1618             :  * from the isolated freelists in the block we are migrating to.
    1619             :  */
    1620           0 : static struct page *compaction_alloc(struct page *migratepage,
    1621             :                                         unsigned long data)
    1622             : {
    1623           0 :         struct compact_control *cc = (struct compact_control *)data;
    1624           0 :         struct page *freepage;
    1625             : 
    1626           0 :         if (list_empty(&cc->freepages)) {
    1627           0 :                 isolate_freepages(cc);
    1628             : 
    1629           0 :                 if (list_empty(&cc->freepages))
    1630             :                         return NULL;
    1631             :         }
    1632             : 
    1633           0 :         freepage = list_entry(cc->freepages.next, struct page, lru);
    1634           0 :         list_del(&freepage->lru);
    1635           0 :         cc->nr_freepages--;
    1636             : 
    1637           0 :         return freepage;
    1638             : }
    1639             : 
    1640             : /*
    1641             :  * This is a migrate-callback that "frees" freepages back to the isolated
    1642             :  * freelist.  All pages on the freelist are from the same zone, so there is no
    1643             :  * special handling needed for NUMA.
    1644             :  */
    1645           0 : static void compaction_free(struct page *page, unsigned long data)
    1646             : {
    1647           0 :         struct compact_control *cc = (struct compact_control *)data;
    1648             : 
    1649           0 :         list_add(&page->lru, &cc->freepages);
    1650           0 :         cc->nr_freepages++;
    1651           0 : }
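
compaction_alloc() and compaction_free() are plain caller-supplied hooks: the migration core pulls target pages through one and hands unused targets back through the other, with the compact_control passed as opaque private data. The following self-contained userspace analogy shows that contract only; it is not the kernel API, and every name in it (get_target_fn, pool_get, migrate_items, ...) is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Caller-supplied hooks, analogous to compaction_alloc()/compaction_free():
 * the "migration core" never allocates targets itself, it asks the caller. */
typedef void *(*get_target_fn)(void *private);
typedef void (*put_target_fn)(void *target, void *private);

/* Toy pool standing in for the isolated freepages list. */
struct pool {
	int remaining;
};

static void *pool_get(void *private)
{
	struct pool *p = private;

	if (p->remaining == 0)
		return NULL;            /* no isolated free pages left */
	p->remaining--;
	return malloc(64);              /* hand out a target "page" */
}

static void pool_put(void *target, void *private)
{
	struct pool *p = private;

	p->remaining++;                 /* unused target goes back to the pool */
	free(target);
}

/* Minimal stand-in for a migrate loop: take one target per source item and
 * pretend the second migration fails so its target is returned via put(). */
static int migrate_items(int nr_items, get_target_fn get, put_target_fn put,
			 void *private)
{
	int migrated = 0;

	for (int i = 0; i < nr_items; i++) {
		void *target = get(private);

		if (!target)
			break;          /* ran out of targets */
		if (i == 1) {           /* simulated failure */
			put(target, private);
			continue;
		}
		free(target);           /* "migration" succeeded */
		migrated++;
	}
	return migrated;
}

int main(void)
{
	struct pool freepages = { .remaining = 3 };
	int migrated = migrate_items(4, pool_get, pool_put, &freepages);

	/* Prints "migrated 3 items, 0 targets left in pool". */
	printf("migrated %d items, %d targets left in pool\n",
	       migrated, freepages.remaining);
	return 0;
}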
    1652             : 
     1653             : /* possible outcomes of isolate_migratepages */
    1654             : typedef enum {
    1655             :         ISOLATE_ABORT,          /* Abort compaction now */
    1656             :         ISOLATE_NONE,           /* No pages isolated, continue scanning */
    1657             :         ISOLATE_SUCCESS,        /* Pages isolated, migrate */
    1658             : } isolate_migrate_t;
    1659             : 
    1660             : /*
    1661             :  * Allow userspace to control policy on scanning the unevictable LRU for
    1662             :  * compactable pages.
    1663             :  */
    1664             : #ifdef CONFIG_PREEMPT_RT
    1665             : int sysctl_compact_unevictable_allowed __read_mostly = 0;
    1666             : #else
    1667             : int sysctl_compact_unevictable_allowed __read_mostly = 1;
    1668             : #endif
    1669             : 
    1670             : static inline void
    1671           0 : update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
    1672             : {
    1673           0 :         if (cc->fast_start_pfn == ULONG_MAX)
    1674             :                 return;
    1675             : 
    1676           0 :         if (!cc->fast_start_pfn)
    1677           0 :                 cc->fast_start_pfn = pfn;
    1678             : 
    1679           0 :         cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
    1680             : }
    1681             : 
    1682             : static inline unsigned long
    1683           0 : reinit_migrate_pfn(struct compact_control *cc)
    1684             : {
    1685           0 :         if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
    1686           0 :                 return cc->migrate_pfn;
    1687             : 
    1688           0 :         cc->migrate_pfn = cc->fast_start_pfn;
    1689           0 :         cc->fast_start_pfn = ULONG_MAX;
    1690             : 
    1691           0 :         return cc->migrate_pfn;
    1692             : }
    1693             : 
    1694             : /*
    1695             :  * Briefly search the free lists for a migration source that already has
    1696             :  * some free pages to reduce the number of pages that need migration
    1697             :  * before a pageblock is free.
    1698             :  */
    1699           0 : static unsigned long fast_find_migrateblock(struct compact_control *cc)
    1700             : {
    1701           0 :         unsigned int limit = freelist_scan_limit(cc);
    1702           0 :         unsigned int nr_scanned = 0;
    1703           0 :         unsigned long distance;
    1704           0 :         unsigned long pfn = cc->migrate_pfn;
    1705           0 :         unsigned long high_pfn;
    1706           0 :         int order;
    1707           0 :         bool found_block = false;
    1708             : 
    1709             :         /* Skip hints are relied on to avoid repeats on the fast search */
    1710           0 :         if (cc->ignore_skip_hint)
    1711             :                 return pfn;
    1712             : 
    1713             :         /*
    1714             :          * If the migrate_pfn is not at the start of a zone or the start
    1715             :          * of a pageblock then assume this is a continuation of a previous
    1716             :          * scan restarted due to COMPACT_CLUSTER_MAX.
    1717             :          */
    1718           0 :         if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
    1719             :                 return pfn;
    1720             : 
    1721             :         /*
    1722             :          * For smaller orders, just linearly scan as the number of pages
    1723             :          * to migrate should be relatively small and does not necessarily
    1724             :          * justify freeing up a large block for a small allocation.
    1725             :          */
    1726           0 :         if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
    1727             :                 return pfn;
    1728             : 
    1729             :         /*
    1730             :          * Only allow kcompactd and direct requests for movable pages to
    1731             :          * quickly clear out a MOVABLE pageblock for allocation. This
    1732             :          * reduces the risk that a large movable pageblock is freed for
    1733             :          * an unmovable/reclaimable small allocation.
    1734             :          */
    1735           0 :         if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
    1736             :                 return pfn;
    1737             : 
    1738             :         /*
    1739             :          * When starting the migration scanner, pick any pageblock within the
    1740             :          * first half of the search space. Otherwise try and pick a pageblock
    1741             :          * within the first eighth to reduce the chances that a migration
    1742             :          * target later becomes a source.
    1743             :          */
    1744           0 :         distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
    1745           0 :         if (cc->migrate_pfn != cc->zone->zone_start_pfn)
    1746           0 :                 distance >>= 2;
    1747           0 :         high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
    1748             : 
    1749           0 :         for (order = cc->order - 1;
    1750           0 :              order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
    1751           0 :              order--) {
    1752           0 :                 struct free_area *area = &cc->zone->free_area[order];
    1753           0 :                 struct list_head *freelist;
    1754           0 :                 unsigned long flags;
    1755           0 :                 struct page *freepage;
    1756             : 
    1757           0 :                 if (!area->nr_free)
    1758           0 :                         continue;
    1759             : 
    1760           0 :                 spin_lock_irqsave(&cc->zone->lock, flags);
    1761           0 :                 freelist = &area->free_list[MIGRATE_MOVABLE];
    1762           0 :                 list_for_each_entry(freepage, freelist, lru) {
    1763           0 :                         unsigned long free_pfn;
    1764             : 
    1765           0 :                         if (nr_scanned++ >= limit) {
    1766           0 :                                 move_freelist_tail(freelist, freepage);
    1767           0 :                                 break;
    1768             :                         }
    1769             : 
    1770           0 :                         free_pfn = page_to_pfn(freepage);
    1771           0 :                         if (free_pfn < high_pfn) {
    1772             :                                 /*
    1773             :                                  * Avoid if skipped recently. Ideally it would
    1774             :                                  * move to the tail but even safe iteration of
    1775             :                                  * the list assumes an entry is deleted, not
    1776             :                                  * reordered.
    1777             :                                  */
    1778           0 :                                 if (get_pageblock_skip(freepage))
    1779           0 :                                         continue;
    1780             : 
     1781             :                                 /* Reorder so that a future search skips recent pages */
    1782           0 :                                 move_freelist_tail(freelist, freepage);
    1783             : 
    1784           0 :                                 update_fast_start_pfn(cc, free_pfn);
    1785           0 :                                 pfn = pageblock_start_pfn(free_pfn);
    1786           0 :                                 cc->fast_search_fail = 0;
    1787           0 :                                 found_block = true;
    1788           0 :                                 set_pageblock_skip(freepage);
    1789           0 :                                 break;
    1790             :                         }
    1791             :                 }
    1792           0 :                 spin_unlock_irqrestore(&cc->zone->lock, flags);
    1793             :         }
    1794             : 
    1795           0 :         cc->total_migrate_scanned += nr_scanned;
    1796             : 
    1797             :         /*
    1798             :          * If fast scanning failed then use a cached entry for a page block
    1799             :          * that had free pages as the basis for starting a linear scan.
    1800             :          */
    1801           0 :         if (!found_block) {
    1802           0 :                 cc->fast_search_fail++;
    1803           0 :                 pfn = reinit_migrate_pfn(cc);
    1804             :         }
    1805             :         return pfn;
    1806             : }
    1807             : 
    1808             : /*
    1809             :  * Isolate all pages that can be migrated from the first suitable block,
    1810             :  * starting at the block pointed to by the migrate scanner pfn within
    1811             :  * compact_control.
    1812             :  */
    1813           0 : static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
    1814             : {
    1815           0 :         unsigned long block_start_pfn;
    1816           0 :         unsigned long block_end_pfn;
    1817           0 :         unsigned long low_pfn;
    1818           0 :         struct page *page;
    1819           0 :         const isolate_mode_t isolate_mode =
    1820           0 :                 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
    1821           0 :                 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
    1822           0 :         bool fast_find_block;
    1823             : 
    1824             :         /*
    1825             :          * Start at where we last stopped, or beginning of the zone as
    1826             :          * initialized by compact_zone(). The first failure will use
    1827             :          * the lowest PFN as the starting point for linear scanning.
    1828             :          */
    1829           0 :         low_pfn = fast_find_migrateblock(cc);
    1830           0 :         block_start_pfn = pageblock_start_pfn(low_pfn);
    1831           0 :         if (block_start_pfn < cc->zone->zone_start_pfn)
    1832             :                 block_start_pfn = cc->zone->zone_start_pfn;
    1833             : 
    1834             :         /*
     1835             :          * fast_find_migrateblock() marks a pageblock as skipped, so to avoid
     1836             :          * the isolation_suitable() check below, check whether the fast
     1837             :          * search was successful.
    1838             :          */
    1839           0 :         fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
    1840             : 
    1841             :         /* Only scan within a pageblock boundary */
    1842           0 :         block_end_pfn = pageblock_end_pfn(low_pfn);
    1843             : 
    1844             :         /*
    1845             :          * Iterate over whole pageblocks until we find the first suitable.
    1846             :          * Do not cross the free scanner.
    1847             :          */
    1848           0 :         for (; block_end_pfn <= cc->free_pfn;
    1849           0 :                         fast_find_block = false,
    1850           0 :                         low_pfn = block_end_pfn,
    1851           0 :                         block_start_pfn = block_end_pfn,
    1852           0 :                         block_end_pfn += pageblock_nr_pages) {
    1853             : 
    1854             :                 /*
    1855             :                  * This can potentially iterate a massively long zone with
    1856             :                  * many pageblocks unsuitable, so periodically check if we
    1857             :                  * need to schedule.
    1858             :                  */
    1859           0 :                 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
    1860           0 :                         cond_resched();
    1861             : 
    1862           0 :                 page = pageblock_pfn_to_page(block_start_pfn,
    1863             :                                                 block_end_pfn, cc->zone);
    1864           0 :                 if (!page)
    1865           0 :                         continue;
    1866             : 
    1867             :                 /*
    1868             :                  * If isolation recently failed, do not retry. Only check the
    1869             :                  * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
    1870             :                  * to be visited multiple times. Assume skip was checked
    1871             :                  * before making it "skip" so other compaction instances do
    1872             :                  * not scan the same block.
    1873             :                  */
    1874           0 :                 if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
    1875           0 :                     !fast_find_block && !isolation_suitable(cc, page))
    1876           0 :                         continue;
    1877             : 
    1878             :                 /*
    1879             :                  * For async compaction, also only scan in MOVABLE blocks
    1880             :                  * without huge pages. Async compaction is optimistic to see
    1881             :                  * if the minimum amount of work satisfies the allocation.
    1882             :                  * The cached PFN is updated as it's possible that all
    1883             :                  * remaining blocks between source and target are unsuitable
    1884             :                  * and the compaction scanners fail to meet.
    1885             :                  */
    1886           0 :                 if (!suitable_migration_source(cc, page)) {
    1887           0 :                         update_cached_migrate(cc, block_end_pfn);
    1888           0 :                         continue;
    1889             :                 }
    1890             : 
    1891             :                 /* Perform the isolation */
    1892           0 :                 low_pfn = isolate_migratepages_block(cc, low_pfn,
    1893             :                                                 block_end_pfn, isolate_mode);
    1894             : 
    1895           0 :                 if (!low_pfn)
    1896             :                         return ISOLATE_ABORT;
    1897             : 
    1898             :                 /*
    1899             :          * Either we isolated something and proceed with migration, or
    1900             :                  * we failed and compact_zone should decide if we should
    1901             :                  * continue or not.
    1902             :                  */
    1903             :                 break;
    1904             :         }
    1905             : 
    1906             :         /* Record where migration scanner will be restarted. */
    1907           0 :         cc->migrate_pfn = low_pfn;
    1908             : 
    1909           0 :         return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
    1910             : }
    1911             : 
    1912             : /*
    1913             :  * order == -1 is expected when compacting via
    1914             :  * /proc/sys/vm/compact_memory
    1915             :  */
    1916           0 : static inline bool is_via_compact_memory(int order)
    1917             : {
    1918           0 :         return order == -1;
    1919             : }
    1920             : 
    1921          79 : static bool kswapd_is_running(pg_data_t *pgdat)
    1922             : {
    1923          79 :         return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
    1924             : }
    1925             : 
    1926             : /*
    1927             :  * A zone's fragmentation score is the external fragmentation with respect to
    1928             :  * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
    1929             :  */
    1930         237 : static unsigned int fragmentation_score_zone(struct zone *zone)
    1931             : {
    1932         237 :         return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
    1933             : }
    1934             : 
    1935             : /*
    1936             :  * A weighted zone's fragmentation score is the external fragmentation
    1937             :  * with respect to the COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
    1938             :  * returns a value in the range [0, 100].
    1939             :  *
    1940             :  * The scaling factor ensures that proactive compaction focuses on larger
    1941             :  * zones like ZONE_NORMAL, rather than smaller, specialized zones like
    1942             :  * ZONE_DMA32. For smaller zones, the score value remains close to zero,
    1943             :  * and thus never exceeds the high threshold for proactive compaction.
    1944             :  */
    1945         237 : static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
    1946             : {
    1947         237 :         unsigned long score;
    1948             : 
    1949         237 :         score = zone->present_pages * fragmentation_score_zone(zone);
    1950         237 :         return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
    1951             : }
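A rough worked example of the weighting, with purely illustrative numbers: on a node with 1,000,000 present pages, a 900,000-page ZONE_NORMAL whose raw extfrag_for_order() score is 40 contributes about 900,000 * 40 / 1,000,001 ≈ 36 to the node score, while a 100,000-page ZONE_DMA32 with a raw score of 80 contributes only about 100,000 * 80 / 1,000,001 ≈ 8, so a small but badly fragmented zone cannot push the node over the proactive threshold on its own.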
    1952             : 
    1953             : /*
    1954             :  * The per-node proactive (background) compaction process is started by its
    1955             :  * corresponding kcompactd thread when the node's fragmentation score
    1956             :  * exceeds the high threshold. The compaction process remains active till
    1957             :  * the node's score falls below the low threshold, or one of the back-off
    1958             :  * conditions is met.
    1959             :  */
    1960          79 : static unsigned int fragmentation_score_node(pg_data_t *pgdat)
    1961             : {
    1962          79 :         unsigned int score = 0;
    1963          79 :         int zoneid;
    1964             : 
    1965         316 :         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
    1966         237 :                 struct zone *zone;
    1967             : 
    1968         237 :                 zone = &pgdat->node_zones[zoneid];
    1969         237 :                 score += fragmentation_score_zone_weighted(zone);
    1970             :         }
    1971             : 
    1972          79 :         return score;
    1973             : }
    1974             : 
    1975          79 : static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
    1976             : {
    1977          79 :         unsigned int wmark_low;
    1978             : 
    1979             :         /*
    1980             :          * Cap the low watermark to avoid excessive compaction
    1981             :          * activity in case a user sets the proactiveness tunable
    1982             :          * close to 100 (maximum).
    1983             :          */
    1984          79 :         wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
    1985          79 :         return low ? wmark_low : min(wmark_low + 10, 100U);
    1986             : }
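For instance, with the default vm.compaction_proactiveness of 20 the thresholds work out to wmark_low = max(100 - 20, 5) = 80 and wmark_high = min(80 + 10, 100) = 90, while a near-maximum setting of 98 is capped to wmark_low = 5 and wmark_high = 15, so proactive compaction still stops once the node score drops to 5 rather than chasing a score of 2.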
    1987             : 
    1988          79 : static bool should_proactive_compact_node(pg_data_t *pgdat)
    1989             : {
    1990          79 :         int wmark_high;
    1991             : 
    1992         158 :         if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
    1993             :                 return false;
    1994             : 
    1995          79 :         wmark_high = fragmentation_score_wmark(pgdat, false);
    1996          79 :         return fragmentation_score_node(pgdat) > wmark_high;
    1997             : }
    1998             : 
    1999           0 : static enum compact_result __compact_finished(struct compact_control *cc)
    2000             : {
    2001           0 :         unsigned int order;
    2002           0 :         const int migratetype = cc->migratetype;
    2003           0 :         int ret;
    2004             : 
    2005             :         /* Compaction run completes if the migrate and free scanner meet */
    2006           0 :         if (compact_scanners_met(cc)) {
    2007             :                 /* Let the next compaction start anew. */
    2008           0 :                 reset_cached_positions(cc->zone);
    2009             : 
    2010             :                 /*
    2011             :                  * Mark that the PG_migrate_skip information should be cleared
    2012             :                  * by kswapd when it goes to sleep. kcompactd does not set the
    2013             :          * flag itself, as the decision to clear it should be based
    2014             :          * directly on an allocation request.
    2015             :                  */
    2016           0 :                 if (cc->direct_compaction)
    2017           0 :                         cc->zone->compact_blockskip_flush = true;
    2018             : 
    2019           0 :                 if (cc->whole_zone)
    2020             :                         return COMPACT_COMPLETE;
    2021             :                 else
    2022           0 :                         return COMPACT_PARTIAL_SKIPPED;
    2023             :         }
    2024             : 
    2025           0 :         if (cc->proactive_compaction) {
    2026           0 :                 int score, wmark_low;
    2027           0 :                 pg_data_t *pgdat;
    2028             : 
    2029           0 :                 pgdat = cc->zone->zone_pgdat;
    2030           0 :                 if (kswapd_is_running(pgdat))
    2031             :                         return COMPACT_PARTIAL_SKIPPED;
    2032             : 
    2033           0 :                 score = fragmentation_score_zone(cc->zone);
    2034           0 :                 wmark_low = fragmentation_score_wmark(pgdat, true);
    2035             : 
    2036           0 :                 if (score > wmark_low)
    2037             :                         ret = COMPACT_CONTINUE;
    2038             :                 else
    2039           0 :                         ret = COMPACT_SUCCESS;
    2040             : 
    2041           0 :                 goto out;
    2042             :         }
    2043             : 
    2044           0 :         if (is_via_compact_memory(cc->order))
    2045             :                 return COMPACT_CONTINUE;
    2046             : 
    2047             :         /*
    2048             :          * Always finish scanning a pageblock to reduce the possibility of
    2049             :          * fallbacks in the future. This is particularly important when
    2050             :          * migration source is unmovable/reclaimable but it's not worth
    2051             :          * special casing.
    2052             :          */
    2053           0 :         if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
    2054             :                 return COMPACT_CONTINUE;
    2055             : 
    2056             :         /* Direct compactor: Is a suitable page free? */
    2057           0 :         ret = COMPACT_NO_SUITABLE_PAGE;
    2058           0 :         for (order = cc->order; order < MAX_ORDER; order++) {
    2059           0 :                 struct free_area *area = &cc->zone->free_area[order];
    2060           0 :                 bool can_steal;
    2061             : 
    2062             :                 /* Job done if page is free of the right migratetype */
    2063           0 :                 if (!free_area_empty(area, migratetype))
    2064           0 :                         return COMPACT_SUCCESS;
    2065             : 
    2066             : #ifdef CONFIG_CMA
    2067             :                 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
    2068             :                 if (migratetype == MIGRATE_MOVABLE &&
    2069             :                         !free_area_empty(area, MIGRATE_CMA))
    2070             :                         return COMPACT_SUCCESS;
    2071             : #endif
    2072             :                 /*
    2073             :                  * Job done if allocation would steal freepages from
    2074             :                  * other migratetype buddy lists.
    2075             :                  */
    2076           0 :                 if (find_suitable_fallback(area, order, migratetype,
    2077             :                                                 true, &can_steal) != -1) {
    2078             : 
    2079             :                         /* movable pages are OK in any pageblock */
    2080           0 :                         if (migratetype == MIGRATE_MOVABLE)
    2081             :                                 return COMPACT_SUCCESS;
    2082             : 
    2083             :                         /*
    2084             :                          * We are stealing for a non-movable allocation. Make
    2085             :                          * sure we finish compacting the current pageblock
    2086             :                          * first so it is as free as possible and we won't
    2087             :                          * have to steal another one soon. This only applies
    2088             :                          * to sync compaction, as async compaction operates
    2089             :                          * on pageblocks of the same migratetype.
    2090             :                          */
    2091           0 :                         if (cc->mode == MIGRATE_ASYNC ||
    2092           0 :                                         IS_ALIGNED(cc->migrate_pfn,
    2093             :                                                         pageblock_nr_pages)) {
    2094             :                                 return COMPACT_SUCCESS;
    2095             :                         }
    2096             : 
    2097           0 :                         ret = COMPACT_CONTINUE;
    2098           0 :                         break;
    2099             :                 }
    2100             :         }
    2101             : 
    2102           0 : out:
    2103           0 :         if (cc->contended || fatal_signal_pending(current))
    2104             :                 ret = COMPACT_CONTENDED;
    2105             : 
    2106           0 :         return ret;
    2107             : }
    2108             : 
    2109           0 : static enum compact_result compact_finished(struct compact_control *cc)
    2110             : {
    2111           0 :         int ret;
    2112             : 
    2113           0 :         ret = __compact_finished(cc);
    2114           0 :         trace_mm_compaction_finished(cc->zone, cc->order, ret);
    2115           0 :         if (ret == COMPACT_NO_SUITABLE_PAGE)
    2116           0 :                 ret = COMPACT_CONTINUE;
    2117             : 
    2118           0 :         return ret;
    2119             : }
    2120             : 
    2121           0 : static enum compact_result __compaction_suitable(struct zone *zone, int order,
    2122             :                                         unsigned int alloc_flags,
    2123             :                                         int highest_zoneidx,
    2124             :                                         unsigned long wmark_target)
    2125             : {
    2126           0 :         unsigned long watermark;
    2127             : 
    2128           0 :         if (is_via_compact_memory(order))
    2129             :                 return COMPACT_CONTINUE;
    2130             : 
    2131           0 :         watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
    2132             :         /*
    2133             :          * If watermarks for high-order allocation are already met, there
    2134             :          * should be no need for compaction at all.
    2135             :          */
    2136           0 :         if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
    2137             :                                                                 alloc_flags))
    2138             :                 return COMPACT_SUCCESS;
    2139             : 
    2140             :         /*
    2141             :          * Watermarks for order-0 must be met for compaction to be able to
    2142             :          * isolate free pages for migration targets. This means that the
    2143             :          * watermark and alloc_flags have to match, or be more pessimistic than
    2144             :          * the check in __isolate_free_page(). We don't use the direct
    2145             :          * compactor's alloc_flags, as they are not relevant for freepage
    2146             :          * isolation. We however do use the direct compactor's highest_zoneidx
    2147             :          * to skip over zones where lowmem reserves would prevent allocation
    2148             :          * even if compaction succeeds.
    2149             :          * For costly orders, we require the low watermark instead of min for
    2150             :          * compaction to proceed, to increase its chances of success.
    2151             :          * ALLOC_CMA is used, as pages in CMA pageblocks are considered
    2152             :          * suitable migration targets.
    2153             :          */
    2154           0 :         watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
    2155           0 :                                 low_wmark_pages(zone) : min_wmark_pages(zone);
    2156           0 :         watermark += compact_gap(order);
    2157           0 :         if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
    2158             :                                                 ALLOC_CMA, wmark_target))
    2159           0 :                 return COMPACT_SKIPPED;
    2160             : 
    2161             :         return COMPACT_CONTINUE;
    2162             : }
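To make the order-0 headroom concrete: for an order-9 request (a 2 MiB THP with 4 KiB pages), the order exceeds PAGE_ALLOC_COSTLY_ORDER (3), so the low watermark is used, and compact_gap(), which mm/internal.h defines as 2UL << order, adds 2 << 9 = 1024 extra free pages (4 MiB) that must be available before the scanners are allowed to run.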
    2163             : 
    2164             : /*
    2165             :  * compaction_suitable: Is this suitable to run compaction on this zone now?
    2166             :  * Returns
    2167             :  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
    2168             :  *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
    2169             :  *   COMPACT_CONTINUE - If compaction should run now
    2170             :  */
    2171           0 : enum compact_result compaction_suitable(struct zone *zone, int order,
    2172             :                                         unsigned int alloc_flags,
    2173             :                                         int highest_zoneidx)
    2174             : {
    2175           0 :         enum compact_result ret;
    2176           0 :         int fragindex;
    2177             : 
    2178           0 :         ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
    2179             :                                     zone_page_state(zone, NR_FREE_PAGES));
    2180             :         /*
    2181             :          * fragmentation index determines if allocation failures are due to
    2182             :          * low memory or external fragmentation
    2183             :          *
    2184             :          * index of -1000 would imply allocations might succeed depending on
    2185             :          * watermarks, but we already failed the high-order watermark check
    2186             :          * index towards 0 implies failure is due to lack of memory
    2187             :          * index towards 1000 implies failure is due to fragmentation
    2188             :          *
    2189             :          * Only compact if a failure would be due to fragmentation. Also
    2190             :          * ignore fragindex for non-costly orders where the alternative to
    2191             :          * a successful reclaim/compaction is OOM. Fragindex and the
    2192             :          * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
    2193             :          * excessive compaction for costly orders, but it should not be at the
    2194             :          * expense of system stability.
    2195             :          */
    2196           0 :         if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
    2197           0 :                 fragindex = fragmentation_index(zone, order);
    2198           0 :                 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
    2199           0 :                         ret = COMPACT_NOT_SUITABLE_ZONE;
    2200             :         }
    2201             : 
    2202           0 :         trace_mm_compaction_suitable(zone, order, ret);
    2203           0 :         if (ret == COMPACT_NOT_SUITABLE_ZONE)
    2204             :                 ret = COMPACT_SKIPPED;
    2205             : 
    2206           0 :         return ret;
    2207             : }
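As an example of the fragindex gating, using the default sysctl_extfrag_threshold of 500 set further down in this file: for a costly request (order > PAGE_ALLOC_COSTLY_ORDER), a fragmentation_index() of, say, 350 is treated as a lack of free memory and the zone is reported as COMPACT_SKIPPED, whereas an index of 700 indicates genuine fragmentation and compaction is allowed to continue; non-costly orders skip this check entirely.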
    2208             : 
    2209           0 : bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
    2210             :                 int alloc_flags)
    2211             : {
    2212           0 :         struct zone *zone;
    2213           0 :         struct zoneref *z;
    2214             : 
    2215             :         /*
    2216             :          * Make sure at least one zone would pass __compaction_suitable if we continue
    2217             :          * retrying the reclaim.
    2218             :          */
    2219           0 :         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
    2220             :                                 ac->highest_zoneidx, ac->nodemask) {
    2221           0 :                 unsigned long available;
    2222           0 :                 enum compact_result compact_result;
    2223             : 
    2224             :                 /*
    2225             :                  * Do not consider all the reclaimable memory because we do not
    2226             :          * want to thrash just for a single high-order allocation which
    2227             :          * is not even guaranteed to appear even if __compaction_suitable
    2228             :                  * is happy about the watermark check.
    2229             :                  */
    2230           0 :                 available = zone_reclaimable_pages(zone) / order;
    2231           0 :                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
    2232           0 :                 compact_result = __compaction_suitable(zone, order, alloc_flags,
    2233           0 :                                 ac->highest_zoneidx, available);
    2234           0 :                 if (compact_result != COMPACT_SKIPPED)
    2235             :                         return true;
    2236             :         }
    2237             : 
    2238             :         return false;
    2239             : }
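A quick illustration of the estimate above, with made-up numbers: for an order-4 request on a zone with 64,000 reclaimable pages and 2,000 free pages, available = 64,000 / 4 + 2,000 = 18,000 pages is what gets passed to __compaction_suitable() as the watermark target; dividing by the order deliberately credits only part of the reclaimable memory, so a retry is not justified by memory that would have to be thrashed to recover.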
    2240             : 
    2241             : static enum compact_result
    2242           0 : compact_zone(struct compact_control *cc, struct capture_control *capc)
    2243             : {
    2244           0 :         enum compact_result ret;
    2245           0 :         unsigned long start_pfn = cc->zone->zone_start_pfn;
    2246           0 :         unsigned long end_pfn = zone_end_pfn(cc->zone);
    2247           0 :         unsigned long last_migrated_pfn;
    2248           0 :         const bool sync = cc->mode != MIGRATE_ASYNC;
    2249           0 :         bool update_cached;
    2250             : 
    2251             :         /*
    2252             :          * These counters track activities during zone compaction.  Initialize
    2253             :          * them before compacting a new zone.
    2254             :          */
    2255           0 :         cc->total_migrate_scanned = 0;
    2256           0 :         cc->total_free_scanned = 0;
    2257           0 :         cc->nr_migratepages = 0;
    2258           0 :         cc->nr_freepages = 0;
    2259           0 :         INIT_LIST_HEAD(&cc->freepages);
    2260           0 :         INIT_LIST_HEAD(&cc->migratepages);
    2261             : 
    2262           0 :         cc->migratetype = gfp_migratetype(cc->gfp_mask);
    2263           0 :         ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
    2264             :                                                         cc->highest_zoneidx);
    2265             :         /* Compaction is likely to fail */
    2266           0 :         if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
    2267             :                 return ret;
    2268             : 
    2269             :         /* huh, compaction_suitable is returning something unexpected */
    2270           0 :         VM_BUG_ON(ret != COMPACT_CONTINUE);
    2271             : 
    2272             :         /*
    2273             :          * Clear pageblock skip if there were failures recently and compaction
    2274             :          * is about to be retried after being deferred.
    2275             :          */
    2276           0 :         if (compaction_restarting(cc->zone, cc->order))
    2277           0 :                 __reset_isolation_suitable(cc->zone);
    2278             : 
    2279             :         /*
    2280             :          * Set up to move all movable pages to the end of the zone. Use cached
    2281             :          * information on where the scanners should start (unless we explicitly
    2282             :          * want to compact the whole zone), but check that it is initialised
    2283             :          * by ensuring the values are within zone boundaries.
    2284             :          */
    2285           0 :         cc->fast_start_pfn = 0;
    2286           0 :         if (cc->whole_zone) {
    2287           0 :                 cc->migrate_pfn = start_pfn;
    2288           0 :                 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
    2289             :         } else {
    2290           0 :                 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
    2291           0 :                 cc->free_pfn = cc->zone->compact_cached_free_pfn;
    2292           0 :                 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
    2293           0 :                         cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
    2294           0 :                         cc->zone->compact_cached_free_pfn = cc->free_pfn;
    2295             :                 }
    2296           0 :                 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
    2297           0 :                         cc->migrate_pfn = start_pfn;
    2298           0 :                         cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
    2299           0 :                         cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
    2300             :                 }
    2301             : 
    2302           0 :                 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
    2303           0 :                         cc->whole_zone = true;
    2304             :         }
    2305             : 
    2306           0 :         last_migrated_pfn = 0;
    2307             : 
    2308             :         /*
    2309             :          * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
    2310             :          * the basis that some migrations will fail in ASYNC mode. However,
    2311             :          * if the cached PFNs match and pageblocks are skipped due to having
    2312             :          * no isolation candidates, then the sync state does not matter.
    2313             :          * Until a pageblock with isolation candidates is found, keep the
    2314             :          * cached PFNs in sync to avoid revisiting the same blocks.
    2315             :          */
    2316           0 :         update_cached = !sync &&
    2317           0 :                 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
    2318             : 
    2319           0 :         trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
    2320             :                                 cc->free_pfn, end_pfn, sync);
    2321             : 
    2322           0 :         migrate_prep_local();
    2323             : 
    2324           0 :         while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
    2325           0 :                 int err;
    2326           0 :                 unsigned long iteration_start_pfn = cc->migrate_pfn;
    2327             : 
    2328             :                 /*
    2329             :                  * Avoid multiple rescans which can happen if a page cannot be
    2330             :                  * isolated (dirty/writeback in async mode) or if the migrated
    2331             :                  * pages are being allocated before the pageblock is cleared.
    2332             :                  * The first rescan will capture the entire pageblock for
    2333             :                  * migration. If it fails, it'll be marked skip and scanning
    2334             :                  * will proceed as normal.
    2335             :                  */
    2336           0 :                 cc->rescan = false;
    2337           0 :                 if (pageblock_start_pfn(last_migrated_pfn) ==
    2338             :                     pageblock_start_pfn(iteration_start_pfn)) {
    2339           0 :                         cc->rescan = true;
    2340             :                 }
    2341             : 
    2342           0 :                 switch (isolate_migratepages(cc)) {
    2343           0 :                 case ISOLATE_ABORT:
    2344           0 :                         ret = COMPACT_CONTENDED;
    2345           0 :                         putback_movable_pages(&cc->migratepages);
    2346           0 :                         cc->nr_migratepages = 0;
    2347           0 :                         goto out;
    2348           0 :                 case ISOLATE_NONE:
    2349           0 :                         if (update_cached) {
    2350           0 :                                 cc->zone->compact_cached_migrate_pfn[1] =
    2351           0 :                                         cc->zone->compact_cached_migrate_pfn[0];
    2352             :                         }
    2353             : 
    2354             :                         /*
    2355             :                          * We haven't isolated and migrated anything, but
    2356             :                          * there might still be unflushed migrations from
    2357             :                          * previous cc->order aligned block.
    2358             :                          */
    2359           0 :                         goto check_drain;
    2360           0 :                 case ISOLATE_SUCCESS:
    2361           0 :                         update_cached = false;
    2362           0 :                         last_migrated_pfn = iteration_start_pfn;
    2363             :                 }
    2364             : 
    2365           0 :                 err = migrate_pages(&cc->migratepages, compaction_alloc,
    2366             :                                 compaction_free, (unsigned long)cc, cc->mode,
    2367             :                                 MR_COMPACTION);
    2368             : 
    2369           0 :                 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
    2370             :                                                         &cc->migratepages);
    2371             : 
    2372             :                 /* All pages were either migrated or will be released */
    2373           0 :                 cc->nr_migratepages = 0;
    2374           0 :                 if (err) {
    2375           0 :                         putback_movable_pages(&cc->migratepages);
    2376             :                         /*
    2377             :                          * migrate_pages() may return -ENOMEM when scanners meet
    2378             :                          * and we want compact_finished() to detect it
    2379             :                          */
    2380           0 :                         if (err == -ENOMEM && !compact_scanners_met(cc)) {
    2381           0 :                                 ret = COMPACT_CONTENDED;
    2382           0 :                                 goto out;
    2383             :                         }
    2384             :                         /*
    2385             :                          * We failed to migrate at least one page in the current
    2386             :                          * order-aligned block, so skip the rest of it.
    2387             :                          */
    2388           0 :                         if (cc->direct_compaction &&
    2389           0 :                                                 (cc->mode == MIGRATE_ASYNC)) {
    2390           0 :                                 cc->migrate_pfn = block_end_pfn(
    2391             :                                                 cc->migrate_pfn - 1, cc->order);
    2392             :                                 /* Draining pcplists is useless in this case */
    2393           0 :                                 last_migrated_pfn = 0;
    2394             :                         }
    2395             :                 }
    2396             : 
    2397           0 : check_drain:
    2398             :                 /*
    2399             :                  * Has the migration scanner moved away from the previous
    2400             :                  * cc->order aligned block where we migrated from? If yes,
    2401             :                  * flush the pages that were freed, so that they can merge and
    2402             :                  * compact_finished() can detect immediately if allocation
    2403             :                  * would succeed.
    2404             :                  */
    2405           0 :                 if (cc->order > 0 && last_migrated_pfn) {
    2406           0 :                         unsigned long current_block_start =
    2407           0 :                                 block_start_pfn(cc->migrate_pfn, cc->order);
    2408             : 
    2409           0 :                         if (last_migrated_pfn < current_block_start) {
    2410           0 :                                 lru_add_drain_cpu_zone(cc->zone);
    2411             :                                 /* No more flushing until we migrate again */
    2412           0 :                                 last_migrated_pfn = 0;
    2413             :                         }
    2414             :                 }
    2415             : 
    2416             :                 /* Stop if a page has been captured */
    2417           0 :                 if (capc && capc->page) {
    2418             :                         ret = COMPACT_SUCCESS;
    2419             :                         break;
    2420             :                 }
    2421             :         }
    2422             : 
    2423           0 : out:
    2424             :         /*
    2425             :          * Release free pages and update where the free scanner should restart,
    2426             :          * so we don't leave any returned pages behind in the next attempt.
    2427             :          */
    2428           0 :         if (cc->nr_freepages > 0) {
    2429           0 :                 unsigned long free_pfn = release_freepages(&cc->freepages);
    2430             : 
    2431           0 :                 cc->nr_freepages = 0;
    2432           0 :                 VM_BUG_ON(free_pfn == 0);
    2433             :                 /* The cached pfn is always the first in a pageblock */
    2434           0 :                 free_pfn = pageblock_start_pfn(free_pfn);
    2435             :                 /*
    2436             :          * Only go back, not forward. The cached pfn might already have
    2437             :          * been reset to the zone end in compact_finished().
    2438             :                  */
    2439           0 :                 if (free_pfn > cc->zone->compact_cached_free_pfn)
    2440           0 :                         cc->zone->compact_cached_free_pfn = free_pfn;
    2441             :         }
    2442             : 
    2443           0 :         count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
    2444           0 :         count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
    2445             : 
    2446           0 :         trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
    2447             :                                 cc->free_pfn, end_pfn, sync, ret);
    2448             : 
    2449           0 :         return ret;
    2450             : }
    2451             : 
    2452           0 : static enum compact_result compact_zone_order(struct zone *zone, int order,
    2453             :                 gfp_t gfp_mask, enum compact_priority prio,
    2454             :                 unsigned int alloc_flags, int highest_zoneidx,
    2455             :                 struct page **capture)
    2456             : {
    2457           0 :         enum compact_result ret;
    2458           0 :         struct compact_control cc = {
    2459             :                 .order = order,
    2460             :                 .search_order = order,
    2461             :                 .gfp_mask = gfp_mask,
    2462             :                 .zone = zone,
    2463             :                 .mode = (prio == COMPACT_PRIO_ASYNC) ?
    2464           0 :                                         MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
    2465             :                 .alloc_flags = alloc_flags,
    2466             :                 .highest_zoneidx = highest_zoneidx,
    2467             :                 .direct_compaction = true,
    2468             :                 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
    2469           0 :                 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
    2470             :                 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
    2471             :         };
    2472           0 :         struct capture_control capc = {
    2473             :                 .cc = &cc,
    2474             :                 .page = NULL,
    2475             :         };
    2476             : 
    2477             :         /*
    2478             :          * Make sure the structs are really initialized before we expose the
    2479             :          * capture control, in case we are interrupted and the interrupt handler
    2480             :          * frees a page.
    2481             :          */
    2482           0 :         barrier();
    2483           0 :         WRITE_ONCE(current->capture_control, &capc);
    2484             : 
    2485           0 :         ret = compact_zone(&cc, &capc);
    2486             : 
    2487           0 :         VM_BUG_ON(!list_empty(&cc.freepages));
    2488           0 :         VM_BUG_ON(!list_empty(&cc.migratepages));
    2489             : 
    2490             :         /*
    2491             :          * Make sure we hide capture control first before we read the captured
    2492             :          * page pointer, otherwise an interrupt could free and capture a page
    2493             :          * and we would leak it.
    2494             :          */
    2495           0 :         WRITE_ONCE(current->capture_control, NULL);
    2496           0 :         *capture = READ_ONCE(capc.page);
    2497             : 
    2498           0 :         return ret;
    2499             : }
    2500             : 
    2501             : int sysctl_extfrag_threshold = 500;
    2502             : 
    2503             : /**
    2504             :  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
    2505             :  * @gfp_mask: The GFP mask of the current allocation
    2506             :  * @order: The order of the current allocation
    2507             :  * @alloc_flags: The allocation flags of the current allocation
    2508             :  * @ac: The context of current allocation
    2509             :  * @prio: Determines how hard direct compaction should try to succeed
    2510             :  * @capture: Pointer to free page created by compaction will be stored here
    2511             :  *
    2512             :  * This is the main entry point for direct page compaction.
    2513             :  */
    2514           0 : enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
    2515             :                 unsigned int alloc_flags, const struct alloc_context *ac,
    2516             :                 enum compact_priority prio, struct page **capture)
    2517             : {
    2518           0 :         int may_perform_io = gfp_mask & __GFP_IO;
    2519           0 :         struct zoneref *z;
    2520           0 :         struct zone *zone;
    2521           0 :         enum compact_result rc = COMPACT_SKIPPED;
    2522             : 
    2523             :         /*
    2524             :          * Check if the GFP flags allow compaction - GFP_NOIO is a really
    2525             :          * tricky context because the migration might require IO.
    2526             :          */
    2527           0 :         if (!may_perform_io)
    2528             :                 return COMPACT_SKIPPED;
    2529             : 
    2530           0 :         trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
    2531             : 
    2532             :         /* Compact each zone in the list */
    2533           0 :         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
    2534             :                                         ac->highest_zoneidx, ac->nodemask) {
    2535           0 :                 enum compact_result status;
    2536             : 
    2537           0 :                 if (prio > MIN_COMPACT_PRIORITY
    2538           0 :                                         && compaction_deferred(zone, order)) {
    2539           0 :                         rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
    2540           0 :                         continue;
    2541             :                 }
    2542             : 
    2543           0 :                 status = compact_zone_order(zone, order, gfp_mask, prio,
    2544           0 :                                 alloc_flags, ac->highest_zoneidx, capture);
    2545           0 :                 rc = max(status, rc);
    2546             : 
    2547             :                 /* The allocation should succeed, stop compacting */
    2548           0 :                 if (status == COMPACT_SUCCESS) {
    2549             :                         /*
    2550             :                          * We think the allocation will succeed in this zone,
    2551             :                          * but it is not certain, hence the false. The caller
    2552             :                          * will repeat this with true if allocation indeed
    2553             :                          * succeeds in this zone.
    2554             :                          */
    2555           0 :                         compaction_defer_reset(zone, order, false);
    2556             : 
    2557           0 :                         break;
    2558             :                 }
    2559             : 
    2560           0 :                 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
    2561             :                                         status == COMPACT_PARTIAL_SKIPPED))
    2562             :                         /*
    2563             :                          * We think that allocation won't succeed in this zone
    2564             :                          * so we defer compaction there. If it ends up
    2565             :                          * succeeding after all, it will be reset.
    2566             :                          */
    2567           0 :                         defer_compaction(zone, order);
    2568             : 
    2569             :                 /*
    2570             :                  * We might have stopped compacting due to need_resched() in
    2571             :                  * async compaction, or due to a fatal signal detected. In that
    2572             :                  * case do not try further zones
    2573             :                  */
    2574           0 :                 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
    2575           0 :                                         || fatal_signal_pending(current))
    2576             :                         break;
    2577             :         }
    2578             : 
    2579             :         return rc;
    2580             : }
    2581             : 
    2582             : /*
    2583             :  * Compact all zones within a node until each zone's fragmentation score
    2584             :  * falls within the proactive compaction thresholds (as determined by the
    2585             :  * proactiveness tunable).
    2586             :  *
    2587             :  * It is possible that the function returns before reaching score targets
    2588             :  * due to various back-off conditions, such as contention on per-node or
    2589             :  * per-zone locks.
    2590             :  */
    2591           0 : static void proactive_compact_node(pg_data_t *pgdat)
    2592             : {
    2593           0 :         int zoneid;
    2594           0 :         struct zone *zone;
    2595           0 :         struct compact_control cc = {
    2596             :                 .order = -1,
    2597             :                 .mode = MIGRATE_SYNC_LIGHT,
    2598             :                 .ignore_skip_hint = true,
    2599             :                 .whole_zone = true,
    2600             :                 .gfp_mask = GFP_KERNEL,
    2601             :                 .proactive_compaction = true,
    2602             :         };
    2603             : 
    2604           0 :         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
    2605           0 :                 zone = &pgdat->node_zones[zoneid];
    2606           0 :                 if (!populated_zone(zone))
    2607           0 :                         continue;
    2608             : 
    2609           0 :                 cc.zone = zone;
    2610             : 
    2611           0 :                 compact_zone(&cc, NULL);
    2612             : 
    2613           0 :                 VM_BUG_ON(!list_empty(&cc.freepages));
    2614           0 :                 VM_BUG_ON(!list_empty(&cc.migratepages));
    2615             :         }
    2616           0 : }
    2617             : 
    2618             : /* Compact all zones within a node */
    2619           0 : static void compact_node(int nid)
    2620             : {
    2621           0 :         pg_data_t *pgdat = NODE_DATA(nid);
    2622           0 :         int zoneid;
    2623           0 :         struct zone *zone;
    2624           0 :         struct compact_control cc = {
    2625             :                 .order = -1,
    2626             :                 .mode = MIGRATE_SYNC,
    2627             :                 .ignore_skip_hint = true,
    2628             :                 .whole_zone = true,
    2629             :                 .gfp_mask = GFP_KERNEL,
    2630             :         };
    2631             : 
    2632             : 
    2633           0 :         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
    2634             : 
    2635           0 :                 zone = &pgdat->node_zones[zoneid];
    2636           0 :                 if (!populated_zone(zone))
    2637           0 :                         continue;
    2638             : 
    2639           0 :                 cc.zone = zone;
    2640             : 
    2641           0 :                 compact_zone(&cc, NULL);
    2642             : 
    2643           0 :                 VM_BUG_ON(!list_empty(&cc.freepages));
    2644           0 :                 VM_BUG_ON(!list_empty(&cc.migratepages));
    2645             :         }
    2646           0 : }
    2647             : 
    2648             : /* Compact all nodes in the system */
    2649           0 : static void compact_nodes(void)
    2650             : {
    2651           0 :         int nid;
    2652             : 
    2653             :         /* Flush pending updates to the LRU lists */
    2654           0 :         lru_add_drain_all();
    2655             : 
    2656           0 :         for_each_online_node(nid)
    2657           0 :                 compact_node(nid);
    2658           0 : }
    2659             : 
    2660             : /* The written value is actually unused; all memory is compacted */
    2661             : int sysctl_compact_memory;
    2662             : 
    2663             : /*
    2664             :  * Tunable for proactive compaction. It determines how
    2665             :  * aggressively the kernel should compact memory in the
    2666             :  * background. It takes values in the range [0, 100].
    2667             :  */
    2668             : unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
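The tunable is exposed through sysctl as vm.compaction_proactiveness; a minimal usage sketch (the value 40 is only an example):

    sysctl vm.compaction_proactiveness        # read the current setting, 20 by default
    sysctl -w vm.compaction_proactiveness=40  # ask the kernel to compact more aggressively in the background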
    2669             : 
    2670             : /*
    2671             :  * This is the entry point for compacting all nodes via
    2672             :  * /proc/sys/vm/compact_memory
    2673             :  */
    2674           0 : int sysctl_compaction_handler(struct ctl_table *table, int write,
    2675             :                         void *buffer, size_t *length, loff_t *ppos)
    2676             : {
    2677           0 :         if (write)
    2678           0 :                 compact_nodes();
    2679             : 
    2680           0 :         return 0;
    2681             : }
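In other words, any write to the file named in the comment above triggers a full, synchronous compaction of every online node; the written value itself is ignored. For example:

    echo 1 > /proc/sys/vm/compact_memory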
    2682             : 
    2683             : #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
    2684           0 : static ssize_t sysfs_compact_node(struct device *dev,
    2685             :                         struct device_attribute *attr,
    2686             :                         const char *buf, size_t count)
    2687             : {
    2688           0 :         int nid = dev->id;
    2689             : 
    2690           0 :         if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
    2691             :                 /* Flush pending updates to the LRU lists */
    2692           0 :                 lru_add_drain_all();
    2693             : 
    2694           0 :                 compact_node(nid);
    2695             :         }
    2696             : 
    2697           0 :         return count;
    2698             : }
    2699             : static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
    2700             : 
    2701           1 : int compaction_register_node(struct node *node)
    2702             : {
    2703           1 :         return device_create_file(&node->dev, &dev_attr_compact);
    2704             : }
    2705             : 
    2706           0 : void compaction_unregister_node(struct node *node)
    2707             : {
    2708           0 :         return device_remove_file(&node->dev, &dev_attr_compact);
    2709             : }
    2710             : #endif /* CONFIG_SYSFS && CONFIG_NUMA */
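On NUMA systems with sysfs enabled, the write-only attribute registered above offers the same operation per node; it normally appears as /sys/devices/system/node/nodeN/compact, so compacting only node 0 would look like the following (path shown as typically laid out, adjust for your system):

    echo 1 > /sys/devices/system/node/node0/compact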
    2711             : 
    2712         239 : static inline bool kcompactd_work_requested(pg_data_t *pgdat)
    2713             : {
    2714         239 :         return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
    2715             : }
    2716             : 
    2717           0 : static bool kcompactd_node_suitable(pg_data_t *pgdat)
    2718             : {
    2719           0 :         int zoneid;
    2720           0 :         struct zone *zone;
    2721           0 :         enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
    2722             : 
    2723           0 :         for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
    2724           0 :                 zone = &pgdat->node_zones[zoneid];
    2725             : 
    2726           0 :                 if (!populated_zone(zone))
    2727           0 :                         continue;
    2728             : 
    2729           0 :                 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
    2730             :                                         highest_zoneidx) == COMPACT_CONTINUE)
    2731             :                         return true;
    2732             :         }
    2733             : 
    2734             :         return false;
    2735             : }
    2736             : 
    2737           0 : static void kcompactd_do_work(pg_data_t *pgdat)
    2738             : {
    2739             :         /*
    2740             :          * With no special task, compact all zones so that a page of requested
    2741             :          * order is allocatable.
    2742             :          */
    2743           0 :         int zoneid;
    2744           0 :         struct zone *zone;
    2745           0 :         struct compact_control cc = {
    2746             :                 .order = pgdat->kcompactd_max_order,
    2747           0 :                 .search_order = pgdat->kcompactd_max_order,
    2748           0 :                 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
    2749             :                 .mode = MIGRATE_SYNC_LIGHT,
    2750             :                 .ignore_skip_hint = false,
    2751             :                 .gfp_mask = GFP_KERNEL,
    2752             :         };
    2753           0 :         trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
    2754             :                                                         cc.highest_zoneidx);
    2755           0 :         count_compact_event(KCOMPACTD_WAKE);
    2756             : 
    2757           0 :         for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
    2758           0 :                 int status;
    2759             : 
    2760           0 :                 zone = &pgdat->node_zones[zoneid];
    2761           0 :                 if (!populated_zone(zone))
    2762           0 :                         continue;
    2763             : 
    2764           0 :                 if (compaction_deferred(zone, cc.order))
    2765           0 :                         continue;
    2766             : 
    2767           0 :                 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
    2768             :                                                         COMPACT_CONTINUE)
    2769           0 :                         continue;
    2770             : 
    2771           0 :                 if (kthread_should_stop())
    2772           0 :                         return;
    2773             : 
    2774           0 :                 cc.zone = zone;
    2775           0 :                 status = compact_zone(&cc, NULL);
    2776             : 
    2777           0 :                 if (status == COMPACT_SUCCESS) {
    2778           0 :                         compaction_defer_reset(zone, cc.order, false);
    2779           0 :                 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
    2780             :                         /*
    2781             :                          * Buddy pages may become stranded on pcps that could
    2782             :                          * otherwise coalesce on the zone's free area for
    2783             :                          * order >= cc.order.  This is ratelimited by the
    2784             :                          * upcoming deferral.
    2785             :                          */
    2786           0 :                         drain_all_pages(zone);
    2787             : 
    2788             :                         /*
    2789             :                          * We use sync migration mode here, so we defer like
    2790             :                          * sync direct compaction does.
    2791             :                          */
    2792           0 :                         defer_compaction(zone, cc.order);
    2793             :                 }
    2794             : 
    2795           0 :                 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
    2796           0 :                                      cc.total_migrate_scanned);
    2797           0 :                 count_compact_events(KCOMPACTD_FREE_SCANNED,
    2798           0 :                                      cc.total_free_scanned);
    2799             : 
    2800           0 :                 VM_BUG_ON(!list_empty(&cc.freepages));
    2801           0 :                 VM_BUG_ON(!list_empty(&cc.migratepages));
    2802             :         }
    2803             : 
    2804             :         /*
    2805             :          * Regardless of success, we are done until woken up next. But remember
    2806             :          * the requested order/highest_zoneidx in case it was higher/tighter
    2807             :          * than our current ones
    2808             :          */
    2809           0 :         if (pgdat->kcompactd_max_order <= cc.order)
    2810           0 :                 pgdat->kcompactd_max_order = 0;
    2811           0 :         if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
    2812           0 :                 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
    2813             : }
    2814             : 
    2815           1 : void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
    2816             : {
    2817           1 :         if (!order)
    2818             :                 return;
    2819             : 
    2820           0 :         if (pgdat->kcompactd_max_order < order)
    2821           0 :                 pgdat->kcompactd_max_order = order;
    2822             : 
    2823           0 :         if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
    2824           0 :                 pgdat->kcompactd_highest_zoneidx = highest_zoneidx;
    2825             : 
    2826             :         /*
    2827             :          * Pairs with implicit barrier in wait_event_freezable()
    2828             :          * such that wakeups are not missed.
    2829             :          */
    2830           0 :         if (!wq_has_sleeper(&pgdat->kcompactd_wait))
    2831             :                 return;
    2832             : 
    2833           0 :         if (!kcompactd_node_suitable(pgdat))
    2834             :                 return;
    2835             : 
    2836           0 :         trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
    2837             :                                                         highest_zoneidx);
    2838           0 :         wake_up_interruptible(&pgdat->kcompactd_wait);
    2839             : }
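
wakeup_kcompactd() follows the standard waker/sleeper discipline: it publishes the request (kcompactd_max_order, kcompactd_highest_zoneidx) before checking wq_has_sleeper(), and the sleeper re-evaluates its condition inside wait_event_freezable(), so a wakeup cannot be lost. A stripped-down sketch of that pattern with the ordinary waitqueue API follows; the demo_* identifiers are placeholders introduced for the sketch, only the API calls are real.

        /* Waker/sleeper pattern as used by wakeup_kcompactd(); demo_* names
         * are placeholders for this sketch. */
        #include <linux/wait.h>
        #include <linux/freezer.h>
        #include <linux/kthread.h>

        static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
        static int demo_requested_order;

        static void demo_wakeup(int order)
        {
                demo_requested_order = order;    /* publish the request first */

                if (!wq_has_sleeper(&demo_wait)) /* nobody waiting: nothing to do */
                        return;
                wake_up_interruptible(&demo_wait);
        }

        static int demo_thread(void *unused)
        {
                while (!kthread_should_stop()) {
                        /* re-checking the condition here is what prevents lost wakeups */
                        wait_event_freezable(demo_wait,
                                demo_requested_order || kthread_should_stop());
                        demo_requested_order = 0;   /* ... handle the request ... */
                }
                return 0;
        }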
    2840             : 
    2841             : /*
    2842             :  * The background compaction daemon, started as a kernel thread
    2843             :  * from the init process.
    2844             :  */
    2845           1 : static int kcompactd(void *p)
    2846             : {
    2847           1 :         pg_data_t *pgdat = (pg_data_t*)p;
    2848           1 :         struct task_struct *tsk = current;
    2849           1 :         unsigned int proactive_defer = 0;
    2850             : 
    2851           1 :         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
    2852             : 
    2853           1 :         if (!cpumask_empty(cpumask))
    2854           1 :                 set_cpus_allowed_ptr(tsk, cpumask);
    2855             : 
    2856           1 :         set_freezable();
    2857             : 
    2858           1 :         pgdat->kcompactd_max_order = 0;
    2859           1 :         pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
    2860             : 
    2861          81 :         while (!kthread_should_stop()) {
    2862          80 :                 unsigned long pflags;
    2863             : 
    2864          80 :                 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
    2865         159 :                 if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
    2866             :                         kcompactd_work_requested(pgdat),
    2867             :                         msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) {
    2868             : 
    2869           0 :                         psi_memstall_enter(&pflags);
    2870           0 :                         kcompactd_do_work(pgdat);
    2871           0 :                         psi_memstall_leave(&pflags);
    2872          80 :                         continue;
    2873             :                 }
    2874             : 
    2875             :                 /* kcompactd wait timeout */
    2876          79 :                 if (should_proactive_compact_node(pgdat)) {
    2877           0 :                         unsigned int prev_score, score;
    2878             : 
    2879           0 :                         if (proactive_defer) {
    2880           0 :                                 proactive_defer--;
    2881           0 :                                 continue;
    2882             :                         }
    2883           0 :                         prev_score = fragmentation_score_node(pgdat);
    2884           0 :                         proactive_compact_node(pgdat);
    2885           0 :                         score = fragmentation_score_node(pgdat);
    2886             :                         /*
    2887             :                          * Defer proactive compaction if the fragmentation
    2888             :                          * score did not go down i.e. no progress made.
    2889             :                          */
    2890           0 :                         proactive_defer = score < prev_score ?
    2891           0 :                                         0 : 1 << COMPACT_MAX_DEFER_SHIFT;
    2892             :                 }
    2893             :         }
    2894             : 
    2895           0 :         return 0;
    2896             : }
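
Each pass through the loop above ends in one of two ways: a nonzero return from wait_event_freezable_timeout() means wakeup_kcompactd() requested work for a specific order, while zero means the 500 ms HPAGE_FRAG_CHECK_INTERVAL_MSEC elapsed with nothing requested and only the proactive path is considered. When a proactive pass makes no progress, proactive_defer is loaded with 1 << COMPACT_MAX_DEFER_SHIFT; with the kernel's shift of 6 that is 64 skipped checks, roughly 32 seconds of backoff at one check per 500 ms. The control flow, condensed with the branch bodies elided:

        /* Condensed view of the kcompactd() loop; the names are the ones used
         * in this file, only the branch bodies are elided. */
        while (!kthread_should_stop()) {
                if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
                                kcompactd_work_requested(pgdat),
                                msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) {
                        /* nonzero: a wakeup asked for work at a specific order */
                        kcompactd_do_work(pgdat);
                        continue;
                }

                /* zero: quiet timeout - consider proactive compaction,
                 * subject to the proactive_defer backoff */
                if (should_proactive_compact_node(pgdat))
                        proactive_compact_node(pgdat);
        }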
    2897             : 
    2898             : /*
    2899             :  * This kcompactd start function will be called by init and node-hot-add.
    2900             :  * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
    2901             :  */
    2902           1 : int kcompactd_run(int nid)
    2903             : {
    2904           1 :         pg_data_t *pgdat = NODE_DATA(nid);
    2905           1 :         int ret = 0;
    2906             : 
    2907           1 :         if (pgdat->kcompactd)
    2908             :                 return 0;
    2909             : 
    2910           1 :         pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
    2911           1 :         if (IS_ERR(pgdat->kcompactd)) {
    2912           0 :                 pr_err("Failed to start kcompactd on node %d\n", nid);
    2913           0 :                 ret = PTR_ERR(pgdat->kcompactd);
    2914           0 :                 pgdat->kcompactd = NULL;
    2915             :         }
    2916             :         return ret;
    2917             : }
    2918             : 
    2919             : /*
    2920             :  * Called by memory hotplug when all memory in a node is offlined. Caller must
    2921             :  * hold mem_hotplug_begin/end().
    2922             :  */
    2923           0 : void kcompactd_stop(int nid)
    2924             : {
    2925           0 :         struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
    2926             : 
    2927           0 :         if (kcompactd) {
    2928           0 :                 kthread_stop(kcompactd);
    2929           0 :                 NODE_DATA(nid)->kcompactd = NULL;
    2930             :         }
    2931           0 : }
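
Taken together, kcompactd_run() and kcompactd_stop() give memory hotplug a per-node switch for the daemon: run is idempotent (it returns 0 when the thread already exists) and stop only acts when a thread was started. A hypothetical hotplug-side pairing could look like the sketch below; the demo_node_* wrappers are assumptions made for illustration, not the actual hotplug entry points.

        /* Hypothetical pairing of the two hooks; demo_node_* names are
         * assumptions, not the real memory-hotplug callers. */
        #include <linux/compaction.h>
        #include <linux/printk.h>

        static void demo_node_memory_online(int nid)
        {
                if (kcompactd_run(nid))
                        pr_warn("kcompactd did not start on node %d\n", nid);
        }

        static void demo_node_memory_offline(int nid)
        {
                /* per the comment above, mem_hotplug_begin/end() must be held */
                kcompactd_stop(nid);
        }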
    2932             : 
    2933             : /*
    2934             :  * It's optimal to keep each node's kcompactd on the same CPUs as that
    2935             :  * node's memory, but that is not required for correctness. So if the
    2936             :  * last cpu in a node goes away, kcompactd is allowed to run anywhere;
    2937             :  * when the first one comes back, its cpu binding is restored.
    2938             :  */
    2939           0 : static int kcompactd_cpu_online(unsigned int cpu)
    2940             : {
    2941           0 :         int nid;
    2942             : 
    2943           0 :         for_each_node_state(nid, N_MEMORY) {
    2944           0 :                 pg_data_t *pgdat = NODE_DATA(nid);
    2945           0 :                 const struct cpumask *mask;
    2946             : 
    2947           0 :                 mask = cpumask_of_node(pgdat->node_id);
    2948             : 
    2949           0 :                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
    2950             :                         /* One of our CPUs online: restore mask */
    2951           0 :                         set_cpus_allowed_ptr(pgdat->kcompactd, mask);
    2952             :         }
    2953           0 :         return 0;
    2954             : }
    2955             : 
    2956           1 : static int __init kcompactd_init(void)
    2957             : {
    2958           1 :         int nid;
    2959           1 :         int ret;
    2960             : 
    2961           1 :         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
    2962             :                                         "mm/compaction:online",
    2963             :                                         kcompactd_cpu_online, NULL);
    2964           1 :         if (ret < 0) {
    2965           0 :                 pr_err("kcompactd: failed to register hotplug callbacks.\n");
    2966           0 :                 return ret;
    2967             :         }
    2968             : 
    2969           2 :         for_each_node_state(nid, N_MEMORY)
    2970           1 :                 kcompactd_run(nid);
    2971             :         return 0;
    2972             : }
    2973             : subsys_initcall(kcompactd_init)
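
kcompactd_init() registers its online callback through the dynamic hotplug state CPUHP_AP_ONLINE_DYN and passes NULL as the teardown callback, since nothing needs undoing when a CPU goes away (the thread simply runs unpinned until a node CPU returns, as the comment above kcompactd_cpu_online() explains). A minimal sketch of the same registration pattern, with placeholder demo_* names, is:

        /* cpuhp_setup_state_nocalls() registration pattern as used by
         * kcompactd_init(); demo_* names are placeholders.  With
         * CPUHP_AP_ONLINE_DYN the call returns the allocated state number
         * (>= 0) on success, so only negative values are errors. */
        #include <linux/cpuhotplug.h>
        #include <linux/init.h>

        static int demo_cpu_online(unsigned int cpu)
        {
                /* e.g. re-pin per-node kthreads whose node regained a CPU */
                return 0;
        }

        static int __init demo_hotplug_init(void)
        {
                int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                                    "mm/demo:online",
                                                    demo_cpu_online,
                                                    NULL /* no teardown needed */);
                return ret < 0 ? ret : 0;
        }
        subsys_initcall(demo_hotplug_init)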
    2974             : 
    2975             : #endif /* CONFIG_COMPACTION */

Generated by: LCOV version 1.14