LCOV - code coverage report
Current view: top level - mm - migrate.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:             0      786       0.0 %
Functions:         0       43       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Memory Migration functionality - linux/mm/migrate.c
       4             :  *
       5             :  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
       6             :  *
       7             :  * Page migration was first developed in the context of the memory hotplug
       8             :  * project. The main authors of the migration code are:
       9             :  *
      10             :  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
      11             :  * Hirokazu Takahashi <taka@valinux.co.jp>
      12             :  * Dave Hansen <haveblue@us.ibm.com>
      13             :  * Christoph Lameter
      14             :  */
      15             : 
      16             : #include <linux/migrate.h>
      17             : #include <linux/export.h>
      18             : #include <linux/swap.h>
      19             : #include <linux/swapops.h>
      20             : #include <linux/pagemap.h>
      21             : #include <linux/buffer_head.h>
      22             : #include <linux/mm_inline.h>
      23             : #include <linux/nsproxy.h>
      24             : #include <linux/pagevec.h>
      25             : #include <linux/ksm.h>
      26             : #include <linux/rmap.h>
      27             : #include <linux/topology.h>
      28             : #include <linux/cpu.h>
      29             : #include <linux/cpuset.h>
      30             : #include <linux/writeback.h>
      31             : #include <linux/mempolicy.h>
      32             : #include <linux/vmalloc.h>
      33             : #include <linux/security.h>
      34             : #include <linux/backing-dev.h>
      35             : #include <linux/compaction.h>
      36             : #include <linux/syscalls.h>
      37             : #include <linux/compat.h>
      38             : #include <linux/hugetlb.h>
      39             : #include <linux/hugetlb_cgroup.h>
      40             : #include <linux/gfp.h>
      41             : #include <linux/pagewalk.h>
      42             : #include <linux/pfn_t.h>
      43             : #include <linux/memremap.h>
      44             : #include <linux/userfaultfd_k.h>
      45             : #include <linux/balloon_compaction.h>
      46             : #include <linux/mmu_notifier.h>
      47             : #include <linux/page_idle.h>
      48             : #include <linux/page_owner.h>
      49             : #include <linux/sched/mm.h>
      50             : #include <linux/ptrace.h>
      51             : #include <linux/oom.h>
      52             : 
      53             : #include <asm/tlbflush.h>
      54             : 
      55             : #define CREATE_TRACE_POINTS
      56             : #include <trace/events/migrate.h>
      57             : 
      58             : #include "internal.h"
      59             : 
      60             : /*
      61             :  * migrate_prep() needs to be called before we start compiling a list of pages
      62             :  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
      63             :  * undesirable, use migrate_prep_local()
      64             :  */
      65           0 : void migrate_prep(void)
      66             : {
      67             :         /*
      68             :          * Clear the LRU lists so pages can be isolated.
      69             :          * Note that pages may be moved off the LRU after we have
      70             :          * drained them. Those pages will fail to migrate like other
      71             :          * pages that may be busy.
      72             :          */
      73           0 :         lru_add_drain_all();
      74           0 : }
      75             : 
      76             : /* Do the necessary work of migrate_prep but not if it involves other CPUs */
      77           0 : void migrate_prep_local(void)
      78             : {
      79           0 :         lru_add_drain();
      80           0 : }
      81             : 
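A minimal sketch of the calling sequence these two helpers are designed for, modelled on other in-tree users of migrate_pages(): drain the LRU caches, isolate the page, hand the list to migrate_pages(), and put everything back on failure. The helper name migrate_one_page_to_node(), the target-node argument and the choice of alloc_migration_target()/MR_SYSCALL are illustrative assumptions, not code from this file.

        /* Hypothetical caller, for illustration only. */
        static int migrate_one_page_to_node(struct page *page, int target_nid)
        {
                LIST_HEAD(pagelist);
                struct migration_target_control mtc = {
                        .nid = target_nid,
                        .gfp_mask = GFP_HIGHUSER_MOVABLE,
                };
                int err;

                migrate_prep();                 /* drain per-CPU LRU caches on all CPUs */

                err = isolate_lru_page(page);   /* take the page off its LRU list */
                if (err)
                        return err;
                list_add_tail(&page->lru, &pagelist);
                mod_node_page_state(page_pgdat(page),
                                    NR_ISOLATED_ANON + page_is_file_lru(page),
                                    thp_nr_pages(page));

                err = migrate_pages(&pagelist, alloc_migration_target, NULL,
                                    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);   /* undo the isolation */
                return err;
        }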
      82           0 : int isolate_movable_page(struct page *page, isolate_mode_t mode)
      83             : {
      84           0 :         struct address_space *mapping;
      85             : 
      86             :         /*
      87             :          * Avoid burning cycles with pages that are yet under __free_pages(),
      88             :          * or just got freed under us.
      89             :          *
      90             :          * In case we 'win' a race for a movable page being freed under us and
      91             :          * raise its refcount preventing __free_pages() from doing its job
       92             :          * the put_page() at the end of this block will take care of
       93             :          * releasing this page, thus avoiding a nasty leak.
      94             :          */
      95           0 :         if (unlikely(!get_page_unless_zero(page)))
      96           0 :                 goto out;
      97             : 
      98             :         /*
      99             :          * Check PageMovable before holding a PG_lock because page's owner
      100             :          * assumes that nobody touches the PG_lock of a newly allocated page,
      101             :          * so unconditionally grabbing the lock would break the owner's assumptions.
     102             :          */
     103           0 :         if (unlikely(!__PageMovable(page)))
     104           0 :                 goto out_putpage;
     105             :         /*
     106             :          * As movable pages are not isolated from LRU lists, concurrent
     107             :          * compaction threads can race against page migration functions
      108             :          * as well as against the release of a page.
     109             :          *
     110             :          * In order to avoid having an already isolated movable page
     111             :          * being (wrongly) re-isolated while it is under migration,
     112             :          * or to avoid attempting to isolate pages being released,
      113             :          * let's be sure we have the page lock
     114             :          * before proceeding with the movable page isolation steps.
     115             :          */
     116           0 :         if (unlikely(!trylock_page(page)))
     117           0 :                 goto out_putpage;
     118             : 
     119           0 :         if (!PageMovable(page) || PageIsolated(page))
     120           0 :                 goto out_no_isolated;
     121             : 
     122           0 :         mapping = page_mapping(page);
     123           0 :         VM_BUG_ON_PAGE(!mapping, page);
     124             : 
     125           0 :         if (!mapping->a_ops->isolate_page(page, mode))
     126           0 :                 goto out_no_isolated;
     127             : 
     128             :         /* Driver shouldn't use PG_isolated bit of page->flags */
     129           0 :         WARN_ON_ONCE(PageIsolated(page));
     130           0 :         __SetPageIsolated(page);
     131           0 :         unlock_page(page);
     132             : 
     133           0 :         return 0;
     134             : 
     135           0 : out_no_isolated:
     136           0 :         unlock_page(page);
     137           0 : out_putpage:
     138           0 :         put_page(page);
     139             : out:
     140             :         return -EBUSY;
     141             : }
     142             : 
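isolate_movable_page() only succeeds for pages whose owner has opted in to non-LRU migration. A hedged sketch of the driver-side wiring it relies on follows; __SetPageMovable() and the three address_space_operations callbacks are the real interface (see Documentation/vm/page_migration.rst), while the balloon_* names are placeholders.

        static bool balloon_isolate(struct page *page, isolate_mode_t mode)
        {
                /* detach the page from the driver's internal tracking; true on success */
                return true;
        }

        static int balloon_migrate(struct address_space *mapping,
                                   struct page *newpage, struct page *page,
                                   enum migrate_mode mode)
        {
                /* transfer driver state from page to newpage */
                return MIGRATEPAGE_SUCCESS;
        }

        static void balloon_putback(struct page *page)
        {
                /* re-insert the page into the driver's internal tracking */
        }

        static const struct address_space_operations balloon_aops = {
                .isolate_page   = balloon_isolate,
                .migratepage    = balloon_migrate,
                .putback_page   = balloon_putback,
        };

        static void balloon_track_page(struct address_space *mapping, struct page *page)
        {
                /* sets PAGE_MAPPING_MOVABLE so __PageMovable()/PageMovable() succeed */
                __SetPageMovable(page, mapping);
        }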
      143             : /* It should be called on a page which is PG_movable */
     144           0 : void putback_movable_page(struct page *page)
     145             : {
     146           0 :         struct address_space *mapping;
     147             : 
     148           0 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     149           0 :         VM_BUG_ON_PAGE(!PageMovable(page), page);
     150           0 :         VM_BUG_ON_PAGE(!PageIsolated(page), page);
     151             : 
     152           0 :         mapping = page_mapping(page);
     153           0 :         mapping->a_ops->putback_page(page);
     154           0 :         __ClearPageIsolated(page);
     155           0 : }
     156             : 
     157             : /*
     158             :  * Put previously isolated pages back onto the appropriate lists
     159             :  * from where they were once taken off for compaction/migration.
     160             :  *
     161             :  * This function shall be used whenever the isolated pageset has been
      162             :  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
     163             :  * and isolate_huge_page().
     164             :  */
     165           0 : void putback_movable_pages(struct list_head *l)
     166             : {
     167           0 :         struct page *page;
     168           0 :         struct page *page2;
     169             : 
     170           0 :         list_for_each_entry_safe(page, page2, l, lru) {
     171           0 :                 if (unlikely(PageHuge(page))) {
     172             :                         putback_active_hugepage(page);
     173             :                         continue;
     174             :                 }
     175           0 :                 list_del(&page->lru);
     176             :                 /*
      177             :                  * We isolated a non-LRU movable page, so here we can use
     178             :                  * __PageMovable because LRU page's mapping cannot have
     179             :                  * PAGE_MAPPING_MOVABLE.
     180             :                  */
     181           0 :                 if (unlikely(__PageMovable(page))) {
     182           0 :                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
     183           0 :                         lock_page(page);
     184           0 :                         if (PageMovable(page))
     185           0 :                                 putback_movable_page(page);
     186             :                         else
     187           0 :                                 __ClearPageIsolated(page);
     188           0 :                         unlock_page(page);
     189           0 :                         put_page(page);
     190             :                 } else {
     191           0 :                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
     192           0 :                                         page_is_file_lru(page), -thp_nr_pages(page));
     193           0 :                         putback_lru_page(page);
     194             :                 }
     195             :         }
     196           0 : }
     197             : 
     198             : /*
     199             :  * Restore a potential migration pte to a working pte entry
     200             :  */
     201           0 : static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
     202             :                                  unsigned long addr, void *old)
     203             : {
     204           0 :         struct page_vma_mapped_walk pvmw = {
     205             :                 .page = old,
     206             :                 .vma = vma,
     207             :                 .address = addr,
     208             :                 .flags = PVMW_SYNC | PVMW_MIGRATION,
     209             :         };
     210           0 :         struct page *new;
     211           0 :         pte_t pte;
     212           0 :         swp_entry_t entry;
     213             : 
     214           0 :         VM_BUG_ON_PAGE(PageTail(page), page);
     215           0 :         while (page_vma_mapped_walk(&pvmw)) {
     216           0 :                 if (PageKsm(page))
     217             :                         new = page;
     218             :                 else
     219           0 :                         new = page - pvmw.page->index +
     220           0 :                                 linear_page_index(vma, pvmw.address);
     221             : 
     222             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     223             :                 /* PMD-mapped THP migration entry */
     224           0 :                 if (!pvmw.pte) {
     225           0 :                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
     226           0 :                         remove_migration_pmd(&pvmw, new);
     227           0 :                         continue;
     228             :                 }
     229             : #endif
     230             : 
     231           0 :                 get_page(new);
     232           0 :                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
     233           0 :                 if (pte_swp_soft_dirty(*pvmw.pte))
     234             :                         pte = pte_mksoft_dirty(pte);
     235             : 
     236             :                 /*
     237             :                  * Recheck VMA as permissions can change since migration started
     238             :                  */
     239           0 :                 entry = pte_to_swp_entry(*pvmw.pte);
     240           0 :                 if (is_write_migration_entry(entry))
     241           0 :                         pte = maybe_mkwrite(pte, vma);
     242             :                 else if (pte_swp_uffd_wp(*pvmw.pte))
     243             :                         pte = pte_mkuffd_wp(pte);
     244             : 
     245           0 :                 if (unlikely(is_device_private_page(new))) {
     246             :                         entry = make_device_private_entry(new, pte_write(pte));
     247             :                         pte = swp_entry_to_pte(entry);
     248             :                         if (pte_swp_soft_dirty(*pvmw.pte))
     249             :                                 pte = pte_swp_mksoft_dirty(pte);
     250             :                         if (pte_swp_uffd_wp(*pvmw.pte))
     251             :                                 pte = pte_swp_mkuffd_wp(pte);
     252             :                 }
     253             : 
     254             : #ifdef CONFIG_HUGETLB_PAGE
     255             :                 if (PageHuge(new)) {
     256             :                         pte = pte_mkhuge(pte);
     257             :                         pte = arch_make_huge_pte(pte, vma, new, 0);
     258             :                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     259             :                         if (PageAnon(new))
     260             :                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
     261             :                         else
     262             :                                 page_dup_rmap(new, true);
     263             :                 } else
     264             : #endif
     265             :                 {
     266           0 :                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     267             : 
     268           0 :                         if (PageAnon(new))
     269           0 :                                 page_add_anon_rmap(new, vma, pvmw.address, false);
     270             :                         else
     271           0 :                                 page_add_file_rmap(new, false);
     272             :                 }
     273           0 :                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
     274           0 :                         mlock_vma_page(new);
     275             : 
     276           0 :                 if (PageTransHuge(page) && PageMlocked(page))
     277           0 :                         clear_page_mlock(page);
     278             : 
     279             :                 /* No need to invalidate - it was non-present before */
     280           0 :                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
     281             :         }
     282             : 
     283           0 :         return true;
     284             : }
     285             : 
     286             : /*
     287             :  * Get rid of all migration entries and replace them by
     288             :  * references to the indicated page.
     289             :  */
     290           0 : void remove_migration_ptes(struct page *old, struct page *new, bool locked)
     291             : {
     292           0 :         struct rmap_walk_control rwc = {
     293             :                 .rmap_one = remove_migration_pte,
     294             :                 .arg = old,
     295             :         };
     296             : 
     297           0 :         if (locked)
     298           0 :                 rmap_walk_locked(new, &rwc);
     299             :         else
     300           0 :                 rmap_walk(new, &rwc);
     301           0 : }
     302             : 
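For context, remove_migration_ptes() is the second half of a pairing: the ptes are first replaced by migration entries, the data is copied, and then the entries are turned back into working ptes aimed at the new page. A condensed, illustrative sketch of that pairing (the real sequence, with locking and error handling, is __unmap_and_move() later in this file):

        static void migration_entry_roundtrip(struct page *page, struct page *newpage)
        {
                /* replace every pte mapping @page with a migration swap entry */
                try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK);

                /* ... copy data and page state from @page to @newpage here ... */

                /* turn the migration entries back into working ptes on @newpage */
                remove_migration_ptes(page, newpage, false);
        }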
     303             : /*
     304             :  * Something used the pte of a page under migration. We need to
     305             :  * get to the page and wait until migration is finished.
     306             :  * When we return from this function the fault will be retried.
     307             :  */
     308           0 : void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
     309             :                                 spinlock_t *ptl)
     310             : {
     311           0 :         pte_t pte;
     312           0 :         swp_entry_t entry;
     313           0 :         struct page *page;
     314             : 
     315           0 :         spin_lock(ptl);
     316           0 :         pte = *ptep;
     317           0 :         if (!is_swap_pte(pte))
     318           0 :                 goto out;
     319             : 
     320           0 :         entry = pte_to_swp_entry(pte);
     321           0 :         if (!is_migration_entry(entry))
     322           0 :                 goto out;
     323             : 
     324           0 :         page = migration_entry_to_page(entry);
     325             : 
     326             :         /*
     327             :          * Once page cache replacement of page migration started, page_count
     328             :          * is zero; but we must not call put_and_wait_on_page_locked() without
     329             :          * a ref. Use get_page_unless_zero(), and just fault again if it fails.
     330             :          */
     331           0 :         if (!get_page_unless_zero(page))
     332           0 :                 goto out;
     333           0 :         pte_unmap_unlock(ptep, ptl);
     334           0 :         put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
     335           0 :         return;
     336           0 : out:
     337           0 :         pte_unmap_unlock(ptep, ptl);
     338             : }
     339             : 
     340           0 : void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
     341             :                                 unsigned long address)
     342             : {
     343           0 :         spinlock_t *ptl = pte_lockptr(mm, pmd);
     344           0 :         pte_t *ptep = pte_offset_map(pmd, address);
     345           0 :         __migration_entry_wait(mm, ptep, ptl);
     346           0 : }
     347             : 
     348           0 : void migration_entry_wait_huge(struct vm_area_struct *vma,
     349             :                 struct mm_struct *mm, pte_t *pte)
     350             : {
     351           0 :         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
     352           0 :         __migration_entry_wait(mm, pte, ptl);
     353           0 : }
     354             : 
     355             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     356           0 : void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
     357             : {
     358           0 :         spinlock_t *ptl;
     359           0 :         struct page *page;
     360             : 
     361           0 :         ptl = pmd_lock(mm, pmd);
     362           0 :         if (!is_pmd_migration_entry(*pmd))
     363           0 :                 goto unlock;
     364           0 :         page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
     365           0 :         if (!get_page_unless_zero(page))
     366           0 :                 goto unlock;
     367           0 :         spin_unlock(ptl);
     368           0 :         put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
     369           0 :         return;
     370           0 : unlock:
     371           0 :         spin_unlock(ptl);
     372             : }
     373             : #endif
     374             : 
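These wait helpers are called from the fault paths when a fault trips over a migration entry. A simplified sketch of the pte-level caller, loosely modelled on do_swap_page() in mm/memory.c; handle_swap_pte() is a hypothetical name and the surrounding swap handling is elided.

        static vm_fault_t handle_swap_pte(struct vm_fault *vmf)
        {
                swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

                if (is_migration_entry(entry)) {
                        /* block until the migration finishes, then retry the fault */
                        migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
                        return 0;
                }
                /* ... normal swap-in handling would follow here ... */
                return 0;
        }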
     375           0 : static int expected_page_refs(struct address_space *mapping, struct page *page)
     376             : {
     377           0 :         int expected_count = 1;
     378             : 
     379             :         /*
     380             :          * Device private pages have an extra refcount as they are
     381             :          * ZONE_DEVICE pages.
     382             :          */
     383           0 :         expected_count += is_device_private_page(page);
     384           0 :         if (mapping)
     385           0 :                 expected_count += thp_nr_pages(page) + page_has_private(page);
     386             : 
     387           0 :         return expected_count;
     388             : }
     389             : 
     390             : /*
     391             :  * Replace the page in the mapping.
     392             :  *
     393             :  * The number of remaining references must be:
     394             :  * 1 for anonymous pages without a mapping
     395             :  * 2 for pages with a mapping
     396             :  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
     397             :  */
     398           0 : int migrate_page_move_mapping(struct address_space *mapping,
     399             :                 struct page *newpage, struct page *page, int extra_count)
     400             : {
     401           0 :         XA_STATE(xas, &mapping->i_pages, page_index(page));
     402           0 :         struct zone *oldzone, *newzone;
     403           0 :         int dirty;
     404           0 :         int expected_count = expected_page_refs(mapping, page) + extra_count;
     405           0 :         int nr = thp_nr_pages(page);
     406             : 
     407           0 :         if (!mapping) {
     408             :                 /* Anonymous page without mapping */
     409           0 :                 if (page_count(page) != expected_count)
     410             :                         return -EAGAIN;
     411             : 
     412             :                 /* No turning back from here */
     413           0 :                 newpage->index = page->index;
     414           0 :                 newpage->mapping = page->mapping;
     415           0 :                 if (PageSwapBacked(page))
     416           0 :                         __SetPageSwapBacked(newpage);
     417             : 
     418           0 :                 return MIGRATEPAGE_SUCCESS;
     419             :         }
     420             : 
     421           0 :         oldzone = page_zone(page);
     422           0 :         newzone = page_zone(newpage);
     423             : 
     424           0 :         xas_lock_irq(&xas);
     425           0 :         if (page_count(page) != expected_count || xas_load(&xas) != page) {
     426           0 :                 xas_unlock_irq(&xas);
     427           0 :                 return -EAGAIN;
     428             :         }
     429             : 
     430           0 :         if (!page_ref_freeze(page, expected_count)) {
     431           0 :                 xas_unlock_irq(&xas);
     432           0 :                 return -EAGAIN;
     433             :         }
     434             : 
     435             :         /*
     436             :          * Now we know that no one else is looking at the page:
     437             :          * no turning back from here.
     438             :          */
     439           0 :         newpage->index = page->index;
     440           0 :         newpage->mapping = page->mapping;
     441           0 :         page_ref_add(newpage, nr); /* add cache reference */
     442           0 :         if (PageSwapBacked(page)) {
     443           0 :                 __SetPageSwapBacked(newpage);
     444           0 :                 if (PageSwapCache(page)) {
     445             :                         SetPageSwapCache(newpage);
     446           0 :                         set_page_private(newpage, page_private(page));
     447             :                 }
     448             :         } else {
     449           0 :                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
     450             :         }
     451             : 
     452             :         /* Move dirty while page refs frozen and newpage not yet exposed */
     453           0 :         dirty = PageDirty(page);
     454           0 :         if (dirty) {
     455           0 :                 ClearPageDirty(page);
     456           0 :                 SetPageDirty(newpage);
     457             :         }
     458             : 
     459           0 :         xas_store(&xas, newpage);
     460           0 :         if (PageTransHuge(page)) {
     461             :                 int i;
     462             : 
     463           0 :                 for (i = 1; i < nr; i++) {
     464           0 :                         xas_next(&xas);
     465           0 :                         xas_store(&xas, newpage);
     466             :                 }
     467             :         }
     468             : 
     469             :         /*
     470             :          * Drop cache reference from old page by unfreezing
     471             :          * to one less reference.
     472             :          * We know this isn't the last reference.
     473             :          */
     474           0 :         page_ref_unfreeze(page, expected_count - nr);
     475             : 
     476           0 :         xas_unlock(&xas);
     477             :         /* Leave irq disabled to prevent preemption while updating stats */
     478             : 
     479             :         /*
     480             :          * If moved to a different zone then also account
     481             :          * the page for that zone. Other VM counters will be
     482             :          * taken care of when we establish references to the
     483             :          * new page and drop references to the old page.
     484             :          *
     485             :          * Note that anonymous pages are accounted for
     486             :          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
     487             :          * are mapped to swap space.
     488             :          */
     489           0 :         if (newzone != oldzone) {
     490           0 :                 struct lruvec *old_lruvec, *new_lruvec;
     491           0 :                 struct mem_cgroup *memcg;
     492             : 
     493           0 :                 memcg = page_memcg(page);
     494           0 :                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
     495           0 :                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
     496             : 
     497           0 :                 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
     498           0 :                 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
     499           0 :                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
     500           0 :                         __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
     501           0 :                         __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
     502             :                 }
     503             : #ifdef CONFIG_SWAP
     504             :                 if (PageSwapCache(page)) {
     505             :                         __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
     506             :                         __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
     507             :                 }
     508             : #endif
     509           0 :                 if (dirty && mapping_can_writeback(mapping)) {
     510           0 :                         __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
     511           0 :                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
     512           0 :                         __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
     513           0 :                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
     514             :                 }
     515             :         }
     516           0 :         local_irq_enable();
     517             : 
     518           0 :         return MIGRATEPAGE_SUCCESS;
     519             : }
     520             : EXPORT_SYMBOL(migrate_page_move_mapping);
     521             : 
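A filesystem whose pages carry private data typically calls migrate_page_move_mapping() from its own ->migratepage callback and then moves the private state itself. A hedged sketch, loosely following iomap_migrate_page(); "myfs" is a placeholder name.

        static int myfs_migrate_page(struct address_space *mapping,
                                     struct page *newpage, struct page *page,
                                     enum migrate_mode mode)
        {
                int ret;

                ret = migrate_page_move_mapping(mapping, newpage, page, 0);
                if (ret != MIGRATEPAGE_SUCCESS)
                        return ret;

                /* move fs-private state over to the new page */
                if (page_has_private(page))
                        attach_page_private(newpage, detach_page_private(page));

                if (mode != MIGRATE_SYNC_NO_COPY)
                        migrate_page_copy(newpage, page);
                else
                        migrate_page_states(newpage, page);
                return MIGRATEPAGE_SUCCESS;
        }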
     522             : /*
     523             :  * The expected number of remaining references is the same as that
     524             :  * of migrate_page_move_mapping().
     525             :  */
     526           0 : int migrate_huge_page_move_mapping(struct address_space *mapping,
     527             :                                    struct page *newpage, struct page *page)
     528             : {
     529           0 :         XA_STATE(xas, &mapping->i_pages, page_index(page));
     530           0 :         int expected_count;
     531             : 
     532           0 :         xas_lock_irq(&xas);
     533           0 :         expected_count = 2 + page_has_private(page);
     534           0 :         if (page_count(page) != expected_count || xas_load(&xas) != page) {
     535           0 :                 xas_unlock_irq(&xas);
     536           0 :                 return -EAGAIN;
     537             :         }
     538             : 
     539           0 :         if (!page_ref_freeze(page, expected_count)) {
     540           0 :                 xas_unlock_irq(&xas);
     541           0 :                 return -EAGAIN;
     542             :         }
     543             : 
     544           0 :         newpage->index = page->index;
     545           0 :         newpage->mapping = page->mapping;
     546             : 
     547           0 :         get_page(newpage);
     548             : 
     549           0 :         xas_store(&xas, newpage);
     550             : 
     551           0 :         page_ref_unfreeze(page, expected_count - 1);
     552             : 
     553           0 :         xas_unlock_irq(&xas);
     554             : 
     555           0 :         return MIGRATEPAGE_SUCCESS;
     556             : }
     557             : 
     558             : /*
     559             :  * Gigantic pages are so large that we do not guarantee that page++ pointer
     560             :  * arithmetic will work across the entire page.  We need something more
     561             :  * specialized.
     562             :  */
     563             : static void __copy_gigantic_page(struct page *dst, struct page *src,
     564             :                                 int nr_pages)
     565             : {
     566             :         int i;
     567             :         struct page *dst_base = dst;
     568             :         struct page *src_base = src;
     569             : 
     570             :         for (i = 0; i < nr_pages; ) {
     571             :                 cond_resched();
     572             :                 copy_highpage(dst, src);
     573             : 
     574             :                 i++;
     575             :                 dst = mem_map_next(dst, dst_base, i);
     576             :                 src = mem_map_next(src, src_base, i);
     577             :         }
     578             : }
     579             : 
     580           0 : static void copy_huge_page(struct page *dst, struct page *src)
     581             : {
     582           0 :         int i;
     583           0 :         int nr_pages;
     584             : 
     585           0 :         if (PageHuge(src)) {
     586             :                 /* hugetlbfs page */
     587             :                 struct hstate *h = page_hstate(src);
     588             :                 nr_pages = pages_per_huge_page(h);
     589             : 
     590             :                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
     591             :                         __copy_gigantic_page(dst, src, nr_pages);
     592             :                         return;
     593             :                 }
     594             :         } else {
     595             :                 /* thp page */
     596           0 :                 BUG_ON(!PageTransHuge(src));
     597           0 :                 nr_pages = thp_nr_pages(src);
     598             :         }
     599             : 
     600           0 :         for (i = 0; i < nr_pages; i++) {
     601           0 :                 cond_resched();
     602           0 :                 copy_highpage(dst + i, src + i);
     603             :         }
     604             : }
     605             : 
     606             : /*
     607             :  * Copy the page to its new location
     608             :  */
     609           0 : void migrate_page_states(struct page *newpage, struct page *page)
     610             : {
     611           0 :         int cpupid;
     612             : 
     613           0 :         if (PageError(page))
     614           0 :                 SetPageError(newpage);
     615           0 :         if (PageReferenced(page))
     616           0 :                 SetPageReferenced(newpage);
     617           0 :         if (PageUptodate(page))
     618           0 :                 SetPageUptodate(newpage);
     619           0 :         if (TestClearPageActive(page)) {
     620           0 :                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
     621           0 :                 SetPageActive(newpage);
     622           0 :         } else if (TestClearPageUnevictable(page))
     623           0 :                 SetPageUnevictable(newpage);
     624           0 :         if (PageWorkingset(page))
     625           0 :                 SetPageWorkingset(newpage);
     626           0 :         if (PageChecked(page))
     627           0 :                 SetPageChecked(newpage);
     628           0 :         if (PageMappedToDisk(page))
     629           0 :                 SetPageMappedToDisk(newpage);
     630             : 
     631             :         /* Move dirty on pages not done by migrate_page_move_mapping() */
     632           0 :         if (PageDirty(page))
     633           0 :                 SetPageDirty(newpage);
     634             : 
     635           0 :         if (page_is_young(page))
     636           0 :                 set_page_young(newpage);
     637           0 :         if (page_is_idle(page))
     638           0 :                 set_page_idle(newpage);
     639             : 
     640             :         /*
     641             :          * Copy NUMA information to the new page, to prevent over-eager
     642             :          * future migrations of this same page.
     643             :          */
     644           0 :         cpupid = page_cpupid_xchg_last(page, -1);
     645           0 :         page_cpupid_xchg_last(newpage, cpupid);
     646             : 
     647           0 :         ksm_migrate_page(newpage, page);
     648             :         /*
     649             :          * Please do not reorder this without considering how mm/ksm.c's
     650             :          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
     651             :          */
     652           0 :         if (PageSwapCache(page))
     653           0 :                 ClearPageSwapCache(page);
     654           0 :         ClearPagePrivate(page);
     655           0 :         set_page_private(page, 0);
     656             : 
     657             :         /*
     658             :          * If any waiters have accumulated on the new page then
     659             :          * wake them up.
     660             :          */
     661           0 :         if (PageWriteback(newpage))
     662           0 :                 end_page_writeback(newpage);
     663             : 
     664             :         /*
     665             :          * PG_readahead shares the same bit with PG_reclaim.  The above
     666             :          * end_page_writeback() may clear PG_readahead mistakenly, so set the
     667             :          * bit after that.
     668             :          */
     669           0 :         if (PageReadahead(page))
     670           0 :                 SetPageReadahead(newpage);
     671             : 
     672           0 :         copy_page_owner(page, newpage);
     673             : 
     674           0 :         if (!PageHuge(page))
     675           0 :                 mem_cgroup_migrate(page, newpage);
     676           0 : }
     677             : EXPORT_SYMBOL(migrate_page_states);
     678             : 
     679           0 : void migrate_page_copy(struct page *newpage, struct page *page)
     680             : {
     681           0 :         if (PageHuge(page) || PageTransHuge(page))
     682           0 :                 copy_huge_page(newpage, page);
     683             :         else
     684           0 :                 copy_highpage(newpage, page);
     685             : 
     686           0 :         migrate_page_states(newpage, page);
     687           0 : }
     688             : EXPORT_SYMBOL(migrate_page_copy);
     689             : 
     690             : /************************************************************
     691             :  *                    Migration functions
     692             :  ***********************************************************/
     693             : 
     694             : /*
     695             :  * Common logic to directly migrate a single LRU page suitable for
     696             :  * pages that do not use PagePrivate/PagePrivate2.
     697             :  *
     698             :  * Pages are locked upon entry and exit.
     699             :  */
     700           0 : int migrate_page(struct address_space *mapping,
     701             :                 struct page *newpage, struct page *page,
     702             :                 enum migrate_mode mode)
     703             : {
     704           0 :         int rc;
     705             : 
     706           0 :         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
     707             : 
     708           0 :         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
     709             : 
     710           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     711             :                 return rc;
     712             : 
     713           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     714           0 :                 migrate_page_copy(newpage, page);
     715             :         else
     716           0 :                 migrate_page_states(newpage, page);
     717             :         return MIGRATEPAGE_SUCCESS;
     718             : }
     719             : EXPORT_SYMBOL(migrate_page);
     720             : 
     721             : #ifdef CONFIG_BLOCK
     722             : /* Returns true if all buffers are successfully locked */
     723           0 : static bool buffer_migrate_lock_buffers(struct buffer_head *head,
     724             :                                                         enum migrate_mode mode)
     725             : {
     726           0 :         struct buffer_head *bh = head;
     727             : 
     728             :         /* Simple case, sync compaction */
     729           0 :         if (mode != MIGRATE_ASYNC) {
     730           0 :                 do {
     731           0 :                         lock_buffer(bh);
     732           0 :                         bh = bh->b_this_page;
     733             : 
     734           0 :                 } while (bh != head);
     735             : 
     736             :                 return true;
     737             :         }
     738             : 
     739             :         /* async case, we cannot block on lock_buffer so use trylock_buffer */
     740           0 :         do {
     741           0 :                 if (!trylock_buffer(bh)) {
     742             :                         /*
     743             :                          * We failed to lock the buffer and cannot stall in
     744             :                          * async migration. Release the taken locks
     745             :                          */
     746           0 :                         struct buffer_head *failed_bh = bh;
     747             :                         bh = head;
     748           0 :                         while (bh != failed_bh) {
     749           0 :                                 unlock_buffer(bh);
     750           0 :                                 bh = bh->b_this_page;
     751             :                         }
     752             :                         return false;
     753             :                 }
     754             : 
     755           0 :                 bh = bh->b_this_page;
     756           0 :         } while (bh != head);
     757             :         return true;
     758             : }
     759             : 
     760           0 : static int __buffer_migrate_page(struct address_space *mapping,
     761             :                 struct page *newpage, struct page *page, enum migrate_mode mode,
     762             :                 bool check_refs)
     763             : {
     764           0 :         struct buffer_head *bh, *head;
     765           0 :         int rc;
     766           0 :         int expected_count;
     767             : 
     768           0 :         if (!page_has_buffers(page))
     769           0 :                 return migrate_page(mapping, newpage, page, mode);
     770             : 
     771             :         /* Check whether page does not have extra refs before we do more work */
     772           0 :         expected_count = expected_page_refs(mapping, page);
     773           0 :         if (page_count(page) != expected_count)
     774             :                 return -EAGAIN;
     775             : 
     776           0 :         head = page_buffers(page);
     777           0 :         if (!buffer_migrate_lock_buffers(head, mode))
     778             :                 return -EAGAIN;
     779             : 
     780           0 :         if (check_refs) {
     781             :                 bool busy;
     782             :                 bool invalidated = false;
     783             : 
     784           0 : recheck_buffers:
     785           0 :                 busy = false;
     786           0 :                 spin_lock(&mapping->private_lock);
     787           0 :                 bh = head;
     788           0 :                 do {
     789           0 :                         if (atomic_read(&bh->b_count)) {
     790             :                                 busy = true;
     791             :                                 break;
     792             :                         }
     793           0 :                         bh = bh->b_this_page;
     794           0 :                 } while (bh != head);
     795           0 :                 if (busy) {
     796           0 :                         if (invalidated) {
     797           0 :                                 rc = -EAGAIN;
     798           0 :                                 goto unlock_buffers;
     799             :                         }
     800           0 :                         spin_unlock(&mapping->private_lock);
     801           0 :                         invalidate_bh_lrus();
     802           0 :                         invalidated = true;
     803           0 :                         goto recheck_buffers;
     804             :                 }
     805             :         }
     806             : 
     807           0 :         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
     808           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     809           0 :                 goto unlock_buffers;
     810             : 
     811           0 :         attach_page_private(newpage, detach_page_private(page));
     812             : 
     813           0 :         bh = head;
     814           0 :         do {
     815           0 :                 set_bh_page(bh, newpage, bh_offset(bh));
     816           0 :                 bh = bh->b_this_page;
     817             : 
     818           0 :         } while (bh != head);
     819             : 
     820           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     821           0 :                 migrate_page_copy(newpage, page);
     822             :         else
     823           0 :                 migrate_page_states(newpage, page);
     824             : 
     825             :         rc = MIGRATEPAGE_SUCCESS;
     826           0 : unlock_buffers:
     827           0 :         if (check_refs)
     828           0 :                 spin_unlock(&mapping->private_lock);
     829             :         bh = head;
     830           0 :         do {
     831           0 :                 unlock_buffer(bh);
     832           0 :                 bh = bh->b_this_page;
     833             : 
     834           0 :         } while (bh != head);
     835             : 
     836             :         return rc;
     837             : }
     838             : 
     839             : /*
     840             :  * Migration function for pages with buffers. This function can only be used
     841             :  * if the underlying filesystem guarantees that no other references to "page"
     842             :  * exist. For example attached buffer heads are accessed only under page lock.
     843             :  */
     844           0 : int buffer_migrate_page(struct address_space *mapping,
     845             :                 struct page *newpage, struct page *page, enum migrate_mode mode)
     846             : {
     847           0 :         return __buffer_migrate_page(mapping, newpage, page, mode, false);
     848             : }
     849             : EXPORT_SYMBOL(buffer_migrate_page);
     850             : 
     851             : /*
     852             :  * Same as above except that this variant is more careful and checks that there
     853             :  * are also no buffer head references. This function is the right one for
     854             :  * mappings where buffer heads are directly looked up and referenced (such as
     855             :  * block device mappings).
     856             :  */
     857           0 : int buffer_migrate_page_norefs(struct address_space *mapping,
     858             :                 struct page *newpage, struct page *page, enum migrate_mode mode)
     859             : {
     860           0 :         return __buffer_migrate_page(mapping, newpage, page, mode, true);
     861             : }
     862             : #endif
     863             : 
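By way of illustration, a mapping where buffer heads are looked up directly, such as a block device mapping, would pick the more careful variant (the block layer's def_blk_aops does this); the aops name below is a placeholder.

        static const struct address_space_operations blkdev_like_aops = {
                /* ... readpage, writepage, etc. elided ... */
                .migratepage    = buffer_migrate_page_norefs,
        };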
     864             : /*
     865             :  * Writeback a page to clean the dirty state
     866             :  */
     867           0 : static int writeout(struct address_space *mapping, struct page *page)
     868             : {
     869           0 :         struct writeback_control wbc = {
     870             :                 .sync_mode = WB_SYNC_NONE,
     871             :                 .nr_to_write = 1,
     872             :                 .range_start = 0,
     873             :                 .range_end = LLONG_MAX,
     874             :                 .for_reclaim = 1
     875             :         };
     876           0 :         int rc;
     877             : 
     878           0 :         if (!mapping->a_ops->writepage)
     879             :                 /* No write method for the address space */
     880             :                 return -EINVAL;
     881             : 
     882           0 :         if (!clear_page_dirty_for_io(page))
     883             :                 /* Someone else already triggered a write */
     884             :                 return -EAGAIN;
     885             : 
     886             :         /*
     887             :          * A dirty page may imply that the underlying filesystem has
     888             :          * the page on some queue. So the page must be clean for
      889             :          * migration. Writeout may mean we lose the lock and the
     890             :          * page state is no longer what we checked for earlier.
     891             :          * At this point we know that the migration attempt cannot
     892             :          * be successful.
     893             :          */
     894           0 :         remove_migration_ptes(page, page, false);
     895             : 
     896           0 :         rc = mapping->a_ops->writepage(page, &wbc);
     897             : 
     898           0 :         if (rc != AOP_WRITEPAGE_ACTIVATE)
     899             :                 /* unlocked. Relock */
     900           0 :                 lock_page(page);
     901             : 
     902           0 :         return (rc < 0) ? -EIO : -EAGAIN;
     903             : }
     904             : 
     905             : /*
     906             :  * Default handling if a filesystem does not provide a migration function.
     907             :  */
     908           0 : static int fallback_migrate_page(struct address_space *mapping,
     909             :         struct page *newpage, struct page *page, enum migrate_mode mode)
     910             : {
     911           0 :         if (PageDirty(page)) {
     912             :                 /* Only writeback pages in full synchronous migration */
     913           0 :                 switch (mode) {
     914             :                 case MIGRATE_SYNC:
     915             :                 case MIGRATE_SYNC_NO_COPY:
     916           0 :                         break;
     917             :                 default:
     918             :                         return -EBUSY;
     919             :                 }
     920           0 :                 return writeout(mapping, page);
     921             :         }
     922             : 
     923             :         /*
     924             :          * Buffers may be managed in a filesystem specific way.
     925             :          * We must have no buffers or drop them.
     926             :          */
     927           0 :         if (page_has_private(page) &&
     928           0 :             !try_to_release_page(page, GFP_KERNEL))
     929           0 :                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
     930             : 
     931           0 :         return migrate_page(mapping, newpage, page, mode);
     932             : }
     933             : 
     934             : /*
     935             :  * Move a page to a newly allocated page
     936             :  * The page is locked and all ptes have been successfully removed.
     937             :  *
     938             :  * The new page will have replaced the old page if this function
     939             :  * is successful.
     940             :  *
     941             :  * Return value:
     942             :  *   < 0 - error code
     943             :  *  MIGRATEPAGE_SUCCESS - success
     944             :  */
     945           0 : static int move_to_new_page(struct page *newpage, struct page *page,
     946             :                                 enum migrate_mode mode)
     947             : {
     948           0 :         struct address_space *mapping;
     949           0 :         int rc = -EAGAIN;
     950           0 :         bool is_lru = !__PageMovable(page);
     951             : 
     952           0 :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     953           0 :         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
     954             : 
     955           0 :         mapping = page_mapping(page);
     956             : 
     957           0 :         if (likely(is_lru)) {
     958           0 :                 if (!mapping)
     959           0 :                         rc = migrate_page(mapping, newpage, page, mode);
     960           0 :                 else if (mapping->a_ops->migratepage)
     961             :                         /*
     962             :                          * Most pages have a mapping and most filesystems
     963             :                          * provide a migratepage callback. Anonymous pages
     964             :                          * are part of swap space which also has its own
     965             :                          * migratepage callback. This is the most common path
     966             :                          * for page migration.
     967             :                          */
     968           0 :                         rc = mapping->a_ops->migratepage(mapping, newpage,
     969             :                                                         page, mode);
     970             :                 else
     971           0 :                         rc = fallback_migrate_page(mapping, newpage,
     972             :                                                         page, mode);
     973             :         } else {
     974             :                 /*
     975             :                  * In case of non-lru page, it could be released after
     976             :                  * isolation step. In that case, we shouldn't try migration.
     977             :                  */
     978           0 :                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
     979           0 :                 if (!PageMovable(page)) {
     980           0 :                         rc = MIGRATEPAGE_SUCCESS;
     981           0 :                         __ClearPageIsolated(page);
     982           0 :                         goto out;
     983             :                 }
     984             : 
     985           0 :                 rc = mapping->a_ops->migratepage(mapping, newpage,
     986             :                                                 page, mode);
     987           0 :                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
     988             :                         !PageIsolated(page));
     989             :         }
     990             : 
     991             :         /*
     992             :          * When successful, old pagecache page->mapping must be cleared before
     993             :          * page is freed; but stats require that PageAnon be left as PageAnon.
     994             :          */
     995           0 :         if (rc == MIGRATEPAGE_SUCCESS) {
     996           0 :                 if (__PageMovable(page)) {
     997           0 :                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
     998             : 
     999             :                         /*
    1000             :                          * We clear PG_movable under page_lock so any compactor
    1001             :                          * cannot try to migrate this page.
    1002             :                          */
    1003           0 :                         __ClearPageIsolated(page);
    1004             :                 }
    1005             : 
    1006             :                 /*
    1007             :                  * For anonymous and movable pages, page->mapping will be
    1008             :                  * cleared by free_pages_prepare, so don't reset it here;
    1009             :                  * leaving it set keeps type checks such as PageAnon working.
    1010             :                  */
    1011           0 :                 if (!PageMappingFlags(page))
    1012           0 :                         page->mapping = NULL;
    1013             : 
    1014           0 :                 if (likely(!is_zone_device_page(newpage)))
    1015           0 :                         flush_dcache_page(newpage);
    1016             : 
    1017             :         }
    1018           0 : out:
    1019           0 :         return rc;
    1020             : }
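The dispatch above is the hook that lets a filesystem opt its mappings into migration: it points the migratepage method of its address_space_operations at a suitable helper. A minimal sketch of that wiring, assuming a hypothetical filesystem (the examplefs_* names are invented; migrate_page() is the generic helper defined earlier in this file, and buffer_migrate_page() is the in-tree variant for mappings that carry buffer heads):

static int examplefs_readpage(struct file *file, struct page *page);               /* hypothetical */
static int examplefs_writepage(struct page *page, struct writeback_control *wbc);  /* hypothetical */

static const struct address_space_operations examplefs_aops = {
        .readpage       = examplefs_readpage,
        .writepage      = examplefs_writepage,
        /*
         * Pages of this mapping now take the "most common path" above,
         * via mapping->a_ops->migratepage. A mapping with buffer heads
         * would point this at buffer_migrate_page() instead.
         */
        .migratepage    = migrate_page,
};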
    1021             : 
    1022           0 : static int __unmap_and_move(struct page *page, struct page *newpage,
    1023             :                                 int force, enum migrate_mode mode)
    1024             : {
    1025           0 :         int rc = -EAGAIN;
    1026           0 :         int page_was_mapped = 0;
    1027           0 :         struct anon_vma *anon_vma = NULL;
    1028           0 :         bool is_lru = !__PageMovable(page);
    1029             : 
    1030           0 :         if (!trylock_page(page)) {
    1031           0 :                 if (!force || mode == MIGRATE_ASYNC)
    1032           0 :                         goto out;
    1033             : 
    1034             :                 /*
    1035             :                  * It's not safe for direct compaction to call lock_page.
    1036             :                  * For example, during page readahead pages are added locked
    1037             :                  * to the LRU. Later, when the IO completes the pages are
    1038             :                  * marked uptodate and unlocked. However, the queueing
    1039             :                  * could be merging multiple pages for one bio (e.g.
    1040             :                  * mpage_readahead). If an allocation happens for the
    1041             :                  * second or third page, the process can end up locking
    1042             :                  * the same page twice and deadlocking. Rather than
    1043             :                  * trying to be clever about what pages can be locked,
    1044             :                  * avoid the use of lock_page for direct compaction
    1045             :                  * altogether.
    1046             :                  */
    1047           0 :                 if (current->flags & PF_MEMALLOC)
    1048           0 :                         goto out;
    1049             : 
    1050           0 :                 lock_page(page);
    1051             :         }
    1052             : 
    1053           0 :         if (PageWriteback(page)) {
    1054             :                 /*
    1055             :                  * Only in the case of a full synchronous migration is it
    1056             :                  * necessary to wait for PageWriteback. In the async case,
    1057             :                  * the retry loop is too short and in the sync-light case,
    1058             :                  * the overhead of stalling is too high.
    1059             :                  */
    1060           0 :                 switch (mode) {
    1061             :                 case MIGRATE_SYNC:
    1062             :                 case MIGRATE_SYNC_NO_COPY:
    1063           0 :                         break;
    1064           0 :                 default:
    1065           0 :                         rc = -EBUSY;
    1066           0 :                         goto out_unlock;
    1067             :                 }
    1068           0 :                 if (!force)
    1069           0 :                         goto out_unlock;
    1070           0 :                 wait_on_page_writeback(page);
    1071             :         }
    1072             : 
    1073             :         /*
    1074             :          * Once try_to_unmap() drops page->mapcount to 0, we can no longer
    1075             :          * notice that the anon_vma has been freed while we migrate the page.
    1076             :          * This get_anon_vma() delays freeing of the anon_vma until the end
    1077             :          * of migration. File cache pages are not a problem because they are
    1078             :          * protected by the page lock throughout migration, so only anonymous
    1079             :          * pages need this care.
    1080             :          *
    1081             :          * Only page_get_anon_vma() understands the subtleties of
    1082             :          * getting a hold on an anon_vma from outside one of its mms.
    1083             :          * But if we cannot get anon_vma, then we won't need it anyway,
    1084             :          * because that implies that the anon page is no longer mapped
    1085             :          * (and cannot be remapped so long as we hold the page lock).
    1086             :          */
    1087           0 :         if (PageAnon(page) && !PageKsm(page))
    1088           0 :                 anon_vma = page_get_anon_vma(page);
    1089             : 
    1090             :         /*
    1091             :          * Block others from accessing the new page when we get around to
    1092             :          * establishing additional references. We are usually the only one
    1093             :          * holding a reference to newpage at this point. We used to have a BUG
    1094             :          * here if trylock_page(newpage) fails, but would like to allow for
    1095             :          * cases where there might be a race with the previous use of newpage.
    1096             :          * This is much like races on refcount of oldpage: just don't BUG().
    1097             :          */
    1098           0 :         if (unlikely(!trylock_page(newpage)))
    1099           0 :                 goto out_unlock;
    1100             : 
    1101           0 :         if (unlikely(!is_lru)) {
    1102           0 :                 rc = move_to_new_page(newpage, page, mode);
    1103           0 :                 goto out_unlock_both;
    1104             :         }
    1105             : 
    1106             :         /*
    1107             :          * Corner case handling:
    1108             :          * 1. While a new swap-cache page is being read in, it is added to
    1109             :          * the LRU and treated as swapcache, but it has no rmap yet.
    1110             :          * Calling try_to_unmap() against a page with page->mapping==NULL
    1111             :          * would trigger a BUG, so handle that case here.
    1112             :          * 2. An orphaned page (see truncate_cleanup_page) might have
    1113             :          * fs-private metadata. Such a page can be picked up during memory
    1114             :          * offlining.  Everywhere except page reclaim the page is invisible
    1115             :          * to the VM, so it cannot be migrated.  Instead, try to free the
    1116             :          * metadata so that the page itself can be freed.
    1117             :          */
    1118           0 :         if (!page->mapping) {
    1119           0 :                 VM_BUG_ON_PAGE(PageAnon(page), page);
    1120           0 :                 if (page_has_private(page)) {
    1121           0 :                         try_to_free_buffers(page);
    1122           0 :                         goto out_unlock_both;
    1123             :                 }
    1124           0 :         } else if (page_mapped(page)) {
    1125             :                 /* Establish migration ptes */
    1126           0 :                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
    1127             :                                 page);
    1128           0 :                 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
    1129           0 :                 page_was_mapped = 1;
    1130             :         }
    1131             : 
    1132           0 :         if (!page_mapped(page))
    1133           0 :                 rc = move_to_new_page(newpage, page, mode);
    1134             : 
    1135           0 :         if (page_was_mapped)
    1136           0 :                 remove_migration_ptes(page,
    1137             :                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
    1138             : 
    1139           0 : out_unlock_both:
    1140           0 :         unlock_page(newpage);
    1141           0 : out_unlock:
    1142             :         /* Drop an anon_vma reference if we took one */
    1143           0 :         if (anon_vma)
    1144           0 :                 put_anon_vma(anon_vma);
    1145           0 :         unlock_page(page);
    1146           0 : out:
    1147             :         /*
    1148             :          * If migration was successful, drop our reference to the newpage;
    1149             :          * this will not free it, because the new page owner took its own
    1150             :          * reference. Also, if it is an LRU page, add it back to the LRU
    1151             :          * list here. Use the old state of the isolated source page to
    1152             :          * determine whether we migrated an LRU page: newpage was already
    1153             :          * unlocked and possibly modified by its owner - don't rely on its
    1154             :          * state.
    1155             :          */
    1156           0 :         if (rc == MIGRATEPAGE_SUCCESS) {
    1157           0 :                 if (unlikely(!is_lru))
    1158           0 :                         put_page(newpage);
    1159             :                 else
    1160           0 :                         putback_lru_page(newpage);
    1161             :         }
    1162             : 
    1163           0 :         return rc;
    1164             : }
    1165             : 
    1166             : /*
    1167             :  * Obtain the lock on the page, remove all ptes and migrate the page
    1168             :  * to the newly allocated page in newpage.
    1169             :  */
    1170           0 : static int unmap_and_move(new_page_t get_new_page,
    1171             :                                    free_page_t put_new_page,
    1172             :                                    unsigned long private, struct page *page,
    1173             :                                    int force, enum migrate_mode mode,
    1174             :                                    enum migrate_reason reason,
    1175             :                                    struct list_head *ret)
    1176             : {
    1177           0 :         int rc = MIGRATEPAGE_SUCCESS;
    1178           0 :         struct page *newpage = NULL;
    1179             : 
    1180           0 :         if (!thp_migration_supported() && PageTransHuge(page))
    1181             :                 return -ENOSYS;
    1182             : 
    1183           0 :         if (page_count(page) == 1) {
    1184             :                 /* page was freed from under us. So we are done. */
    1185           0 :                 ClearPageActive(page);
    1186           0 :                 ClearPageUnevictable(page);
    1187           0 :                 if (unlikely(__PageMovable(page))) {
    1188           0 :                         lock_page(page);
    1189           0 :                         if (!PageMovable(page))
    1190           0 :                                 __ClearPageIsolated(page);
    1191           0 :                         unlock_page(page);
    1192             :                 }
    1193           0 :                 goto out;
    1194             :         }
    1195             : 
    1196           0 :         newpage = get_new_page(page, private);
    1197           0 :         if (!newpage)
    1198             :                 return -ENOMEM;
    1199             : 
    1200           0 :         rc = __unmap_and_move(page, newpage, force, mode);
    1201           0 :         if (rc == MIGRATEPAGE_SUCCESS)
    1202           0 :                 set_page_owner_migrate_reason(newpage, reason);
    1203             : 
    1204           0 : out:
    1205           0 :         if (rc != -EAGAIN) {
    1206             :                 /*
    1207             :                  * A page that has been migrated has all references
    1208             :                  * removed and will be freed. A page that has not been
    1209             :                  * migrated will have kept its references and be restored.
    1210             :                  */
    1211           0 :                 list_del(&page->lru);
    1212             :         }
    1213             : 
    1214             :         /*
    1215             :          * If migration was successful, release the reference grabbed during
    1216             :          * isolation. Otherwise, restore the page to the right list unless
    1217             :          * we want to retry.
    1218             :          */
    1219           0 :         if (rc == MIGRATEPAGE_SUCCESS) {
    1220             :                 /*
    1221             :                  * Compaction can also migrate non-LRU pages, which are
    1222             :                  * not accounted in NR_ISOLATED_*. They can be recognized
    1223             :                  * by __PageMovable.
    1224             :                  */
    1225           0 :                 if (likely(!__PageMovable(page)))
    1226           0 :                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
    1227           0 :                                         page_is_file_lru(page), -thp_nr_pages(page));
    1228             : 
    1229           0 :                 if (reason != MR_MEMORY_FAILURE)
    1230             :                         /*
    1231             :                          * We release the page in page_handle_poison.
    1232             :                          */
    1233           0 :                         put_page(page);
    1234             :         } else {
    1235           0 :                 if (rc != -EAGAIN)
    1236           0 :                         list_add_tail(&page->lru, ret);
    1237             : 
    1238           0 :                 if (put_new_page)
    1239           0 :                         put_new_page(newpage, private);
    1240             :                 else
    1241           0 :                         put_page(newpage);
    1242             :         }
    1243             : 
    1244             :         return rc;
    1245             : }
    1246             : 
    1247             : /*
    1248             :  * Counterpart of unmap_and_move() for hugepage migration.
    1249             :  *
    1250             :  * This function doesn't wait for the completion of hugepage I/O
    1251             :  * because there is no race between I/O and migration for hugepages.
    1252             :  * Note that currently hugepage I/O occurs only in direct I/O
    1253             :  * where no lock is held and PG_writeback is irrelevant,
    1254             :  * and the writeback status of all subpages is counted in the reference
    1255             :  * count of the head page (i.e. if all subpages of a 2MB hugepage are
    1256             :  * under direct I/O, the refcount of the head page is 512 and a bit more).
    1257             :  * This means that when we try to migrate a hugepage whose subpages are
    1258             :  * under direct I/O, some references remain after try_to_unmap() and
    1259             :  * hugepage migration fails without data corruption.
    1260             :  *
    1261             :  * There is also no race when direct I/O is issued on the page under migration,
    1262             :  * because then pte is replaced with migration swap entry and direct I/O code
    1263             :  * will wait in the page fault for migration to complete.
    1264             :  */
    1265             : static int unmap_and_move_huge_page(new_page_t get_new_page,
    1266             :                                 free_page_t put_new_page, unsigned long private,
    1267             :                                 struct page *hpage, int force,
    1268             :                                 enum migrate_mode mode, int reason,
    1269             :                                 struct list_head *ret)
    1270             : {
    1271             :         int rc = -EAGAIN;
    1272             :         int page_was_mapped = 0;
    1273             :         struct page *new_hpage;
    1274             :         struct anon_vma *anon_vma = NULL;
    1275             :         struct address_space *mapping = NULL;
    1276             : 
    1277             :         /*
    1278             :          * Migratability of hugepages depends on the architecture and the hugepage size.
    1279             :          * This check is necessary because some callers of hugepage migration
    1280             :          * like soft offline and memory hotremove don't walk through page
    1281             :          * tables or check whether the hugepage is pmd-based or not before
    1282             :          * kicking migration.
    1283             :          */
    1284             :         if (!hugepage_migration_supported(page_hstate(hpage))) {
    1285             :                 list_move_tail(&hpage->lru, ret);
    1286             :                 return -ENOSYS;
    1287             :         }
    1288             : 
    1289             :         if (page_count(hpage) == 1) {
    1290             :                 /* page was freed from under us. So we are done. */
    1291             :                 putback_active_hugepage(hpage);
    1292             :                 return MIGRATEPAGE_SUCCESS;
    1293             :         }
    1294             : 
    1295             :         new_hpage = get_new_page(hpage, private);
    1296             :         if (!new_hpage)
    1297             :                 return -ENOMEM;
    1298             : 
    1299             :         if (!trylock_page(hpage)) {
    1300             :                 if (!force)
    1301             :                         goto out;
    1302             :                 switch (mode) {
    1303             :                 case MIGRATE_SYNC:
    1304             :                 case MIGRATE_SYNC_NO_COPY:
    1305             :                         break;
    1306             :                 default:
    1307             :                         goto out;
    1308             :                 }
    1309             :                 lock_page(hpage);
    1310             :         }
    1311             : 
    1312             :         /*
    1313             :          * Check for pages which are in the process of being freed.  Without
    1314             :          * page_mapping() set, the hugetlbfs-specific move-page routine will
    1315             :          * not be called and we could leak usage counts for subpools.
    1316             :          */
    1317             :         if (page_private(hpage) && !page_mapping(hpage)) {
    1318             :                 rc = -EBUSY;
    1319             :                 goto out_unlock;
    1320             :         }
    1321             : 
    1322             :         if (PageAnon(hpage))
    1323             :                 anon_vma = page_get_anon_vma(hpage);
    1324             : 
    1325             :         if (unlikely(!trylock_page(new_hpage)))
    1326             :                 goto put_anon;
    1327             : 
    1328             :         if (page_mapped(hpage)) {
    1329             :                 bool mapping_locked = false;
    1330             :                 enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK;
    1331             : 
    1332             :                 if (!PageAnon(hpage)) {
    1333             :                         /*
    1334             :                          * In shared mappings, try_to_unmap could potentially
    1335             :                          * call huge_pmd_unshare.  Because of this, take the
    1336             :                          * i_mmap semaphore in write mode here and set
    1337             :                          * TTU_RMAP_LOCKED to let lower levels know it is held.
    1338             :                          */
    1339             :                         mapping = hugetlb_page_mapping_lock_write(hpage);
    1340             :                         if (unlikely(!mapping))
    1341             :                                 goto unlock_put_anon;
    1342             : 
    1343             :                         mapping_locked = true;
    1344             :                         ttu |= TTU_RMAP_LOCKED;
    1345             :                 }
    1346             : 
    1347             :                 try_to_unmap(hpage, ttu);
    1348             :                 page_was_mapped = 1;
    1349             : 
    1350             :                 if (mapping_locked)
    1351             :                         i_mmap_unlock_write(mapping);
    1352             :         }
    1353             : 
    1354             :         if (!page_mapped(hpage))
    1355             :                 rc = move_to_new_page(new_hpage, hpage, mode);
    1356             : 
    1357             :         if (page_was_mapped)
    1358             :                 remove_migration_ptes(hpage,
    1359             :                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
    1360             : 
    1361             : unlock_put_anon:
    1362             :         unlock_page(new_hpage);
    1363             : 
    1364             : put_anon:
    1365             :         if (anon_vma)
    1366             :                 put_anon_vma(anon_vma);
    1367             : 
    1368             :         if (rc == MIGRATEPAGE_SUCCESS) {
    1369             :                 move_hugetlb_state(hpage, new_hpage, reason);
    1370             :                 put_new_page = NULL;
    1371             :         }
    1372             : 
    1373             : out_unlock:
    1374             :         unlock_page(hpage);
    1375             : out:
    1376             :         if (rc == MIGRATEPAGE_SUCCESS)
    1377             :                 putback_active_hugepage(hpage);
    1378             :         else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
    1379             :                 list_move_tail(&hpage->lru, ret);
    1380             : 
    1381             :         /*
    1382             :  * If migration was not successful and there's a freeing callback, use
    1383             :  * it.  Otherwise, putback_active_hugepage() will drop the reference
    1384             :  * grabbed during isolation.
    1385             :          */
    1386             :         if (put_new_page)
    1387             :                 put_new_page(new_hpage, private);
    1388             :         else
    1389             :                 putback_active_hugepage(new_hpage);
    1390             : 
    1391             :         return rc;
    1392             : }
    1393             : 
    1394           0 : static inline int try_split_thp(struct page *page, struct page **page2,
    1395             :                                 struct list_head *from)
    1396             : {
    1397           0 :         int rc = 0;
    1398             : 
    1399           0 :         lock_page(page);
    1400           0 :         rc = split_huge_page_to_list(page, from);
    1401           0 :         unlock_page(page);
    1402           0 :         if (!rc)
    1403           0 :                 list_safe_reset_next(page, *page2, lru);
    1404             : 
    1405           0 :         return rc;
    1406             : }
    1407             : 
    1408             : /*
    1409             :  * migrate_pages - migrate the pages specified in a list, to the free pages
    1410             :  *                 supplied as the target for the page migration
    1411             :  *
    1412             :  * @from:               The list of pages to be migrated.
    1413             :  * @get_new_page:       The function used to allocate free pages to be used
    1414             :  *                      as the target of the page migration.
    1415             :  * @put_new_page:       The function used to free target pages if migration
    1416             :  *                      fails, or NULL if no special handling is necessary.
    1417             :  * @private:            Private data to be passed on to get_new_page()
    1418             :  * @mode:               The migration mode that specifies the constraints for
    1419             :  *                      page migration, if any.
    1420             :  * @reason:             The reason for page migration.
    1421             :  *
    1422             :  * The function returns after 10 attempts or once no pages are movable any
    1423             :  * more, either because the list has become empty or no retryable pages remain.
    1424             :  * It is the caller's responsibility to call putback_movable_pages() to return
    1425             :  * pages to the LRU or free list, but only if ret != 0.
    1426             :  *
    1427             :  * Returns the number of pages that were not migrated, or an error code.
    1428             :  */
    1429           0 : int migrate_pages(struct list_head *from, new_page_t get_new_page,
    1430             :                 free_page_t put_new_page, unsigned long private,
    1431             :                 enum migrate_mode mode, int reason)
    1432             : {
    1433           0 :         int retry = 1;
    1434           0 :         int thp_retry = 1;
    1435           0 :         int nr_failed = 0;
    1436           0 :         int nr_succeeded = 0;
    1437           0 :         int nr_thp_succeeded = 0;
    1438           0 :         int nr_thp_failed = 0;
    1439           0 :         int nr_thp_split = 0;
    1440           0 :         int pass = 0;
    1441           0 :         bool is_thp = false;
    1442           0 :         struct page *page;
    1443           0 :         struct page *page2;
    1444           0 :         int swapwrite = current->flags & PF_SWAPWRITE;
    1445           0 :         int rc, nr_subpages;
    1446           0 :         LIST_HEAD(ret_pages);
    1447             : 
    1448           0 :         if (!swapwrite)
    1449           0 :                 current->flags |= PF_SWAPWRITE;
    1450             : 
    1451           0 :         for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
    1452           0 :                 retry = 0;
    1453           0 :                 thp_retry = 0;
    1454             : 
    1455           0 :                 list_for_each_entry_safe(page, page2, from, lru) {
    1456           0 : retry:
    1457             :                         /*
    1458             :                          * THP statistics are based on the source huge page.
    1459             :                          * Capture required information that might get lost
    1460             :                          * during migration.
    1461             :                          */
    1462           0 :                         is_thp = PageTransHuge(page) && !PageHuge(page);
    1463           0 :                         nr_subpages = thp_nr_pages(page);
    1464           0 :                         cond_resched();
    1465             : 
    1466           0 :                         if (PageHuge(page))
    1467             :                                 rc = unmap_and_move_huge_page(get_new_page,
    1468             :                                                 put_new_page, private, page,
    1469             :                                                 pass > 2, mode, reason,
    1470             :                                                 &ret_pages);
    1471             :                         else
    1472           0 :                                 rc = unmap_and_move(get_new_page, put_new_page,
    1473             :                                                 private, page, pass > 2, mode,
    1474             :                                                 reason, &ret_pages);
    1475             :                         /*
    1476             :                          * The rules are:
    1477             :                          *      Success: a non-hugetlb page will be freed; a
    1478             :                          *               hugetlb page will be put back
    1479             :                          *      -EAGAIN: stay on the from list
    1480             :                          *      -ENOMEM: stay on the from list
    1481             :                          *      Other errno: put on ret_pages list then splice to
    1482             :                          *                   from list
    1483             :                          */
    1484           0 :                         switch(rc) {
    1485             :                         /*
    1486             :                          * THP migration might be unsupported or the
    1487             :                          * THP migration might be unsupported or the
    1488             :                          * allocation might have failed, so we should
    1489             :                          * retry the same page with the THP split
    1490             :                          * into base pages.
    1491             :                          * Head page is retried immediately and tail
    1492             :                          * pages are added to the tail of the list so
    1493             :                          * we encounter them after the rest of the list
    1494             :                          * is processed.
    1495             :                          */
    1496           0 :                         case -ENOSYS:
    1497             :                                 /* THP migration is unsupported */
    1498           0 :                                 if (is_thp) {
    1499           0 :                                         if (!try_split_thp(page, &page2, from)) {
    1500           0 :                                                 nr_thp_split++;
    1501           0 :                                                 goto retry;
    1502             :                                         }
    1503             : 
    1504           0 :                                         nr_thp_failed++;
    1505           0 :                                         nr_failed += nr_subpages;
    1506           0 :                                         break;
    1507             :                                 }
    1508             : 
    1509             :                                 /* Hugetlb migration is unsupported */
    1510           0 :                                 nr_failed++;
    1511           0 :                                 break;
    1512           0 :                         case -ENOMEM:
    1513             :                                 /*
    1514             :                                  * When memory is low, don't bother trying to migrate
    1515             :                                  * other pages; just exit.
    1516             :                                  */
    1517           0 :                                 if (is_thp) {
    1518           0 :                                         if (!try_split_thp(page, &page2, from)) {
    1519           0 :                                                 nr_thp_split++;
    1520           0 :                                                 goto retry;
    1521             :                                         }
    1522             : 
    1523           0 :                                         nr_thp_failed++;
    1524           0 :                                         nr_failed += nr_subpages;
    1525           0 :                                         goto out;
    1526             :                                 }
    1527           0 :                                 nr_failed++;
    1528           0 :                                 goto out;
    1529           0 :                         case -EAGAIN:
    1530           0 :                                 if (is_thp) {
    1531           0 :                                         thp_retry++;
    1532           0 :                                         break;
    1533             :                                 }
    1534           0 :                                 retry++;
    1535           0 :                                 break;
    1536           0 :                         case MIGRATEPAGE_SUCCESS:
    1537           0 :                                 if (is_thp) {
    1538           0 :                                         nr_thp_succeeded++;
    1539           0 :                                         nr_succeeded += nr_subpages;
    1540           0 :                                         break;
    1541             :                                 }
    1542           0 :                                 nr_succeeded++;
    1543           0 :                                 break;
    1544           0 :                         default:
    1545             :                                 /*
    1546             :                                  * Permanent failure (-EBUSY, etc.):
    1547             :                          * unlike the -EAGAIN case, the failed page is
    1548             :                          * removed from the migration page list and not
    1549             :                                  * retried in the next outer loop.
    1550             :                                  */
    1551           0 :                                 if (is_thp) {
    1552           0 :                                         nr_thp_failed++;
    1553           0 :                                         nr_failed += nr_subpages;
    1554           0 :                                         break;
    1555             :                                 }
    1556           0 :                                 nr_failed++;
    1557           0 :                                 break;
    1558             :                         }
    1559             :                 }
    1560             :         }
    1561           0 :         nr_failed += retry + thp_retry;
    1562           0 :         nr_thp_failed += thp_retry;
    1563           0 :         rc = nr_failed;
    1564           0 : out:
    1565             :         /*
    1566             :          * Splice the permanently failed pages back onto the migration list;
    1567             :          * they will be moved to the right list by the caller.
    1568             :          */
    1569           0 :         list_splice(&ret_pages, from);
    1570             : 
    1571           0 :         count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
    1572           0 :         count_vm_events(PGMIGRATE_FAIL, nr_failed);
    1573           0 :         count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
    1574           0 :         count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
    1575           0 :         count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
    1576           0 :         trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
    1577             :                                nr_thp_failed, nr_thp_split, mode, reason);
    1578             : 
    1579           0 :         if (!swapwrite)
    1580           0 :                 current->flags &= ~PF_SWAPWRITE;
    1581             : 
    1582           0 :         return rc;
    1583             : }
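As a usage sketch of the contract spelled out in the kernel-doc above: the caller isolates pages onto a private list, supplies an allocation callback plus its private data, and puts back whatever could not be moved. do_move_pages_to_node() below is the in-tree instance of this pattern; the version here, under a hypothetical name, merely labels the pieces:

/* Minimal caller sketch; example_migrate_list() is a hypothetical name. */
static int example_migrate_list(struct list_head *pagelist, int nid)
{
        /* Target description decoded by alloc_migration_target() from 'private'. */
        struct migration_target_control mtc = {
                .nid = nid,
                .gfp_mask = GFP_HIGHUSER_MOVABLE,
        };
        int nr_failed;

        /* Returns 0, the number of pages left unmigrated, or a negative errno. */
        nr_failed = migrate_pages(pagelist, alloc_migration_target, NULL,
                                  (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (nr_failed)
                /* Per the kernel-doc: return leftover pages to their lists. */
                putback_movable_pages(pagelist);
        return nr_failed;
}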
    1584             : 
    1585           0 : struct page *alloc_migration_target(struct page *page, unsigned long private)
    1586             : {
    1587           0 :         struct migration_target_control *mtc;
    1588           0 :         gfp_t gfp_mask;
    1589           0 :         unsigned int order = 0;
    1590           0 :         struct page *new_page = NULL;
    1591           0 :         int nid;
    1592           0 :         int zidx;
    1593             : 
    1594           0 :         mtc = (struct migration_target_control *)private;
    1595           0 :         gfp_mask = mtc->gfp_mask;
    1596           0 :         nid = mtc->nid;
    1597           0 :         if (nid == NUMA_NO_NODE)
    1598           0 :                 nid = page_to_nid(page);
    1599             : 
    1600           0 :         if (PageHuge(page)) {
    1601             :                 struct hstate *h = page_hstate(compound_head(page));
    1602             : 
    1603             :                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
    1604             :                 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
    1605             :         }
    1606             : 
    1607           0 :         if (PageTransHuge(page)) {
    1608             :                 /*
    1609             :                  * clear __GFP_RECLAIM to make the migration callback
    1610             :                  * consistent with regular THP allocations.
    1611             :                  */
    1612           0 :                 gfp_mask &= ~__GFP_RECLAIM;
    1613           0 :                 gfp_mask |= GFP_TRANSHUGE;
    1614           0 :                 order = HPAGE_PMD_ORDER;
    1615             :         }
    1616           0 :         zidx = zone_idx(page_zone(page));
    1617           0 :         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
    1618           0 :                 gfp_mask |= __GFP_HIGHMEM;
    1619             : 
    1620           0 :         new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
    1621             : 
    1622           0 :         if (new_page && PageTransHuge(new_page))
    1623           0 :                 prep_transhuge_page(new_page);
    1624             : 
    1625           0 :         return new_page;
    1626             : }
    1627             : 
    1628             : #ifdef CONFIG_NUMA
    1629             : 
    1630           0 : static int store_status(int __user *status, int start, int value, int nr)
    1631             : {
    1632           0 :         while (nr-- > 0) {
    1633           0 :                 if (put_user(value, status + start))
    1634             :                         return -EFAULT;
    1635           0 :                 start++;
    1636             :         }
    1637             : 
    1638             :         return 0;
    1639             : }
    1640             : 
    1641           0 : static int do_move_pages_to_node(struct mm_struct *mm,
    1642             :                 struct list_head *pagelist, int node)
    1643             : {
    1644           0 :         int err;
    1645           0 :         struct migration_target_control mtc = {
    1646             :                 .nid = node,
    1647             :                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
    1648             :         };
    1649             : 
    1650           0 :         err = migrate_pages(pagelist, alloc_migration_target, NULL,
    1651             :                         (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
    1652           0 :         if (err)
    1653           0 :                 putback_movable_pages(pagelist);
    1654           0 :         return err;
    1655             : }
    1656             : 
    1657             : /*
    1658             :  * Resolves the given address to a struct page, isolates it from the LRU and
    1659             :  * puts it on the given pagelist.
    1660             :  * Returns:
    1661             :  *     errno - if the page cannot be found/isolated
    1662             :  *     0 - when it doesn't have to be migrated because it is already on the
    1663             :  *         target node
    1664             :  *     1 - when it has been queued
    1665             :  */
    1666           0 : static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
    1667             :                 int node, struct list_head *pagelist, bool migrate_all)
    1668             : {
    1669           0 :         struct vm_area_struct *vma;
    1670           0 :         struct page *page;
    1671           0 :         unsigned int follflags;
    1672           0 :         int err;
    1673             : 
    1674           0 :         mmap_read_lock(mm);
    1675           0 :         err = -EFAULT;
    1676           0 :         vma = find_vma(mm, addr);
    1677           0 :         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
    1678           0 :                 goto out;
    1679             : 
    1680             :         /* FOLL_DUMP to ignore special (like zero) pages */
    1681           0 :         follflags = FOLL_GET | FOLL_DUMP;
    1682           0 :         page = follow_page(vma, addr, follflags);
    1683             : 
    1684           0 :         err = PTR_ERR(page);
    1685           0 :         if (IS_ERR(page))
    1686           0 :                 goto out;
    1687             : 
    1688           0 :         err = -ENOENT;
    1689           0 :         if (!page)
    1690           0 :                 goto out;
    1691             : 
    1692           0 :         err = 0;
    1693           0 :         if (page_to_nid(page) == node)
    1694           0 :                 goto out_putpage;
    1695             : 
    1696           0 :         err = -EACCES;
    1697           0 :         if (page_mapcount(page) > 1 && !migrate_all)
    1698           0 :                 goto out_putpage;
    1699             : 
    1700           0 :         if (PageHuge(page)) {
    1701             :                 if (PageHead(page)) {
    1702             :                         isolate_huge_page(page, pagelist);
    1703             :                         err = 1;
    1704             :                 }
    1705             :         } else {
    1706           0 :                 struct page *head;
    1707             : 
    1708           0 :                 head = compound_head(page);
    1709           0 :                 err = isolate_lru_page(head);
    1710           0 :                 if (err)
    1711           0 :                         goto out_putpage;
    1712             : 
    1713           0 :                 err = 1;
    1714           0 :                 list_add_tail(&head->lru, pagelist);
    1715           0 :                 mod_node_page_state(page_pgdat(head),
    1716           0 :                         NR_ISOLATED_ANON + page_is_file_lru(head),
    1717           0 :                         thp_nr_pages(head));
    1718             :         }
    1719           0 : out_putpage:
    1720             :         /*
    1721             :          * Either drop the extra refcount taken by
    1722             :          * isolate_lru_page(), or drop the page reference if the
    1723             :          * page was not isolated.
    1724             :          */
    1725           0 :         put_page(page);
    1726           0 : out:
    1727           0 :         mmap_read_unlock(mm);
    1728           0 :         return err;
    1729             : }
    1730             : 
    1731           0 : static int move_pages_and_store_status(struct mm_struct *mm, int node,
    1732             :                 struct list_head *pagelist, int __user *status,
    1733             :                 int start, int i, unsigned long nr_pages)
    1734             : {
    1735           0 :         int err;
    1736             : 
    1737           0 :         if (list_empty(pagelist))
    1738             :                 return 0;
    1739             : 
    1740           0 :         err = do_move_pages_to_node(mm, pagelist, node);
    1741           0 :         if (err) {
    1742             :                 /*
    1743             :                  * A positive err is the number of pages that
    1744             :                  * failed to migrate.  Since we are going to
    1745             :                  * abort and return the number of non-migrated
    1746             :                  * pages, we need to include the rest of the
    1747             :                  * nr_pages that have not been attempted as
    1748             :                  * well.
    1749             :                  */
    1750           0 :                 if (err > 0)
    1751           0 :                         err += nr_pages - i - 1;
    1752           0 :                 return err;
    1753             :         }
    1754           0 :         return store_status(status, start, node, i - start);
    1755             : }
    1756             : 
    1757             : /*
    1758             :  * Migrate an array of page addresses to an array of nodes and fill in
    1759             :  * the corresponding array of status values.
    1760             :  */
    1761           0 : static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
    1762             :                          unsigned long nr_pages,
    1763             :                          const void __user * __user *pages,
    1764             :                          const int __user *nodes,
    1765             :                          int __user *status, int flags)
    1766             : {
    1767           0 :         int current_node = NUMA_NO_NODE;
    1768           0 :         LIST_HEAD(pagelist);
    1769           0 :         int start, i;
    1770           0 :         int err = 0, err1;
    1771             : 
    1772           0 :         migrate_prep();
    1773             : 
    1774           0 :         for (i = start = 0; i < nr_pages; i++) {
    1775           0 :                 const void __user *p;
    1776           0 :                 unsigned long addr;
    1777           0 :                 int node;
    1778             : 
    1779           0 :                 err = -EFAULT;
    1780           0 :                 if (get_user(p, pages + i))
    1781           0 :                         goto out_flush;
    1782           0 :                 if (get_user(node, nodes + i))
    1783           0 :                         goto out_flush;
    1784           0 :                 addr = (unsigned long)untagged_addr(p);
    1785             : 
    1786           0 :                 err = -ENODEV;
    1787           0 :                 if (node < 0 || node >= MAX_NUMNODES)
    1788           0 :                         goto out_flush;
    1789           0 :                 if (!node_state(node, N_MEMORY))
    1790           0 :                         goto out_flush;
    1791             : 
    1792           0 :                 err = -EACCES;
    1793           0 :                 if (!node_isset(node, task_nodes))
    1794           0 :                         goto out_flush;
    1795             : 
    1796           0 :                 if (current_node == NUMA_NO_NODE) {
    1797             :                         current_node = node;
    1798             :                         start = i;
    1799           0 :                 } else if (node != current_node) {
    1800           0 :                         err = move_pages_and_store_status(mm, current_node,
    1801             :                                         &pagelist, status, start, i, nr_pages);
    1802           0 :                         if (err)
    1803           0 :                                 goto out;
    1804             :                         start = i;
    1805             :                         current_node = node;
    1806             :                 }
    1807             : 
    1808             :                 /*
    1809             :                  * Errors in the page lookup or isolation are not fatal and we simply
    1810             :                  * report them via status.
    1811             :                  */
    1812           0 :                 err = add_page_for_migration(mm, addr, current_node,
    1813           0 :                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
    1814             : 
    1815           0 :                 if (err > 0) {
    1816             :                         /* The page is successfully queued for migration */
    1817           0 :                         continue;
    1818             :                 }
    1819             : 
    1820             :                 /*
    1821             :                  * If the page is already on the target node (!err), store the
    1822             :                  * node; otherwise, store the err.
    1823             :                  */
    1824           0 :                 err = store_status(status, i, err ? : current_node, 1);
    1825           0 :                 if (err)
    1826           0 :                         goto out_flush;
    1827             : 
    1828           0 :                 err = move_pages_and_store_status(mm, current_node, &pagelist,
    1829             :                                 status, start, i, nr_pages);
    1830           0 :                 if (err)
    1831           0 :                         goto out;
    1832             :                 current_node = NUMA_NO_NODE;
    1833             :         }
    1834           0 : out_flush:
    1835             :         /* Make sure we do not overwrite the existing error */
    1836           0 :         err1 = move_pages_and_store_status(mm, current_node, &pagelist,
    1837             :                                 status, start, i, nr_pages);
    1838           0 :         if (err >= 0)
    1839           0 :                 err = err1;
    1840           0 : out:
    1841           0 :         return err;
    1842             : }
    1843             : 
    1844             : /*
    1845             :  * Determine the nodes of an array of pages and store them in an array of status values.
    1846             :  */
    1847           0 : static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
    1848             :                                 const void __user **pages, int *status)
    1849             : {
    1850           0 :         unsigned long i;
    1851             : 
    1852           0 :         mmap_read_lock(mm);
    1853             : 
    1854           0 :         for (i = 0; i < nr_pages; i++) {
    1855           0 :                 unsigned long addr = (unsigned long)(*pages);
    1856           0 :                 struct vm_area_struct *vma;
    1857           0 :                 struct page *page;
    1858           0 :                 int err = -EFAULT;
    1859             : 
    1860           0 :                 vma = find_vma(mm, addr);
    1861           0 :                 if (!vma || addr < vma->vm_start)
    1862           0 :                         goto set_status;
    1863             : 
    1864             :                 /* FOLL_DUMP to ignore special (like zero) pages */
    1865           0 :                 page = follow_page(vma, addr, FOLL_DUMP);
    1866             : 
    1867           0 :                 err = PTR_ERR(page);
    1868           0 :                 if (IS_ERR(page))
    1869           0 :                         goto set_status;
    1870             : 
    1871           0 :                 err = page ? page_to_nid(page) : -ENOENT;
    1872           0 : set_status:
    1873           0 :                 *status = err;
    1874             : 
    1875           0 :                 pages++;
    1876           0 :                 status++;
    1877             :         }
    1878             : 
    1879           0 :         mmap_read_unlock(mm);
    1880           0 : }
    1881             : 
    1882             : /*
    1883             :  * Determine the nodes of a user array of pages and store them in
    1884             :  * a user array of status values.
    1885             :  */
    1886           0 : static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
    1887             :                          const void __user * __user *pages,
    1888             :                          int __user *status)
    1889             : {
    1890             : #define DO_PAGES_STAT_CHUNK_NR 16
    1891           0 :         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
    1892           0 :         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
    1893             : 
    1894           0 :         while (nr_pages) {
    1895           0 :                 unsigned long chunk_nr;
    1896             : 
    1897           0 :                 chunk_nr = nr_pages;
    1898           0 :                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
    1899             :                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
    1900             : 
    1901           0 :                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
    1902             :                         break;
    1903             : 
    1904           0 :                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
    1905             : 
    1906           0 :                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
    1907             :                         break;
    1908             : 
    1909           0 :                 pages += chunk_nr;
    1910           0 :                 status += chunk_nr;
    1911           0 :                 nr_pages -= chunk_nr;
    1912             :         }
    1913           0 :         return nr_pages ? -EFAULT : 0;
    1914             : }
    1915             : 
    1916           0 : static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
    1917             : {
    1918           0 :         struct task_struct *task;
    1919           0 :         struct mm_struct *mm;
    1920             : 
    1921             :         /*
    1922             :          * There is no need to check whether the current process has the
    1923             :          * right to modify the specified process when they are the same.
    1924             :          */
    1925           0 :         if (!pid) {
    1926           0 :                 mmget(current->mm);
    1927           0 :                 *mem_nodes = cpuset_mems_allowed(current);
    1928           0 :                 return current->mm;
    1929             :         }
    1930             : 
    1931             :         /* Find the mm_struct */
    1932           0 :         rcu_read_lock();
    1933           0 :         task = find_task_by_vpid(pid);
    1934           0 :         if (!task) {
    1935           0 :                 rcu_read_unlock();
    1936           0 :                 return ERR_PTR(-ESRCH);
    1937             :         }
    1938           0 :         get_task_struct(task);
    1939             : 
    1940             :         /*
    1941             :          * Check if this process has the right to modify the specified
    1942             :          * process. Use the regular "ptrace_may_access()" checks.
    1943             :          */
    1944           0 :         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
    1945           0 :                 rcu_read_unlock();
    1946           0 :                 mm = ERR_PTR(-EPERM);
    1947           0 :                 goto out;
    1948             :         }
    1949           0 :         rcu_read_unlock();
    1950             : 
    1951           0 :         mm = ERR_PTR(security_task_movememory(task));
    1952           0 :         if (IS_ERR(mm))
    1953           0 :                 goto out;
    1954           0 :         *mem_nodes = cpuset_mems_allowed(task);
    1955           0 :         mm = get_task_mm(task);
    1956           0 : out:
    1957           0 :         put_task_struct(task);
    1958           0 :         if (!mm)
    1959           0 :                 mm = ERR_PTR(-EINVAL);
    1960             :         return mm;
    1961             : }
    1962             : 
    1963             : /*
    1964             :  * Move a list of pages in the address space of the currently executing
    1965             :  * process.
    1966             :  */
    1967           0 : static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
    1968             :                              const void __user * __user *pages,
    1969             :                              const int __user *nodes,
    1970             :                              int __user *status, int flags)
    1971             : {
    1972           0 :         struct mm_struct *mm;
    1973           0 :         int err;
    1974           0 :         nodemask_t task_nodes;
    1975             : 
    1976             :         /* Check flags */
    1977           0 :         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
    1978             :                 return -EINVAL;
    1979             : 
    1980           0 :         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
    1981             :                 return -EPERM;
    1982             : 
    1983           0 :         mm = find_mm_struct(pid, &task_nodes);
    1984           0 :         if (IS_ERR(mm))
    1985           0 :                 return PTR_ERR(mm);
    1986             : 
    1987           0 :         if (nodes)
    1988           0 :                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
    1989             :                                     nodes, status, flags);
    1990             :         else
    1991           0 :                 err = do_pages_stat(mm, nr_pages, pages, status);
    1992             : 
    1993           0 :         mmput(mm);
    1994           0 :         return err;
    1995             : }
    1996             : 
    1997           0 : SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
    1998             :                 const void __user * __user *, pages,
    1999             :                 const int __user *, nodes,
    2000             :                 int __user *, status, int, flags)
    2001             : {
    2002           0 :         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
    2003             : }
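                     : 
                     : /*
                     :  * Illustrative userspace sketch (not part of this file): the status-query
                     :  * path of move_pages(2), which is serviced by do_pages_stat() above.
                     :  * Passing nodes == NULL only reports the node each page currently resides
                     :  * on.  The move_pages() wrapper comes from libnuma (<numaif.h>, link with
                     :  * -lnuma); the single-page setup below is an illustrative assumption.
                     :  *
                     :  *      #include <numaif.h>
                     :  *      #include <stdio.h>
                     :  *      #include <stdlib.h>
                     :  *      #include <unistd.h>
                     :  *
                     :  *      int main(void)
                     :  *      {
                     :  *              long page_size = sysconf(_SC_PAGESIZE);
                     :  *              void *buf, *pages[1];
                     :  *              int status[1];
                     :  *
                     :  *              if (posix_memalign(&buf, page_size, page_size))
                     :  *                      return 1;
                     :  *              ((char *)buf)[0] = 0;   // fault the page in
                     :  *              pages[0] = buf;
                     :  *
                     :  *              // nodes == NULL: query only, no migration is attempted
                     :  *              if (move_pages(0, 1, pages, NULL, status, 0) == 0)
                     :  *                      printf("page is on node %d\n", status[0]);
                     :  *              return 0;
                     :  *      }
                     :  */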
    2004             : 
    2005             : #ifdef CONFIG_COMPAT
    2006           0 : COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
    2007             :                        compat_uptr_t __user *, pages32,
    2008             :                        const int __user *, nodes,
    2009             :                        int __user *, status,
    2010             :                        int, flags)
    2011             : {
    2012           0 :         const void __user * __user *pages;
    2013           0 :         int i;
    2014             : 
    2015           0 :         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
    2016           0 :         for (i = 0; i < nr_pages; i++) {
    2017           0 :                 compat_uptr_t p;
    2018             : 
    2019           0 :                 if (get_user(p, pages32 + i) ||
    2020           0 :                         put_user(compat_ptr(p), pages + i))
    2021           0 :                         return -EFAULT;
    2022             :         }
    2023           0 :         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
    2024             : }
    2025             : #endif /* CONFIG_COMPAT */
    2026             : 
    2027             : #ifdef CONFIG_NUMA_BALANCING
    2028             : /*
    2029             :  * Returns true if this is a safe migration target node for misplaced NUMA
    2030             :  * pages. Currently it only checks the watermarks, which is a crude check.
    2031             :  */
    2032             : static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
    2033             :                                    unsigned long nr_migrate_pages)
    2034             : {
    2035             :         int z;
    2036             : 
    2037             :         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    2038             :                 struct zone *zone = pgdat->node_zones + z;
    2039             : 
    2040             :                 if (!populated_zone(zone))
    2041             :                         continue;
    2042             : 
    2043             :                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
    2044             :                 if (!zone_watermark_ok(zone, 0,
    2045             :                                        high_wmark_pages(zone) +
    2046             :                                        nr_migrate_pages,
    2047             :                                        ZONE_MOVABLE, 0))
    2048             :                         continue;
    2049             :                 return true;
    2050             :         }
    2051             :         return false;
    2052             : }
    2053             : 
    2054             : static struct page *alloc_misplaced_dst_page(struct page *page,
    2055             :                                            unsigned long data)
    2056             : {
    2057             :         int nid = (int) data;
    2058             :         struct page *newpage;
    2059             : 
    2060             :         newpage = __alloc_pages_node(nid,
    2061             :                                          (GFP_HIGHUSER_MOVABLE |
    2062             :                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
    2063             :                                           __GFP_NORETRY | __GFP_NOWARN) &
    2064             :                                          ~__GFP_RECLAIM, 0);
    2065             : 
    2066             :         return newpage;
    2067             : }
    2068             : 
    2069             : static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    2070             : {
    2071             :         int page_lru;
    2072             : 
    2073             :         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
    2074             : 
    2075             :         /* Avoid migrating to a node that is nearly full */
    2076             :         if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
    2077             :                 return 0;
    2078             : 
    2079             :         if (isolate_lru_page(page))
    2080             :                 return 0;
    2081             : 
    2082             :         /*
    2083             :          * migrate_misplaced_transhuge_page() skips page migration's usual
    2084             :          * check on page_count(), so we must do it here, now that the page
    2085             :          * has been isolated: a GUP pin, or any other pin, prevents migration.
    2086             :          * The expected page count is 3: 1 for the page's mapcount, 1 for the
    2087             :          * caller's pin, and 1 for the reference taken by isolate_lru_page().
    2088             :          */
    2089             :         if (PageTransHuge(page) && page_count(page) != 3) {
    2090             :                 putback_lru_page(page);
    2091             :                 return 0;
    2092             :         }
    2093             : 
    2094             :         page_lru = page_is_file_lru(page);
    2095             :         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
    2096             :                                 thp_nr_pages(page));
    2097             : 
    2098             :         /*
    2099             :          * Isolating the page has taken another reference, so the
    2100             :          * caller's reference can be safely dropped without the page
    2101             :          * disappearing underneath us during migration.
    2102             :          */
    2103             :         put_page(page);
    2104             :         return 1;
    2105             : }
    2106             : 
    2107             : bool pmd_trans_migrating(pmd_t pmd)
    2108             : {
    2109             :         struct page *page = pmd_page(pmd);
    2110             :         return PageLocked(page);
    2111             : }
    2112             : 
    2113             : static inline bool is_shared_exec_page(struct vm_area_struct *vma,
    2114             :                                        struct page *page)
    2115             : {
    2116             :         if (page_mapcount(page) != 1 &&
    2117             :             (page_is_file_lru(page) || vma_is_shmem(vma)) &&
    2118             :             (vma->vm_flags & VM_EXEC))
    2119             :                 return true;
    2120             : 
    2121             :         return false;
    2122             : }
    2123             : 
    2124             : /*
    2125             :  * Attempt to migrate a misplaced page to the specified destination
    2126             :  * node. Caller is expected to have an elevated reference count on
    2127             :  * the page that will be dropped by this function before returning.
    2128             :  */
    2129             : int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
    2130             :                            int node)
    2131             : {
    2132             :         pg_data_t *pgdat = NODE_DATA(node);
    2133             :         int isolated;
    2134             :         int nr_remaining;
    2135             :         LIST_HEAD(migratepages);
    2136             : 
    2137             :         /*
    2138             :          * Don't migrate file pages that are mapped in multiple processes
    2139             :          * with execute permissions as they are probably shared libraries.
    2140             :          */
    2141             :         if (is_shared_exec_page(vma, page))
    2142             :                 goto out;
    2143             : 
    2144             :         /*
    2145             :          * Also do not migrate dirty pages as not all filesystems can move
    2146             :          * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
    2147             :          */
    2148             :         if (page_is_file_lru(page) && PageDirty(page))
    2149             :                 goto out;
    2150             : 
    2151             :         isolated = numamigrate_isolate_page(pgdat, page);
    2152             :         if (!isolated)
    2153             :                 goto out;
    2154             : 
    2155             :         list_add(&page->lru, &migratepages);
    2156             :         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
    2157             :                                      NULL, node, MIGRATE_ASYNC,
    2158             :                                      MR_NUMA_MISPLACED);
    2159             :         if (nr_remaining) {
    2160             :                 if (!list_empty(&migratepages)) {
    2161             :                         list_del(&page->lru);
    2162             :                         dec_node_page_state(page, NR_ISOLATED_ANON +
    2163             :                                         page_is_file_lru(page));
    2164             :                         putback_lru_page(page);
    2165             :                 }
    2166             :                 isolated = 0;
    2167             :         } else
    2168             :                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
    2169             :         BUG_ON(!list_empty(&migratepages));
    2170             :         return isolated;
    2171             : 
    2172             : out:
    2173             :         put_page(page);
    2174             :         return 0;
    2175             : }
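                     : 
                     : /*
                     :  * Illustrative sketch (not part of this file) of the caller contract
                     :  * described above, in the spirit of the NUMA hinting fault path
                     :  * (cf. do_numa_page()); local variable names are assumptions.  The caller
                     :  * pins the page before dropping the page table lock, and
                     :  * migrate_misplaced_page() consumes that pin on every return path:
                     :  *
                     :  *      page = vm_normal_page(vma, vmf->address, pte);
                     :  *      get_page(page);                 // elevated ref handed to the callee
                     :  *      pte_unmap_unlock(vmf->pte, vmf->ptl);
                     :  *
                     :  *      migrated = migrate_misplaced_page(page, vma, target_nid);
                     :  *      // no put_page() here: the reference was dropped by the callee
                     :  */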
    2176             : #endif /* CONFIG_NUMA_BALANCING */
    2177             : 
    2178             : #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
    2179             : /*
    2180             :  * Migrates a THP to a given target node. page must be locked and is unlocked
    2181             :  * before returning.
    2182             :  */
    2183             : int migrate_misplaced_transhuge_page(struct mm_struct *mm,
    2184             :                                 struct vm_area_struct *vma,
    2185             :                                 pmd_t *pmd, pmd_t entry,
    2186             :                                 unsigned long address,
    2187             :                                 struct page *page, int node)
    2188             : {
    2189             :         spinlock_t *ptl;
    2190             :         pg_data_t *pgdat = NODE_DATA(node);
    2191             :         int isolated = 0;
    2192             :         struct page *new_page = NULL;
    2193             :         int page_lru = page_is_file_lru(page);
    2194             :         unsigned long start = address & HPAGE_PMD_MASK;
    2195             : 
    2196             :         if (is_shared_exec_page(vma, page))
    2197             :                 goto out;
    2198             : 
    2199             :         new_page = alloc_pages_node(node,
    2200             :                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
    2201             :                 HPAGE_PMD_ORDER);
    2202             :         if (!new_page)
    2203             :                 goto out_fail;
    2204             :         prep_transhuge_page(new_page);
    2205             : 
    2206             :         isolated = numamigrate_isolate_page(pgdat, page);
    2207             :         if (!isolated) {
    2208             :                 put_page(new_page);
    2209             :                 goto out_fail;
    2210             :         }
    2211             : 
    2212             :         /* Prepare a page as a migration target */
    2213             :         __SetPageLocked(new_page);
    2214             :         if (PageSwapBacked(page))
    2215             :                 __SetPageSwapBacked(new_page);
    2216             : 
    2217             :         /* anon mapping, we can simply copy page->mapping to the new page: */
    2218             :         new_page->mapping = page->mapping;
    2219             :         new_page->index = page->index;
    2220             :         /* flush the cache before copying using the kernel virtual address */
    2221             :         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
    2222             :         migrate_page_copy(new_page, page);
    2223             :         WARN_ON(PageLRU(new_page));
    2224             : 
    2225             :         /* Recheck the target PMD */
    2226             :         ptl = pmd_lock(mm, pmd);
    2227             :         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
    2228             :                 spin_unlock(ptl);
    2229             : 
    2230             :                 /* Reverse changes made by migrate_page_copy() */
    2231             :                 if (TestClearPageActive(new_page))
    2232             :                         SetPageActive(page);
    2233             :                 if (TestClearPageUnevictable(new_page))
    2234             :                         SetPageUnevictable(page);
    2235             : 
    2236             :                 unlock_page(new_page);
    2237             :                 put_page(new_page);             /* Free it */
    2238             : 
    2239             :                 /* Retake the caller's reference and put the page back on the LRU */
    2240             :                 get_page(page);
    2241             :                 putback_lru_page(page);
    2242             :                 mod_node_page_state(page_pgdat(page),
    2243             :                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
    2244             : 
    2245             :                 goto out_unlock;
    2246             :         }
    2247             : 
    2248             :         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
    2249             :         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
    2250             : 
    2251             :         /*
    2252             :          * Overwrite the old entry under pagetable lock and establish
    2253             :          * the new PTE. Any parallel GUP will either observe the old
    2254             :          * page blocking on the page lock, block on the page table
    2255             :          * lock or observe the new page. The SetPageUptodate on the
    2256             :          * new page and page_add_new_anon_rmap guarantee the copy is
    2257             :          * visible before the pagetable update.
    2258             :          */
    2259             :         page_add_anon_rmap(new_page, vma, start, true);
    2260             :         /*
    2261             :          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
    2262             :          * has already been flushed globally.  So no TLB can be currently
    2263             :          * caching this non present pmd mapping.  There's no need to clear the
    2264             :          * pmd before doing set_pmd_at(), nor to flush the TLB after
    2265             :          * set_pmd_at().  Clearing the pmd here would introduce a race
    2266             :          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
    2267             :          * mmap_lock for reading.  If the pmd is set to NULL at any given time,
    2268             :          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
    2269             :          * pmd.
    2270             :          */
    2271             :         set_pmd_at(mm, start, pmd, entry);
    2272             :         update_mmu_cache_pmd(vma, address, &entry);
    2273             : 
    2274             :         page_ref_unfreeze(page, 2);
    2275             :         mlock_migrate_page(new_page, page);
    2276             :         page_remove_rmap(page, true);
    2277             :         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
    2278             : 
    2279             :         spin_unlock(ptl);
    2280             : 
    2281             :         /* Take an "isolate" reference and put new page on the LRU. */
    2282             :         get_page(new_page);
    2283             :         putback_lru_page(new_page);
    2284             : 
    2285             :         unlock_page(new_page);
    2286             :         unlock_page(page);
    2287             :         put_page(page);                 /* Drop the rmap reference */
    2288             :         put_page(page);                 /* Drop the LRU isolation reference */
    2289             : 
    2290             :         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
    2291             :         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
    2292             : 
    2293             :         mod_node_page_state(page_pgdat(page),
    2294             :                         NR_ISOLATED_ANON + page_lru,
    2295             :                         -HPAGE_PMD_NR);
    2296             :         return isolated;
    2297             : 
    2298             : out_fail:
    2299             :         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
    2300             :         ptl = pmd_lock(mm, pmd);
    2301             :         if (pmd_same(*pmd, entry)) {
    2302             :                 entry = pmd_modify(entry, vma->vm_page_prot);
    2303             :                 set_pmd_at(mm, start, pmd, entry);
    2304             :                 update_mmu_cache_pmd(vma, address, &entry);
    2305             :         }
    2306             :         spin_unlock(ptl);
    2307             : 
    2308             : out_unlock:
    2309             :         unlock_page(page);
    2310             : out:
    2311             :         put_page(page);
    2312             :         return 0;
    2313             : }
    2314             : #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
    2315             : 
    2316             : #endif /* CONFIG_NUMA */
    2317             : 
    2318             : #ifdef CONFIG_DEVICE_PRIVATE
    2319             : static int migrate_vma_collect_hole(unsigned long start,
    2320             :                                     unsigned long end,
    2321             :                                     __always_unused int depth,
    2322             :                                     struct mm_walk *walk)
    2323             : {
    2324             :         struct migrate_vma *migrate = walk->private;
    2325             :         unsigned long addr;
    2326             : 
    2327             :         /* Only allow populating anonymous memory. */
    2328             :         if (!vma_is_anonymous(walk->vma)) {
    2329             :                 for (addr = start; addr < end; addr += PAGE_SIZE) {
    2330             :                         migrate->src[migrate->npages] = 0;
    2331             :                         migrate->dst[migrate->npages] = 0;
    2332             :                         migrate->npages++;
    2333             :                 }
    2334             :                 return 0;
    2335             :         }
    2336             : 
    2337             :         for (addr = start; addr < end; addr += PAGE_SIZE) {
    2338             :                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
    2339             :                 migrate->dst[migrate->npages] = 0;
    2340             :                 migrate->npages++;
    2341             :                 migrate->cpages++;
    2342             :         }
    2343             : 
    2344             :         return 0;
    2345             : }
    2346             : 
    2347             : static int migrate_vma_collect_skip(unsigned long start,
    2348             :                                     unsigned long end,
    2349             :                                     struct mm_walk *walk)
    2350             : {
    2351             :         struct migrate_vma *migrate = walk->private;
    2352             :         unsigned long addr;
    2353             : 
    2354             :         for (addr = start; addr < end; addr += PAGE_SIZE) {
    2355             :                 migrate->dst[migrate->npages] = 0;
    2356             :                 migrate->src[migrate->npages++] = 0;
    2357             :         }
    2358             : 
    2359             :         return 0;
    2360             : }
    2361             : 
    2362             : static int migrate_vma_collect_pmd(pmd_t *pmdp,
    2363             :                                    unsigned long start,
    2364             :                                    unsigned long end,
    2365             :                                    struct mm_walk *walk)
    2366             : {
    2367             :         struct migrate_vma *migrate = walk->private;
    2368             :         struct vm_area_struct *vma = walk->vma;
    2369             :         struct mm_struct *mm = vma->vm_mm;
    2370             :         unsigned long addr = start, unmapped = 0;
    2371             :         spinlock_t *ptl;
    2372             :         pte_t *ptep;
    2373             : 
    2374             : again:
    2375             :         if (pmd_none(*pmdp))
    2376             :                 return migrate_vma_collect_hole(start, end, -1, walk);
    2377             : 
    2378             :         if (pmd_trans_huge(*pmdp)) {
    2379             :                 struct page *page;
    2380             : 
    2381             :                 ptl = pmd_lock(mm, pmdp);
    2382             :                 if (unlikely(!pmd_trans_huge(*pmdp))) {
    2383             :                         spin_unlock(ptl);
    2384             :                         goto again;
    2385             :                 }
    2386             : 
    2387             :                 page = pmd_page(*pmdp);
    2388             :                 if (is_huge_zero_page(page)) {
    2389             :                         spin_unlock(ptl);
    2390             :                         split_huge_pmd(vma, pmdp, addr);
    2391             :                         if (pmd_trans_unstable(pmdp))
    2392             :                                 return migrate_vma_collect_skip(start, end,
    2393             :                                                                 walk);
    2394             :                 } else {
    2395             :                         int ret;
    2396             : 
    2397             :                         get_page(page);
    2398             :                         spin_unlock(ptl);
    2399             :                         if (unlikely(!trylock_page(page)))
    2400             :                                 return migrate_vma_collect_skip(start, end,
    2401             :                                                                 walk);
    2402             :                         ret = split_huge_page(page);
    2403             :                         unlock_page(page);
    2404             :                         put_page(page);
    2405             :                         if (ret)
    2406             :                                 return migrate_vma_collect_skip(start, end,
    2407             :                                                                 walk);
    2408             :                         if (pmd_none(*pmdp))
    2409             :                                 return migrate_vma_collect_hole(start, end, -1,
    2410             :                                                                 walk);
    2411             :                 }
    2412             :         }
    2413             : 
    2414             :         if (unlikely(pmd_bad(*pmdp)))
    2415             :                 return migrate_vma_collect_skip(start, end, walk);
    2416             : 
    2417             :         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    2418             :         arch_enter_lazy_mmu_mode();
    2419             : 
    2420             :         for (; addr < end; addr += PAGE_SIZE, ptep++) {
    2421             :                 unsigned long mpfn = 0, pfn;
    2422             :                 struct page *page;
    2423             :                 swp_entry_t entry;
    2424             :                 pte_t pte;
    2425             : 
    2426             :                 pte = *ptep;
    2427             : 
    2428             :                 if (pte_none(pte)) {
    2429             :                         if (vma_is_anonymous(vma)) {
    2430             :                                 mpfn = MIGRATE_PFN_MIGRATE;
    2431             :                                 migrate->cpages++;
    2432             :                         }
    2433             :                         goto next;
    2434             :                 }
    2435             : 
    2436             :                 if (!pte_present(pte)) {
    2437             :                         /*
    2438             :                          * Only care about unaddressable device page special
    2439             :                          * page table entries. Other special swap entries are not
    2440             :                          * migratable, and we ignore regular swapped pages.
    2441             :                          */
    2442             :                         entry = pte_to_swp_entry(pte);
    2443             :                         if (!is_device_private_entry(entry))
    2444             :                                 goto next;
    2445             : 
    2446             :                         page = device_private_entry_to_page(entry);
    2447             :                         if (!(migrate->flags &
    2448             :                                 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
    2449             :                             page->pgmap->owner != migrate->pgmap_owner)
    2450             :                                 goto next;
    2451             : 
    2452             :                         mpfn = migrate_pfn(page_to_pfn(page)) |
    2453             :                                         MIGRATE_PFN_MIGRATE;
    2454             :                         if (is_write_device_private_entry(entry))
    2455             :                                 mpfn |= MIGRATE_PFN_WRITE;
    2456             :                 } else {
    2457             :                         if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
    2458             :                                 goto next;
    2459             :                         pfn = pte_pfn(pte);
    2460             :                         if (is_zero_pfn(pfn)) {
    2461             :                                 mpfn = MIGRATE_PFN_MIGRATE;
    2462             :                                 migrate->cpages++;
    2463             :                                 goto next;
    2464             :                         }
    2465             :                         page = vm_normal_page(migrate->vma, addr, pte);
    2466             :                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
    2467             :                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
    2468             :                 }
    2469             : 
    2470             :                 /* FIXME support THP */
    2471             :                 if (!page || !page->mapping || PageTransCompound(page)) {
    2472             :                         mpfn = 0;
    2473             :                         goto next;
    2474             :                 }
    2475             : 
    2476             :                 /*
    2477             :                  * By getting a reference on the page we pin it and that blocks
    2478             :                  * any kind of migration. Side effect is that it "freezes" the
    2479             :                  * pte.
    2480             :                  *
    2481             :                  * We drop this reference after isolating the page from the lru
    2482             :                  * for non-device pages (device pages are not on the lru and thus
    2483             :                  * can't be dropped from it).
    2484             :                  */
    2485             :                 get_page(page);
    2486             :                 migrate->cpages++;
    2487             : 
    2488             :                 /*
    2489             :                  * Optimize for the common case where page is only mapped once
    2490             :                  * in one process. If we can lock the page, then we can safely
    2491             :                  * set up a special migration page table entry now.
    2492             :                  */
    2493             :                 if (trylock_page(page)) {
    2494             :                         pte_t swp_pte;
    2495             : 
    2496             :                         mpfn |= MIGRATE_PFN_LOCKED;
    2497             :                         ptep_get_and_clear(mm, addr, ptep);
    2498             : 
    2499             :                         /* Setup special migration page table entry */
    2500             :                         entry = make_migration_entry(page, mpfn &
    2501             :                                                      MIGRATE_PFN_WRITE);
    2502             :                         swp_pte = swp_entry_to_pte(entry);
    2503             :                         if (pte_present(pte)) {
    2504             :                                 if (pte_soft_dirty(pte))
    2505             :                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
    2506             :                                 if (pte_uffd_wp(pte))
    2507             :                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
    2508             :                         } else {
    2509             :                                 if (pte_swp_soft_dirty(pte))
    2510             :                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
    2511             :                                 if (pte_swp_uffd_wp(pte))
    2512             :                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
    2513             :                         }
    2514             :                         set_pte_at(mm, addr, ptep, swp_pte);
    2515             : 
    2516             :                         /*
    2517             :                          * This is like regular unmap: we remove the rmap and
    2518             :                          * drop page refcount. Page won't be freed, as we took
    2519             :                          * a reference just above.
    2520             :                          */
    2521             :                         page_remove_rmap(page, false);
    2522             :                         put_page(page);
    2523             : 
    2524             :                         if (pte_present(pte))
    2525             :                                 unmapped++;
    2526             :                 }
    2527             : 
    2528             : next:
    2529             :                 migrate->dst[migrate->npages] = 0;
    2530             :                 migrate->src[migrate->npages++] = mpfn;
    2531             :         }
    2532             :         arch_leave_lazy_mmu_mode();
    2533             :         pte_unmap_unlock(ptep - 1, ptl);
    2534             : 
    2535             :         /* Only flush the TLB if we actually modified any entries */
    2536             :         if (unmapped)
    2537             :                 flush_tlb_range(walk->vma, start, end);
    2538             : 
    2539             :         return 0;
    2540             : }
    2541             : 
    2542             : static const struct mm_walk_ops migrate_vma_walk_ops = {
    2543             :         .pmd_entry              = migrate_vma_collect_pmd,
    2544             :         .pte_hole               = migrate_vma_collect_hole,
    2545             : };
    2546             : 
    2547             : /*
    2548             :  * migrate_vma_collect() - collect pages over a range of virtual addresses
    2549             :  * @migrate: migrate struct containing all migration information
    2550             :  *
    2551             :  * This will walk the CPU page table. For each virtual address backed by a
    2552             :  * valid page, it updates the src array and takes a reference on the page, in
    2553             :  * order to pin the page until we lock it and unmap it.
    2554             :  */
    2555             : static void migrate_vma_collect(struct migrate_vma *migrate)
    2556             : {
    2557             :         struct mmu_notifier_range range;
    2558             : 
    2559             :         /*
    2560             :          * Note that the pgmap_owner is passed to the mmu notifier callback so
    2561             :          * that the registered device driver can skip invalidating device
    2562             :          * private page mappings that won't be migrated.
    2563             :          */
    2564             :         mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
    2565             :                 migrate->vma->vm_mm, migrate->start, migrate->end,
    2566             :                 migrate->pgmap_owner);
    2567             :         mmu_notifier_invalidate_range_start(&range);
    2568             : 
    2569             :         walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
    2570             :                         &migrate_vma_walk_ops, migrate);
    2571             : 
    2572             :         mmu_notifier_invalidate_range_end(&range);
    2573             :         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
    2574             : }
    2575             : 
    2576             : /*
    2577             :  * migrate_vma_check_page() - check if page is pinned or not
    2578             :  * @page: struct page to check
    2579             :  *
    2580             :  * Pinned pages cannot be migrated. This is the same test as in
    2581             :  * migrate_page_move_mapping(), except that here we allow migration of a
    2582             :  * ZONE_DEVICE page.
    2583             :  */
    2584             : static bool migrate_vma_check_page(struct page *page)
    2585             : {
    2586             :         /*
    2587             :          * One extra ref because caller holds an extra reference, either from
    2588             :          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
    2589             :          * a device page.
    2590             :          */
    2591             :         int extra = 1;
    2592             : 
    2593             :         /*
    2594             :          * FIXME support THP (transparent huge page), it is bit more complex to
    2595             :          * check them than regular pages, because they can be mapped with a pmd
    2596             :          * or with a pte (split pte mapping).
    2597             :          */
    2598             :         if (PageCompound(page))
    2599             :                 return false;
    2600             : 
    2601             :         /* Pages from ZONE_DEVICE have one extra reference */
    2602             :         if (is_zone_device_page(page)) {
    2603             :                 /*
    2604             :                  * Private pages can never be pinned as they have no valid pte and
    2605             :                  * GUP will fail for those. Yet if there is a pending migration, a
    2606             :                  * thread might try to wait on the pte migration entry and will bump
    2607             :                  * the page reference count. Sadly there is no way to differentiate a
    2608             :                  * regular pin from a migration wait. Hence, to keep two racing
    2609             :                  * threads trying to migrate back to the CPU from looping forever
    2610             :                  * (one stopping its migration because the other is waiting on the
    2611             :                  * pte migration entry), we always return true here.
    2612             :                  *
    2613             :                  * FIXME proper solution is to rework migration_entry_wait() so
    2614             :                  * it does not need to take a reference on page.
    2615             :                  */
    2616             :                 return is_device_private_page(page);
    2617             :         }
    2618             : 
    2619             :         /* For file-backed pages */
    2620             :         if (page_mapping(page))
    2621             :                 extra += 1 + page_has_private(page);
    2622             : 
    2623             :         if ((page_count(page) - extra) > page_mapcount(page))
    2624             :                 return false;
    2625             : 
    2626             :         return true;
    2627             : }
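                     : 
                     : /*
                     :  * Illustrative worked example (not part of this file) of the pin check
                     :  * above, under the refcount conventions stated in the comments.  For an
                     :  * anonymous page mapped once and referenced only by this migration
                     :  * (extra == 1), page_count() == 2 (one for the mapping, one for our
                     :  * reference), so count - extra == 1 == page_mapcount() and the page is
                     :  * treated as unpinned.  A concurrent GUP pin raises page_count() to 3,
                     :  * making count - extra == 2 > page_mapcount(), and migration is refused.
                     :  */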
    2628             : 
    2629             : /*
    2630             :  * migrate_vma_prepare() - lock pages and isolate them from the lru
    2631             :  * @migrate: migrate struct containing all migration information
    2632             :  *
    2633             :  * This locks pages that have been collected by migrate_vma_collect(). Once each
    2634             :  * page is locked it is isolated from the lru (for non-device pages). Finally,
    2635             :  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
    2636             :  * migrated by concurrent kernel threads.
    2637             :  */
    2638             : static void migrate_vma_prepare(struct migrate_vma *migrate)
    2639             : {
    2640             :         const unsigned long npages = migrate->npages;
    2641             :         const unsigned long start = migrate->start;
    2642             :         unsigned long addr, i, restore = 0;
    2643             :         bool allow_drain = true;
    2644             : 
    2645             :         lru_add_drain();
    2646             : 
    2647             :         for (i = 0; (i < npages) && migrate->cpages; i++) {
    2648             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    2649             :                 bool remap = true;
    2650             : 
    2651             :                 if (!page)
    2652             :                         continue;
    2653             : 
    2654             :                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
    2655             :                         /*
    2656             :                          * Because we are migrating several pages, there can be
    2657             :                          * a deadlock between two concurrent migrations where each
    2658             :                          * is waiting on the other's page lock.
    2659             :                          *
    2660             :                          * Make migrate_vma() a best-effort thing and back off
    2661             :                          * for any page we cannot lock right away.
    2662             :                          */
    2663             :                         if (!trylock_page(page)) {
    2664             :                                 migrate->src[i] = 0;
    2665             :                                 migrate->cpages--;
    2666             :                                 put_page(page);
    2667             :                                 continue;
    2668             :                         }
    2669             :                         remap = false;
    2670             :                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
    2671             :                 }
    2672             : 
    2673             :                 /* ZONE_DEVICE pages are not on LRU */
    2674             :                 if (!is_zone_device_page(page)) {
    2675             :                         if (!PageLRU(page) && allow_drain) {
    2676             :                                 /* Drain CPU's pagevec */
    2677             :                                 lru_add_drain_all();
    2678             :                                 allow_drain = false;
    2679             :                         }
    2680             : 
    2681             :                         if (isolate_lru_page(page)) {
    2682             :                                 if (remap) {
    2683             :                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    2684             :                                         migrate->cpages--;
    2685             :                                         restore++;
    2686             :                                 } else {
    2687             :                                         migrate->src[i] = 0;
    2688             :                                         unlock_page(page);
    2689             :                                         migrate->cpages--;
    2690             :                                         put_page(page);
    2691             :                                 }
    2692             :                                 continue;
    2693             :                         }
    2694             : 
    2695             :                         /* Drop the reference we took in collect */
    2696             :                         put_page(page);
    2697             :                 }
    2698             : 
    2699             :                 if (!migrate_vma_check_page(page)) {
    2700             :                         if (remap) {
    2701             :                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    2702             :                                 migrate->cpages--;
    2703             :                                 restore++;
    2704             : 
    2705             :                                 if (!is_zone_device_page(page)) {
    2706             :                                         get_page(page);
    2707             :                                         putback_lru_page(page);
    2708             :                                 }
    2709             :                         } else {
    2710             :                                 migrate->src[i] = 0;
    2711             :                                 unlock_page(page);
    2712             :                                 migrate->cpages--;
    2713             : 
    2714             :                                 if (!is_zone_device_page(page))
    2715             :                                         putback_lru_page(page);
    2716             :                                 else
    2717             :                                         put_page(page);
    2718             :                         }
    2719             :                 }
    2720             :         }
    2721             : 
    2722             :         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
    2723             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    2724             : 
    2725             :                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
    2726             :                         continue;
    2727             : 
    2728             :                 remove_migration_pte(page, migrate->vma, addr, page);
    2729             : 
    2730             :                 migrate->src[i] = 0;
    2731             :                 unlock_page(page);
    2732             :                 put_page(page);
    2733             :                 restore--;
    2734             :         }
    2735             : }
    2736             : 
    2737             : /*
    2738             :  * migrate_vma_unmap() - replace page mapping with special migration pte entry
    2739             :  * @migrate: migrate struct containing all migration information
    2740             :  *
    2741             :  * Replace page mapping (CPU page table pte) with a special migration pte entry
    2742             :  * and check again if it has been pinned. Pinned pages are restored because we
    2743             :  * cannot migrate them.
    2744             :  *
    2745             :  * This is the last step before we call the device driver callback to allocate
    2746             :  * destination memory and copy contents of original page over to new page.
    2747             :  */
    2748             : static void migrate_vma_unmap(struct migrate_vma *migrate)
    2749             : {
    2750             :         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
    2751             :         const unsigned long npages = migrate->npages;
    2752             :         const unsigned long start = migrate->start;
    2753             :         unsigned long addr, i, restore = 0;
    2754             : 
    2755             :         for (i = 0; i < npages; i++) {
    2756             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    2757             : 
    2758             :                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
    2759             :                         continue;
    2760             : 
    2761             :                 if (page_mapped(page)) {
    2762             :                         try_to_unmap(page, flags);
    2763             :                         if (page_mapped(page))
    2764             :                                 goto restore;
    2765             :                 }
    2766             : 
    2767             :                 if (migrate_vma_check_page(page))
    2768             :                         continue;
    2769             : 
    2770             : restore:
    2771             :                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    2772             :                 migrate->cpages--;
    2773             :                 restore++;
    2774             :         }
    2775             : 
    2776             :         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
    2777             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    2778             : 
    2779             :                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
    2780             :                         continue;
    2781             : 
    2782             :                 remove_migration_ptes(page, page, false);
    2783             : 
    2784             :                 migrate->src[i] = 0;
    2785             :                 unlock_page(page);
    2786             :                 restore--;
    2787             : 
    2788             :                 if (is_zone_device_page(page))
    2789             :                         put_page(page);
    2790             :                 else
    2791             :                         putback_lru_page(page);
    2792             :         }
    2793             : }
    2794             : 
    2795             : /**
    2796             :  * migrate_vma_setup() - prepare to migrate a range of memory
    2797             :  * @args: contains the vma, start, and pfns arrays for the migration
    2798             :  *
    2799             :  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
    2800             :  * without an error.
    2801             :  *
    2802             :  * Prepare to migrate a range of memory virtual address range by collecting all
    2803             :  * the pages backing each virtual address in the range, saving them inside the
    2804             :  * src array.  Then lock those pages and unmap them. Once the pages are locked
    2805             :  * and unmapped, check whether each page is pinned or not.  Pages that aren't
    2806             :  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
    2807             :  * corresponding src array entry.  It then restores any pages that are pinned,
    2808             :  * by remapping and unlocking those pages.
    2809             :  *
    2810             :  * The caller should then allocate destination memory and copy source memory to
    2811             :  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
    2812             :  * flag set).  Once these are allocated and copied, the caller must update each
    2813             :  * corresponding entry in the dst array with the pfn value of the destination
    2814             :  * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
    2815             :  * (destination pages must have their struct pages locked, via lock_page()).
    2816             :  *
    2817             :  * Note that the caller does not have to migrate all the pages that are marked
    2818             :  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
    2819             :  * device memory to system memory.  If the caller cannot migrate a device page
    2820             :  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
    2821             :  * consequences for the userspace process, so it must be avoided if at all
    2822             :  * possible.
    2823             :  *
    2824             :  * For empty entries inside the CPU page table (pte_none() or pmd_none() is true)
    2825             :  * we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source array,
    2826             :  * thus allowing the caller to allocate device memory for those unbacked virtual
    2827             :  * addresses.  For this the caller simply has to allocate device memory and
    2828             :  * properly set the destination entry like for regular migration.  Note that
    2829             :  * this can still fail, and thus the device driver must check whether the
    2830             :  * migration was successful for those entries after calling migrate_vma_pages(),
    2831             :  * just like for regular migration.
    2832             :  *
    2833             :  * After that, the caller must call migrate_vma_pages() to go over each entry
    2834             :  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
    2835             :  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
    2836             :  * then migrate_vma_pages() migrates struct page information from the source
    2837             :  * struct page to the destination struct page.  If it fails to migrate the
    2838             :  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
    2839             :  * src array.
    2840             :  *
    2841             :  * At this point all successfully migrated pages have an entry in the src
    2842             :  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
    2843             :  * array entry with MIGRATE_PFN_VALID flag set.
    2844             :  *
    2845             :  * Once migrate_vma_pages() returns the caller may inspect which pages were
    2846             :  * successfully migrated, and which were not.  Successfully migrated pages will
    2847             :  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
    2848             :  *
    2849             :  * It is safe to update device page table after migrate_vma_pages() because
    2850             :  * both destination and source page are still locked, and the mmap_lock is held
    2851             :  * in read mode (hence no one can unmap the range being migrated).
    2852             :  *
    2853             :  * Once the caller is done cleaning up things and updating its page table (if it
    2854             :  * chose to do so, this is not an obligation) it finally calls
    2855             :  * migrate_vma_finalize() to update the CPU page table to point to new pages
    2856             :  * for successfully migrated pages or otherwise restore the CPU page table to
    2857             :  * point to the original source pages.
    2858             :  */
    2859             : int migrate_vma_setup(struct migrate_vma *args)
    2860             : {
    2861             :         long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
    2862             : 
    2863             :         args->start &= PAGE_MASK;
    2864             :         args->end &= PAGE_MASK;
    2865             :         if (!args->vma || is_vm_hugetlb_page(args->vma) ||
    2866             :             (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
    2867             :                 return -EINVAL;
    2868             :         if (nr_pages <= 0)
    2869             :                 return -EINVAL;
    2870             :         if (args->start < args->vma->vm_start ||
    2871             :             args->start >= args->vma->vm_end)
    2872             :                 return -EINVAL;
    2873             :         if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
    2874             :                 return -EINVAL;
    2875             :         if (!args->src || !args->dst)
    2876             :                 return -EINVAL;
    2877             : 
    2878             :         memset(args->src, 0, sizeof(*args->src) * nr_pages);
    2879             :         args->cpages = 0;
    2880             :         args->npages = 0;
    2881             : 
    2882             :         migrate_vma_collect(args);
    2883             : 
    2884             :         if (args->cpages)
    2885             :                 migrate_vma_prepare(args);
    2886             :         if (args->cpages)
    2887             :                 migrate_vma_unmap(args);
    2888             : 
    2889             :         /*
    2890             :          * At this point pages are locked and unmapped, and thus they have
    2891             :          * stable content and can safely be copied to destination memory that
    2892             :          * is allocated by the drivers.
    2893             :          */
    2894             :         return 0;
    2895             : 
    2896             : }
    2897             : EXPORT_SYMBOL(migrate_vma_setup);
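
/*
 * A minimal, hypothetical sketch of the driver-side call sequence that the
 * comment above describes: migrate_vma_setup(), then allocation and copy of
 * the destination pages, migrate_vma_pages(), and finally
 * migrate_vma_finalize().  The helpers my_drv_alloc_dst_page() and
 * my_drv_copy_page() are placeholder names for driver-specific code, not
 * kernel APIs, and depending on the kernel version the caller may also need
 * to set a source-selection flags field in struct migrate_vma; treat this as
 * an illustration of the call order only.
 */
#define MY_DRV_MAX_PAGES 16

static int my_drv_migrate_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				void *pgmap_owner)
{
	unsigned long src[MY_DRV_MAX_PAGES] = { 0 };
	unsigned long dst[MY_DRV_MAX_PAGES] = { 0 };
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= pgmap_owner,
	};
	unsigned long i, npages = (end - start) >> PAGE_SHIFT;
	int ret;

	if (npages > MY_DRV_MAX_PAGES)
		return -EINVAL;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/*
	 * The selected pages are now locked and unmapped, so their contents
	 * are stable and can be copied into freshly allocated destination
	 * pages.
	 */
	for (i = 0; i < npages; i++) {
		struct page *spage = migrate_pfn_to_page(args.src[i]);
		struct page *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = my_drv_alloc_dst_page();	/* assumed to return a locked page */
		if (!dpage)
			continue;	/* leave dst[i] empty: this entry is simply not migrated */
		if (spage)
			my_drv_copy_page(spage, dpage);
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	/* The device page table may be updated here; the pages are still locked. */
	migrate_vma_finalize(&args);
	return 0;
}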
    2898             : 
    2899             : /*
    2900             :  * This code closely matches the code in:
    2901             :  *   __handle_mm_fault()
    2902             :  *     handle_pte_fault()
    2903             :  *       do_anonymous_page()
    2904             :  * to map in an anonymous zero page, except that the struct page will be a
    2905             :  * ZONE_DEVICE private page.
    2906             :  */
    2907             : static void migrate_vma_insert_page(struct migrate_vma *migrate,
    2908             :                                     unsigned long addr,
    2909             :                                     struct page *page,
    2910             :                                     unsigned long *src)
    2911             : {
    2912             :         struct vm_area_struct *vma = migrate->vma;
    2913             :         struct mm_struct *mm = vma->vm_mm;
    2914             :         bool flush = false;
    2915             :         spinlock_t *ptl;
    2916             :         pte_t entry;
    2917             :         pgd_t *pgdp;
    2918             :         p4d_t *p4dp;
    2919             :         pud_t *pudp;
    2920             :         pmd_t *pmdp;
    2921             :         pte_t *ptep;
    2922             : 
    2923             :         /* Only allow populating anonymous memory */
    2924             :         if (!vma_is_anonymous(vma))
    2925             :                 goto abort;
    2926             : 
    2927             :         pgdp = pgd_offset(mm, addr);
    2928             :         p4dp = p4d_alloc(mm, pgdp, addr);
    2929             :         if (!p4dp)
    2930             :                 goto abort;
    2931             :         pudp = pud_alloc(mm, p4dp, addr);
    2932             :         if (!pudp)
    2933             :                 goto abort;
    2934             :         pmdp = pmd_alloc(mm, pudp, addr);
    2935             :         if (!pmdp)
    2936             :                 goto abort;
    2937             : 
    2938             :         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
    2939             :                 goto abort;
    2940             : 
    2941             :         /*
    2942             :          * Use pte_alloc() instead of pte_alloc_map().  We can't run
    2943             :          * pte_offset_map() on pmds where a huge pmd might be created
    2944             :          * from a different thread.
    2945             :          *
    2946             :          * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
    2947             :          * parallel threads are excluded by other means.
    2948             :          *
    2949             :          * Here we only have mmap_read_lock(mm).
    2950             :          */
    2951             :         if (pte_alloc(mm, pmdp))
    2952             :                 goto abort;
    2953             : 
    2954             :         /* See the comment in pte_alloc_one_map() */
    2955             :         if (unlikely(pmd_trans_unstable(pmdp)))
    2956             :                 goto abort;
    2957             : 
    2958             :         if (unlikely(anon_vma_prepare(vma)))
    2959             :                 goto abort;
    2960             :         if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
    2961             :                 goto abort;
    2962             : 
    2963             :         /*
    2964             :          * The memory barrier inside __SetPageUptodate makes sure that
    2965             :          * preceding stores to the page contents become visible before
    2966             :          * the set_pte_at() write.
    2967             :          */
    2968             :         __SetPageUptodate(page);
    2969             : 
    2970             :         if (is_zone_device_page(page)) {
    2971             :                 if (is_device_private_page(page)) {
    2972             :                         swp_entry_t swp_entry;
    2973             : 
    2974             :                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
    2975             :                         entry = swp_entry_to_pte(swp_entry);
    2976             :                 }
    2977             :         } else {
    2978             :                 entry = mk_pte(page, vma->vm_page_prot);
    2979             :                 if (vma->vm_flags & VM_WRITE)
    2980             :                         entry = pte_mkwrite(pte_mkdirty(entry));
    2981             :         }
    2982             : 
    2983             :         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
    2984             : 
    2985             :         if (check_stable_address_space(mm))
    2986             :                 goto unlock_abort;
    2987             : 
    2988             :         if (pte_present(*ptep)) {
    2989             :                 unsigned long pfn = pte_pfn(*ptep);
    2990             : 
    2991             :                 if (!is_zero_pfn(pfn))
    2992             :                         goto unlock_abort;
    2993             :                 flush = true;
    2994             :         } else if (!pte_none(*ptep))
    2995             :                 goto unlock_abort;
    2996             : 
    2997             :         /*
    2998             :          * Check for userfaultfd but do not deliver the fault. Instead,
    2999             :          * just back off.
    3000             :          */
    3001             :         if (userfaultfd_missing(vma))
    3002             :                 goto unlock_abort;
    3003             : 
    3004             :         inc_mm_counter(mm, MM_ANONPAGES);
    3005             :         page_add_new_anon_rmap(page, vma, addr, false);
    3006             :         if (!is_zone_device_page(page))
    3007             :                 lru_cache_add_inactive_or_unevictable(page, vma);
    3008             :         get_page(page);
    3009             : 
    3010             :         if (flush) {
    3011             :                 flush_cache_page(vma, addr, pte_pfn(*ptep));
    3012             :                 ptep_clear_flush_notify(vma, addr, ptep);
    3013             :                 set_pte_at_notify(mm, addr, ptep, entry);
    3014             :                 update_mmu_cache(vma, addr, ptep);
    3015             :         } else {
    3016             :                 /* No need to invalidate - it was non-present before */
    3017             :                 set_pte_at(mm, addr, ptep, entry);
    3018             :                 update_mmu_cache(vma, addr, ptep);
    3019             :         }
    3020             : 
    3021             :         pte_unmap_unlock(ptep, ptl);
    3022             :         *src = MIGRATE_PFN_MIGRATE;
    3023             :         return;
    3024             : 
    3025             : unlock_abort:
    3026             :         pte_unmap_unlock(ptep, ptl);
    3027             : abort:
    3028             :         *src &= ~MIGRATE_PFN_MIGRATE;
    3029             : }
    3030             : 
    3031             : /**
    3032             :  * migrate_vma_pages() - migrate meta-data from src page to dst page
    3033             :  * @migrate: migrate struct containing all migration information
    3034             :  *
    3035             :  * This migrates struct page meta-data from source struct page to destination
    3036             :  * struct page. This effectively finishes the migration from source page to the
    3037             :  * destination page.
    3038             :  */
    3039             : void migrate_vma_pages(struct migrate_vma *migrate)
    3040             : {
    3041             :         const unsigned long npages = migrate->npages;
    3042             :         const unsigned long start = migrate->start;
    3043             :         struct mmu_notifier_range range;
    3044             :         unsigned long addr, i;
    3045             :         bool notified = false;
    3046             : 
    3047             :         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
    3048             :                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
    3049             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    3050             :                 struct address_space *mapping;
    3051             :                 int r;
    3052             : 
    3053             :                 if (!newpage) {
    3054             :                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    3055             :                         continue;
    3056             :                 }
    3057             : 
    3058             :                 if (!page) {
    3059             :                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
    3060             :                                 continue;
    3061             :                         if (!notified) {
    3062             :                                 notified = true;
    3063             : 
    3064             :                                 mmu_notifier_range_init_migrate(&range, 0,
    3065             :                                         migrate->vma, migrate->vma->vm_mm,
    3066             :                                         addr, migrate->end,
    3067             :                                         migrate->pgmap_owner);
    3068             :                                 mmu_notifier_invalidate_range_start(&range);
    3069             :                         }
    3070             :                         migrate_vma_insert_page(migrate, addr, newpage,
    3071             :                                                 &migrate->src[i]);
    3072             :                         continue;
    3073             :                 }
    3074             : 
    3075             :                 mapping = page_mapping(page);
    3076             : 
    3077             :                 if (is_zone_device_page(newpage)) {
    3078             :                         if (is_device_private_page(newpage)) {
    3079             :                                 /*
    3080             :                                  * For now, only private anonymous memory is supported
    3081             :                                  * when migrating to un-addressable device memory.
    3082             :                                  */
    3083             :                                 if (mapping) {
    3084             :                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    3085             :                                         continue;
    3086             :                                 }
    3087             :                         } else {
    3088             :                                 /*
    3089             :                                  * Other types of ZONE_DEVICE page are not
    3090             :                                  * supported.
    3091             :                                  */
    3092             :                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    3093             :                                 continue;
    3094             :                         }
    3095             :                 }
    3096             : 
    3097             :                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
    3098             :                 if (r != MIGRATEPAGE_SUCCESS)
    3099             :                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
    3100             :         }
    3101             : 
    3102             :         /*
    3103             :          * No need to call the mmu_notifier->invalidate_range() callback a
    3104             :          * second time: ptep_clear_flush_notify() inside
    3105             :          * migrate_vma_insert_page() above has already called it.
    3106             :          */
    3107             :         if (notified)
    3108             :                 mmu_notifier_invalidate_range_only_end(&range);
    3109             : }
    3110             : EXPORT_SYMBOL(migrate_vma_pages);
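
/*
 * A small illustrative loop, again using a hypothetical driver helper
 * (my_drv_update_device_pte()), showing how a caller can act on the result
 * of migrate_vma_pages() before calling migrate_vma_finalize(): entries whose
 * src slot still has MIGRATE_PFN_MIGRATE set and whose dst slot resolves to a
 * page were migrated successfully, and the device page table can be pointed
 * at the destination page while source and destination are still locked.
 */
static void my_drv_map_migrated(struct migrate_vma *args)
{
	unsigned long i, addr;

	for (i = 0, addr = args->start; i < args->npages;
	     i++, addr += PAGE_SIZE) {
		struct page *dpage = migrate_pfn_to_page(args->dst[i]);

		if (!(args->src[i] & MIGRATE_PFN_MIGRATE) || !dpage)
			continue;	/* this entry was not migrated */
		/* hypothetical helper: map dpage into the device MMU */
		my_drv_update_device_pte(args->vma, addr, dpage);
	}
}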
    3111             : 
    3112             : /**
    3113             :  * migrate_vma_finalize() - restore CPU page table entry
    3114             :  * @migrate: migrate struct containing all migration information
    3115             :  *
    3116             :  * This replaces the special migration pte entry with either a mapping to the
    3117             :  * new page if migration was successful for that page, or to the original page
    3118             :  * otherwise.
    3119             :  *
    3120             :  * This also unlocks the pages and puts them back on the lru or, for device
    3121             :  * pages, drops the extra refcount.
    3122             :  */
    3123             : void migrate_vma_finalize(struct migrate_vma *migrate)
    3124             : {
    3125             :         const unsigned long npages = migrate->npages;
    3126             :         unsigned long i;
    3127             : 
    3128             :         for (i = 0; i < npages; i++) {
    3129             :                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
    3130             :                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
    3131             : 
    3132             :                 if (!page) {
    3133             :                         if (newpage) {
    3134             :                                 unlock_page(newpage);
    3135             :                                 put_page(newpage);
    3136             :                         }
    3137             :                         continue;
    3138             :                 }
    3139             : 
    3140             :                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
    3141             :                         if (newpage) {
    3142             :                                 unlock_page(newpage);
    3143             :                                 put_page(newpage);
    3144             :                         }
    3145             :                         newpage = page;
    3146             :                 }
    3147             : 
    3148             :                 remove_migration_ptes(page, newpage, false);
    3149             :                 unlock_page(page);
    3150             : 
    3151             :                 if (is_zone_device_page(page))
    3152             :                         put_page(page);
    3153             :                 else
    3154             :                         putback_lru_page(page);
    3155             : 
    3156             :                 if (newpage != page) {
    3157             :                         unlock_page(newpage);
    3158             :                         if (is_zone_device_page(newpage))
    3159             :                                 put_page(newpage);
    3160             :                         else
    3161             :                                 putback_lru_page(newpage);
    3162             :                 }
    3163             :         }
    3164             : }
    3165             : EXPORT_SYMBOL(migrate_vma_finalize);
    3166             : #endif /* CONFIG_DEVICE_PRIVATE */

Generated by: LCOV version 1.14