LCOV - code coverage report
Current view: top level - mm - page_vma_mapped.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
Coverage:   Lines: 53 / 108 (49.1 %)   Functions: 4 / 5 (80.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : #include <linux/mm.h>
       3             : #include <linux/rmap.h>
       4             : #include <linux/hugetlb.h>
       5             : #include <linux/swap.h>
       6             : #include <linux/swapops.h>
       7             : 
       8             : #include "internal.h"
       9             : 
      10          68 : static inline bool not_found(struct page_vma_mapped_walk *pvmw)
      11             : {
      12         136 :         page_vma_mapped_walk_done(pvmw);
      13          68 :         return false;
      14             : }
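
For reference, the cleanup that not_found() delegates to is defined in include/linux/rmap.h rather than in this file. A close approximation of its v5.12-era definition (a sketch; check the header for the exact text):

        static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
        {
                /* HugeTLB ptes are not mapped via pte_offset_map(), so skip pte_unmap() */
                if (pvmw->pte && !PageHuge(pvmw->page))
                        pte_unmap(pvmw->pte);
                if (pvmw->ptl)
                        spin_unlock(pvmw->ptl);
        }
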
      15             : 
      16          68 : static bool map_pte(struct page_vma_mapped_walk *pvmw)
      17             : {
      18          68 :         pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
      19          68 :         if (!(pvmw->flags & PVMW_SYNC)) {
      20           0 :                 if (pvmw->flags & PVMW_MIGRATION) {
      21           0 :                         if (!is_swap_pte(*pvmw->pte))
      22             :                                 return false;
      23             :                 } else {
      24             :                         /*
      25             :                          * We get here when we are trying to unmap a private
      26             :                          * device page from the process address space. Such
      27             :                          * page is not CPU accessible and thus is mapped as
      28             :                          * a special swap entry, nonetheless it still does
      29             :                          * count as a valid regular mapping for the page (and
      30             :                          * is accounted as such in page maps count).
      31             :                          *
      32             :                          * So handle this special case as if it were a normal
      33             :                          * page mapping, i.e. lock the CPU page table and
      34             :                          * return true.
      35             :                          *
      36             :                          * For more details on device private memory see HMM
      37             :                          * (include/linux/hmm.h or mm/hmm.c).
      38             :                          */
      39           0 :                         if (is_swap_pte(*pvmw->pte)) {
      40             :                                 swp_entry_t entry;
      41             : 
      42             :                                 /* Handle un-addressable ZONE_DEVICE memory */
      43          68 :                                 entry = pte_to_swp_entry(*pvmw->pte);
      44             :                                 if (!is_device_private_entry(entry))
      45          68 :                                         return false;
      46           0 :                         } else if (!pte_present(*pvmw->pte))
      47             :                                 return false;
      48             :                 }
      49             :         }
      50          68 :         pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
      51          68 :         spin_lock(pvmw->ptl);
      52          68 :         return true;
      53             : }
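
A note on the swap-PTE tests in map_pte(): is_swap_pte() (include/linux/swapops.h) treats any PTE that is neither none nor present as a swap entry, which is how the device-private case is reached through it. Approximately:

        static inline int is_swap_pte(pte_t pte)
        {
                /* Neither empty nor pointing at RAM: some flavour of swap entry. */
                return !pte_none(pte) && !pte_present(pte);
        }
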
      54             : 
      55          68 : static inline bool pfn_is_match(struct page *page, unsigned long pfn)
      56             : {
      57          68 :         unsigned long page_pfn = page_to_pfn(page);
      58             : 
      59             :         /* normal page and hugetlbfs page */
      60          68 :         if (!PageTransCompound(page) || PageHuge(page))
      61          68 :                 return page_pfn == pfn;
      62             : 
      63             :         /* THP can be referenced by any subpage */
      64           0 :         return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
      65             : }
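
A worked example of the THP branch of pfn_is_match(), as a standalone sketch with hypothetical pfn values (thp_range_match() is not a kernel function):

        /* Same range check as the THP branch above, lifted out for illustration. */
        static bool thp_range_match(unsigned long head_pfn, unsigned long nr_pages,
                                    unsigned long pfn)
        {
                /* Any subpage pfn in [head_pfn, head_pfn + nr_pages) matches. */
                return pfn >= head_pfn && pfn - head_pfn < nr_pages;
        }

        /*
         * For a 2 MiB THP (512 subpages) whose head page sits at pfn 0x1000:
         *   thp_range_match(0x1000, 512, 0x1003) -> true   (subpage 3)
         *   thp_range_match(0x1000, 512, 0x1200) -> false  (one past the end)
         */
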
      66             : 
      67             : /**
       68             :  * check_pte - check if @pvmw->page is mapped at @pvmw->pte
       69             :  * @pvmw: page_vma_mapped_walk struct, includes a pair of pte and page for checking
      70             :  *
      71             :  * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
      72             :  * mapped. check_pte() has to validate this.
      73             :  *
       74             :  * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to
       75             :  * an arbitrary page.
      76             :  *
      77             :  * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
      78             :  * entry that points to @pvmw->page or any subpage in case of THP.
      79             :  *
       80             :  * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
       81             :  * @pvmw->page or any subpage in case of THP.
      82             :  *
      83             :  * Otherwise, return false.
      84             :  *
      85             :  */
      86          68 : static bool check_pte(struct page_vma_mapped_walk *pvmw)
      87             : {
      88          68 :         unsigned long pfn;
      89             : 
      90          68 :         if (pvmw->flags & PVMW_MIGRATION) {
      91           0 :                 swp_entry_t entry;
      92           0 :                 if (!is_swap_pte(*pvmw->pte))
      93          68 :                         return false;
      94           0 :                 entry = pte_to_swp_entry(*pvmw->pte);
      95             : 
      96           0 :                 if (!is_migration_entry(entry))
      97             :                         return false;
      98             : 
      99           0 :                 pfn = migration_entry_to_pfn(entry);
     100          68 :         } else if (is_swap_pte(*pvmw->pte)) {
     101             :                 swp_entry_t entry;
     102             : 
     103             :                 /* Handle un-addressable ZONE_DEVICE memory */
     104          68 :                 entry = pte_to_swp_entry(*pvmw->pte);
     105             :                 if (!is_device_private_entry(entry))
     106          68 :                         return false;
     107             : 
     108             :                 pfn = device_private_entry_to_pfn(entry);
     109             :         } else {
     110          68 :                 if (!pte_present(*pvmw->pte))
     111             :                         return false;
     112             : 
     113          68 :                 pfn = pte_pfn(*pvmw->pte);
     114             :         }
     115             : 
     116          68 :         return pfn_is_match(pvmw->page, pfn);
     117             : }
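
The migration branch of check_pte() relies on a pfn being recoverable from the swap entry. A hypothetical helper sketching that dependency (migration_pfn_of() is not a kernel function; the swapops.h helpers are v5.12-era names):

        static unsigned long migration_pfn_of(pte_t pte)
        {
                swp_entry_t entry = pte_to_swp_entry(pte);

                VM_BUG_ON(!is_migration_entry(entry));
                /* The entry's offset field encodes the pfn being migrated. */
                return migration_entry_to_pfn(entry);
        }
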
     118             : 
     119             : /**
     120             :  * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
     121             :  * @pvmw->address
     122             :  * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
     123             :  * must be set. pmd, pte and ptl must be NULL.
     124             :  *
     125             :  * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
     126             :  * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
     127             :  * adjusted if needed (for PTE-mapped THPs).
     128             :  *
      129             :  * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
     130             :  * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
     131             :  * a loop to find all PTEs that map the THP.
     132             :  *
     133             :  * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
     134             :  * regardless of which page table level the page is mapped at. @pvmw->pmd is
     135             :  * NULL.
     136             :  *
      137             :  * Returns false if there are no more page table entries for the page in
     138             :  * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
     139             :  *
      140             :  * If you need to stop the walk before page_vma_mapped_walk() returns false,
     141             :  * use page_vma_mapped_walk_done(). It will do the housekeeping.
     142             :  */
     143         136 : bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
     144             : {
     145         136 :         struct mm_struct *mm = pvmw->vma->vm_mm;
     146         136 :         struct page *page = pvmw->page;
     147         136 :         pgd_t *pgd;
     148         136 :         p4d_t *p4d;
     149         136 :         pud_t *pud;
     150         136 :         pmd_t pmde;
     151             : 
     152             :         /* The only possible pmd mapping has been handled on last iteration */
     153         136 :         if (pvmw->pmd && !pvmw->pte)
     154           0 :                 return not_found(pvmw);
     155             : 
     156         136 :         if (pvmw->pte)
     157          68 :                 goto next_pte;
     158             : 
     159          68 :         if (unlikely(PageHuge(pvmw->page))) {
     160             :                 /* when pud is not present, pte will be NULL */
     161             :                 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
     162             :                 if (!pvmw->pte)
     163             :                         return false;
     164             : 
     165             :                 pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
     166             :                 spin_lock(pvmw->ptl);
     167             :                 if (!check_pte(pvmw))
     168             :                         return not_found(pvmw);
     169             :                 return true;
     170             :         }
     171          68 : restart:
     172          68 :         pgd = pgd_offset(mm, pvmw->address);
     173          68 :         if (!pgd_present(*pgd))
     174             :                 return false;
     175          68 :         p4d = p4d_offset(pgd, pvmw->address);
     176          68 :         if (!p4d_present(*p4d))
     177             :                 return false;
     178          68 :         pud = pud_offset(p4d, pvmw->address);
     179         136 :         if (!pud_present(*pud))
     180             :                 return false;
     181          68 :         pvmw->pmd = pmd_offset(pud, pvmw->address);
     182             :         /*
     183             :          * Make sure the pmd value isn't cached in a register by the
     184             :          * compiler and used as a stale value after we've observed a
     185             :          * subsequent update.
     186             :          */
     187          68 :         pmde = READ_ONCE(*pvmw->pmd);
     188          68 :         if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
     189           0 :                 pvmw->ptl = pmd_lock(mm, pvmw->pmd);
     190           0 :                 if (likely(pmd_trans_huge(*pvmw->pmd))) {
     191           0 :                         if (pvmw->flags & PVMW_MIGRATION)
     192           0 :                                 return not_found(pvmw);
     193           0 :                         if (pmd_page(*pvmw->pmd) != page)
     194           0 :                                 return not_found(pvmw);
     195             :                         return true;
     196           0 :                 } else if (!pmd_present(*pvmw->pmd)) {
     197           0 :                         if (thp_migration_supported()) {
     198           0 :                                 if (!(pvmw->flags & PVMW_MIGRATION))
     199           0 :                                         return not_found(pvmw);
     200           0 :                                 if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
     201           0 :                                         swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
     202             : 
     203           0 :                                         if (migration_entry_to_page(entry) != page)
     204           0 :                                                 return not_found(pvmw);
     205             :                                         return true;
     206             :                                 }
     207             :                         }
     208           0 :                         return not_found(pvmw);
     209             :                 } else {
     210             :                         /* THP pmd was split under us: handle on pte level */
     211           0 :                         spin_unlock(pvmw->ptl);
     212           0 :                         pvmw->ptl = NULL;
     213             :                 }
     214         136 :         } else if (!pmd_present(pmde)) {
     215             :                 return false;
     216             :         }
     217          68 :         if (!map_pte(pvmw))
     218           0 :                 goto next_pte;
     219          68 :         while (1) {
     220          68 :                 if (check_pte(pvmw))
     221             :                         return true;
     222           0 : next_pte:
     223             :                 /* Seek to next pte only makes sense for THP */
     224          68 :                 if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
     225         136 :                         return not_found(pvmw);
     226           0 :                 do {
     227           0 :                         pvmw->address += PAGE_SIZE;
     228           0 :                         if (pvmw->address >= pvmw->vma->vm_end ||
     229             :                             pvmw->address >=
     230           0 :                                         __vma_address(pvmw->page, pvmw->vma) +
     231           0 :                                         thp_size(pvmw->page))
     232           0 :                                 return not_found(pvmw);
     233             :                         /* Did we cross page table boundary? */
     234           0 :                         if (pvmw->address % PMD_SIZE == 0) {
     235           0 :                                 pte_unmap(pvmw->pte);
     236           0 :                                 if (pvmw->ptl) {
     237           0 :                                         spin_unlock(pvmw->ptl);
     238           0 :                                         pvmw->ptl = NULL;
     239             :                                 }
     240           0 :                                 goto restart;
     241             :                         } else {
     242           0 :                                 pvmw->pte++;
     243             :                         }
     244           0 :                 } while (pte_none(*pvmw->pte));
     245             : 
     246           0 :                 if (!pvmw->ptl) {
     247           0 :                         pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
     248          68 :                         spin_lock(pvmw->ptl);
     249             :                 }
     250             :         }
     251             : }
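
As the comment block above notes, a PTE-mapped THP requires running the walk in a loop. A minimal hypothetical caller in the style of mm/rmap.c (count_ptes_mapped() is not a kernel function):

        /* Count how many PTEs in @vma map @page, starting the walk at @address. */
        static int count_ptes_mapped(struct page *page, struct vm_area_struct *vma,
                                     unsigned long address)
        {
                struct page_vma_mapped_walk pvmw = {
                        .page = page,
                        .vma = vma,
                        .address = address,
                };
                int nr = 0;

                /* Each true return leaves pvmw.pte valid and pvmw.ptl held. */
                while (page_vma_mapped_walk(&pvmw))
                        nr++;

                /* The final false return has already unlocked and unmapped. */
                return nr;
        }
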
     252             : 
     253             : /**
     254             :  * page_mapped_in_vma - check whether a page is really mapped in a VMA
     255             :  * @page: the page to test
     256             :  * @vma: the VMA to test
     257             :  *
     258             :  * Returns 1 if the page is mapped into the page tables of the VMA, 0
     259             :  * if the page is not mapped into the page tables of this VMA.  Only
     260             :  * valid for normal file or anonymous VMAs.
     261             :  */
     262           0 : int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
     263             : {
     264           0 :         struct page_vma_mapped_walk pvmw = {
     265             :                 .page = page,
     266             :                 .vma = vma,
     267             :                 .flags = PVMW_SYNC,
     268             :         };
     269           0 :         unsigned long start, end;
     270             : 
     271           0 :         start = __vma_address(page, vma);
     272           0 :         end = start + thp_size(page) - PAGE_SIZE;
     273             : 
     274           0 :         if (unlikely(end < vma->vm_start || start >= vma->vm_end))
     275             :                 return 0;
     276           0 :         pvmw.address = max(start, vma->vm_start);
     277           0 :         if (!page_vma_mapped_walk(&pvmw))
     278             :                 return 0;
     279           0 :         page_vma_mapped_walk_done(&pvmw);
     280             :         return 1;
     281             : }
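
A hypothetical caller of page_mapped_in_vma(), modelled on the way mm/memory-failure.c filters VMAs before acting on them (vma_actually_maps() is not a kernel function):

        /* Skip VMAs that cover the address range without actually mapping the page. */
        static bool vma_actually_maps(struct page *page, struct vm_area_struct *vma)
        {
                return page_mapped_in_vma(page, vma) == 1;
        }
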

Generated by: LCOV version 1.14