/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
                    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using the vma's vm_page_prot; use
 * vmf_insert_pfn_pmd_prot() to supply a caller-specified pgprot.
 * See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using the vma's vm_page_prot; use
 * vmf_insert_pfn_pud_prot() to supply a caller-specified pgprot.
 * See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
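
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the typical shape of a driver PMD fault path built on the wrapper above.
 * example_dev_pfn() is a hypothetical helper standing in for a
 * driver-specific lookup that returns a PMD-aligned pfn for the faulting
 * address.
 */
extern pfn_t example_dev_pfn(struct vm_area_struct *vma, unsigned long addr);

static inline vm_fault_t example_pmd_fault(struct vm_fault *vmf)
{
        pfn_t pfn = example_dev_pfn(vmf->vma, vmf->address);

        return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}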

/* Bit numbers used in the transparent_hugepage_flags bitmask (see below). */
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_NEVER_DAX,
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
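
/*
 * Sketch (editor's addition): mm/huge_memory.c wires these helpers into
 * sysfs attributes roughly as below (simplified, shown only for
 * illustration of the calling convention).
 */
static ssize_t use_zero_page_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return single_hugepage_flag_show(kobj, attr, buf,
                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        return single_hugepage_flag_store(kobj, attr, buf, count,
                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}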

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        /*
         * Bail out if the hardware/firmware has marked hugepage support
         * as disabled.
         */
        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
                return false;

        if (vma->vm_flags & VM_NOHUGEPAGE)
                return false;

        if (vma_is_temporary_stack(vma))
                return false;

        if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;

        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                /*
                 * A file-backed vma can only be mapped huge if the vma
                 * start and the file offset are huge-page aligned the
                 * same way.
                 */
                if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
                        (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
                        return false;
        }

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;
        return true;
}
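
/*
 * Sketch (editor's addition): the anonymous fault path combines the two
 * checks above, a policy check on the vma followed by a placement check
 * for the specific huge-page-aligned address.
 */
static inline bool example_thp_allowed(struct vm_fault *vmf)
{
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

        return __transparent_hugepage_enabled(vmf->vma) &&
               transhuge_vma_suitable(vmf->vma, haddr);
}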

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)
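
/*
 * Usage sketch (editor's addition): callers use split_huge_pmd() to force
 * a huge pmd back to a regular page table before operating at pte
 * granularity, e.g.
 *
 *      split_huge_pmd(vma, pmd, address);
 *
 * The macro is a no-op unless *pmd is a huge, devmap or swap pmd.
 */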

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
                     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

/* A pmd that is neither none nor present can only hold a swap entry. */
static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
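
/*
 * Usage sketch (editor's addition): with mmap_lock held, a non-NULL
 * return from pmd_trans_huge_lock() means *pmd is a huge, devmap or swap
 * pmd and stays stable until the returned ptl is dropped.
 */
static inline bool example_locked_pmd_op(pmd_t *pmd,
                                         struct vm_area_struct *vma)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

        if (!ptl)
                return false;   /* not huge: fall back to pte level */
        /* ... operate on the stable huge pmd here ... */
        spin_unlock(ptl);
        return true;
}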

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
        return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        if (PageHead(page))
                return HPAGE_PMD_ORDER;
        return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        if (PageHead(page))
                return HPAGE_PMD_NR;
        return 1;
}
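
/*
 * Sketch (editor's addition): the struct pages of a compound page are
 * contiguous in the memmap, so thp_nr_pages() bounds a plain loop over
 * the constituent small pages.
 */
static inline void example_for_each_subpage(struct page *head)
{
        int i, nr = thp_nr_pages(head);

        for (i = 0; i < nr; i++) {
                struct page *page = head + i;

                /* ... per-subpage work goes here ... */
                (void)page;
        }
}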

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
        /*
         * The deferred (split) list is kept in the second tail page:
         * in the first tail page the corresponding space is occupied
         * by the compound metadata fields.
         */
        return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return page;
}

static inline unsigned int thp_order(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 0;
}

static inline int thp_nr_pages(struct page *page)
{
        VM_BUG_ON_PGFLAGS(PageTail(page), page);
        return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long haddr)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
        return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
        BUILD_BUG();
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
                pmd_t orig_pmd)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
        return PAGE_SIZE << thp_order(page);
}

#endif /* _LINUX_HUGE_MM_H */
