LCOV - code coverage report
Current view: top level - include/linux - khugepaged.h
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            13       16      81.2 %
Functions:         3        3     100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _LINUX_KHUGEPAGED_H
       3             : #define _LINUX_KHUGEPAGED_H
       4             : 
       5             : #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
       6             : #include <linux/shmem_fs.h>
       7             : 
       8             : 
       9             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
      10             : extern struct attribute_group khugepaged_attr_group;
      11             : 
      12             : extern int khugepaged_init(void);
      13             : extern void khugepaged_destroy(void);
      14             : extern int start_stop_khugepaged(void);
      15             : extern int __khugepaged_enter(struct mm_struct *mm);
      16             : extern void __khugepaged_exit(struct mm_struct *mm);
      17             : extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
      18             :                                       unsigned long vm_flags);
      19             : extern void khugepaged_min_free_kbytes_update(void);
      20             : #ifdef CONFIG_SHMEM
      21             : extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
      22             : #else
      23             : static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
      24             :                                            unsigned long addr)
      25             : {
      26             : }
      27             : #endif
      28             : 
      29             : #define khugepaged_enabled()                                           \
      30             :         (transparent_hugepage_flags &                                      \
      31             :          ((1<<TRANSPARENT_HUGEPAGE_FLAG) |                       \
      32             :           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
      33             : #define khugepaged_always()                             \
      34             :         (transparent_hugepage_flags &                       \
      35             :          (1<<TRANSPARENT_HUGEPAGE_FLAG))
      36             : #define khugepaged_req_madv()                                   \
      37             :         (transparent_hugepage_flags &                               \
      38             :          (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
      39             : #define khugepaged_defrag()                                     \
      40             :         (transparent_hugepage_flags &                               \
      41             :          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
      42             : 
      43         858 : static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
      44             : {
      45         858 :         if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
      46           5 :                 return __khugepaged_enter(mm);
      47             :         return 0;
      48             : }
      49             : 
      50        1994 : static inline void khugepaged_exit(struct mm_struct *mm)
      51             : {
      52        1994 :         if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
      53          19 :                 __khugepaged_exit(mm);
      54        1994 : }
      55             : 
      56          27 : static inline int khugepaged_enter(struct vm_area_struct *vma,
      57             :                                    unsigned long vm_flags)
      58             : {
      59          27 :         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
      60          19 :                 if ((khugepaged_always() ||
      61           0 :                      (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
      62           0 :                      (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
      63          19 :                     !(vm_flags & VM_NOHUGEPAGE) &&
      64          19 :                     !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
      65          19 :                         if (__khugepaged_enter(vma->vm_mm))
      66           0 :                                 return -ENOMEM;
      67             :         return 0;
      68             : }
      69             : #else /* CONFIG_TRANSPARENT_HUGEPAGE */
      70             : static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
      71             : {
      72             :         return 0;
      73             : }
      74             : static inline void khugepaged_exit(struct mm_struct *mm)
      75             : {
      76             : }
      77             : static inline int khugepaged_enter(struct vm_area_struct *vma,
      78             :                                    unsigned long vm_flags)
      79             : {
      80             :         return 0;
      81             : }
      82             : static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
      83             :                                              unsigned long vm_flags)
      84             : {
      85             :         return 0;
      86             : }
      87             : static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
      88             :                                            unsigned long addr)
      89             : {
      90             : }
      91             : 
      92             : static inline void khugepaged_min_free_kbytes_update(void)
      93             : {
      94             : }
      95             : #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
      96             : 
      97             : #endif /* _LINUX_KHUGEPAGED_H */
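
Note on the logic covered above: khugepaged_enter() registers a VMA's mm with the khugepaged daemon only when THP is enabled globally, the mapping is a huge-page-enabled shmem file, or the VMA was madvised for huge pages, and neither the VMA (VM_NOHUGEPAGE) nor the process (MMF_DISABLE_THP) has opted out. The sketch below restates that gate as a self-contained user-space C program for illustration only; every demo_* name is invented for this example and is not kernel API, and the shmem_file()/shmem_huge_enabled() branch is omitted for brevity.

    /*
     * Minimal user-space sketch (not kernel code) of the gating logic that
     * khugepaged_enter() applies in the header above.  All demo_* names are
     * stand-ins invented for this example.
     */
    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel flag bits used above. */
    #define DEMO_THP_FLAG_ALWAYS   (1u << 0)   /* TRANSPARENT_HUGEPAGE_FLAG          */
    #define DEMO_THP_FLAG_MADVISE  (1u << 1)   /* TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG */

    #define DEMO_VM_HUGEPAGE       (1u << 0)   /* VM_HUGEPAGE   */
    #define DEMO_VM_NOHUGEPAGE     (1u << 1)   /* VM_NOHUGEPAGE */

    #define DEMO_MMF_VM_HUGEPAGE   (1u << 0)   /* mm already registered with khugepaged */
    #define DEMO_MMF_DISABLE_THP   (1u << 1)   /* whole process opted out of THP        */

    /* Stand-in for transparent_hugepage_flags: THP in "madvise" mode. */
    static unsigned int demo_thp_flags = DEMO_THP_FLAG_MADVISE;

    struct demo_mm  { unsigned int flags; };
    struct demo_vma { struct demo_mm *mm; unsigned int vm_flags; };

    /* Mirrors the decision made by khugepaged_enter() (shmem branch omitted). */
    static bool demo_should_enter_khugepaged(const struct demo_vma *vma)
    {
            if (vma->mm->flags & DEMO_MMF_VM_HUGEPAGE)
                    return false;           /* mm already registered         */
            if (vma->vm_flags & DEMO_VM_NOHUGEPAGE)
                    return false;           /* this VMA opted out            */
            if (vma->mm->flags & DEMO_MMF_DISABLE_THP)
                    return false;           /* whole process opted out       */
            if (demo_thp_flags & DEMO_THP_FLAG_ALWAYS)
                    return true;            /* THP enabled system-wide       */
            if ((demo_thp_flags & DEMO_THP_FLAG_MADVISE) &&
                (vma->vm_flags & DEMO_VM_HUGEPAGE))
                    return true;            /* VMA madvised MADV_HUGEPAGE    */
            return false;
    }

    int main(void)
    {
            struct demo_mm mm = { .flags = 0 };
            struct demo_vma vma = { .mm = &mm, .vm_flags = DEMO_VM_HUGEPAGE };

            printf("register with khugepaged: %s\n",
                   demo_should_enter_khugepaged(&vma) ? "yes" : "no");
            return 0;
    }

With the stand-in values above, the madvised VMA is accepted, which corresponds to the covered path through __khugepaged_enter() in the listing; clearing DEMO_VM_HUGEPAGE or setting DEMO_MMF_DISABLE_THP exercises the branches that the report shows as untaken in this test run.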

Generated by: LCOV version 1.14