LCOV - code coverage report
Current view: top level - include/linux - page-flags.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
Coverage:          Hit    Total     Rate
  Lines:            94      116    81.0 %
  Functions:         6        8    75.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * Macros for manipulating and testing page->flags
       4             :  */
       5             : 
       6             : #ifndef PAGE_FLAGS_H
       7             : #define PAGE_FLAGS_H
       8             : 
       9             : #include <linux/types.h>
      10             : #include <linux/bug.h>
      11             : #include <linux/mmdebug.h>
      12             : #ifndef __GENERATING_BOUNDS_H
      13             : #include <linux/mm_types.h>
      14             : #include <generated/bounds.h>
      15             : #endif /* !__GENERATING_BOUNDS_H */
      16             : 
      17             : /*
      18             :  * Various page->flags bits:
      19             :  *
      20             :  * PG_reserved is set for special pages. The "struct page" of such a page
      21             :  * should in general not be touched (e.g. set dirty) except by its owner.
      22             :  * Pages marked as PG_reserved include:
      23             :  * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
      24             :  *   initrd, HW tables)
      25             :  * - Pages reserved or allocated early during boot (before the page allocator
      26             :  *   was initialized). This includes (depending on the architecture) the
      27             :  *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
      28             :  *   much more. Once (if ever) freed, PG_reserved is cleared and they will
      29             :  *   be given to the page allocator.
      30             :  * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
      31             :  *   to read/write these pages might end badly. Don't touch!
      32             :  * - The zero page(s)
      33             :  * - Pages not added to the page allocator when onlining a section because
      34             :  *   they were excluded via the online_page_callback() or because they are
      35             :  *   PG_hwpoison.
      36             :  * - Pages allocated in the context of kexec/kdump (loaded kernel image,
      37             :  *   control pages, vmcoreinfo)
       38             :  * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
      39             :  *   not marked PG_reserved (as they might be in use by somebody else who does
      40             :  *   not respect the caching strategy).
      41             :  * - Pages part of an offline section (struct pages of offline sections should
      42             :  *   not be trusted as they will be initialized when first onlined).
      43             :  * - MCA pages on ia64
      44             :  * - Pages holding CPU notes for POWER Firmware Assisted Dump
      45             :  * - Device memory (e.g. PMEM, DAX, HMM)
      46             :  * Some PG_reserved pages will be excluded from the hibernation image.
       47             :  * In general, PG_reserved does not hinder anybody from dumping or swapping
      48             :  * and is no longer required for remap_pfn_range(). ioremap might require it.
      49             :  * Consequently, PG_reserved for a page mapped into user space can indicate
      50             :  * the zero page, the vDSO, MMIO pages or device memory.
      51             :  *
      52             :  * The PG_private bitflag is set on pagecache pages if they contain filesystem
      53             :  * specific data (which is normally at page->private). It can be used by
       54             :  * private allocations for their own purposes.
      55             :  *
      56             :  * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
      57             :  * and cleared when writeback _starts_ or when read _completes_. PG_writeback
      58             :  * is set before writeback starts and cleared when it finishes.
      59             :  *
      60             :  * PG_locked also pins a page in pagecache, and blocks truncation of the file
      61             :  * while it is held.
      62             :  *
      63             :  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
      64             :  * to become unlocked.
      65             :  *
       66             :  * PG_swapbacked is set when a page uses swap as its backing storage.  These
       67             :  * are usually PageAnon or shmem pages, but please note that even anonymous
       68             :  * pages might lose their PG_swapbacked flag when they can simply be dropped
       69             :  * (e.g. as a result of MADV_FREE).
      70             :  *
       71             :  * PG_uptodate tells whether the page's contents are valid.  When a read
      72             :  * completes, the page becomes uptodate, unless a disk I/O error happened.
      73             :  *
      74             :  * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
      75             :  * file-backed pagecache (see mm/vmscan.c).
      76             :  *
      77             :  * PG_error is set to indicate that an I/O error occurred on this page.
      78             :  *
      79             :  * PG_arch_1 is an architecture specific page state bit.  The generic code
       80             :  * guarantees that this bit is cleared for a page when it is first entered into
      81             :  * the page cache.
      82             :  *
      83             :  * PG_hwpoison indicates that a page got corrupted in hardware and contains
       84             :  * data with incorrect ECC bits that triggered a machine check. Accessing it is
      85             :  * not safe since it may cause another machine check. Don't touch!
      86             :  */
      87             : 
      88             : /*
      89             :  * Don't use the pageflags directly.  Use the PageFoo macros.
      90             :  *
      91             :  * The page flags field is split into two parts, the main flags area
      92             :  * which extends from the low bits upwards, and the fields area which
      93             :  * extends from the high bits downwards.
      94             :  *
      95             :  *  | FIELD | ... | FLAGS |
      96             :  *  N-1           ^       0
      97             :  *               (NR_PAGEFLAGS)
      98             :  *
      99             :  * The fields area is reserved for fields mapping zone, node (for NUMA) and
     100             :  * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
     101             :  * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
     102             :  */
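
/*
 * Illustrative sketch (not part of this header): the fields area is consumed
 * by shift/mask helpers defined in <linux/mm.h>, conceptually along the
 * lines of
 *
 *   zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *   node = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *
 * where the *_PGSHIFT/*_MASK values are derived at build time from
 * <generated/bounds.h>.
 */
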
     103             : enum pageflags {
     104             :         PG_locked,              /* Page is locked. Don't touch. */
     105             :         PG_referenced,
     106             :         PG_uptodate,
     107             :         PG_dirty,
     108             :         PG_lru,
     109             :         PG_active,
     110             :         PG_workingset,
     111             :         PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
     112             :         PG_error,
     113             :         PG_slab,
      114             :         PG_owner_priv_1,        /* Owner use. If pagecache, fs may use */
     115             :         PG_arch_1,
     116             :         PG_reserved,
     117             :         PG_private,             /* If pagecache, has fs-private data */
     118             :         PG_private_2,           /* If pagecache, has fs aux data */
     119             :         PG_writeback,           /* Page is under writeback */
     120             :         PG_head,                /* A head page */
     121             :         PG_mappedtodisk,        /* Has blocks allocated on-disk */
     122             :         PG_reclaim,             /* To be reclaimed asap */
     123             :         PG_swapbacked,          /* Page is backed by RAM/swap */
     124             :         PG_unevictable,         /* Page is "unevictable"  */
     125             : #ifdef CONFIG_MMU
     126             :         PG_mlocked,             /* Page is vma mlocked */
     127             : #endif
     128             : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
     129             :         PG_uncached,            /* Page has been mapped as uncached */
     130             : #endif
     131             : #ifdef CONFIG_MEMORY_FAILURE
     132             :         PG_hwpoison,            /* hardware poisoned page. Don't touch */
     133             : #endif
     134             : #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
     135             :         PG_young,
     136             :         PG_idle,
     137             : #endif
     138             : #ifdef CONFIG_64BIT
     139             :         PG_arch_2,
     140             : #endif
     141             :         __NR_PAGEFLAGS,
     142             : 
     143             :         /* Filesystems */
     144             :         PG_checked = PG_owner_priv_1,
     145             : 
     146             :         /* SwapBacked */
     147             :         PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
     148             : 
     149             :         /* Two page bits are conscripted by FS-Cache to maintain local caching
     150             :          * state.  These bits are set on pages belonging to the netfs's inodes
     151             :          * when those inodes are being locally cached.
     152             :          */
     153             :         PG_fscache = PG_private_2,      /* page backed by cache */
     154             : 
     155             :         /* XEN */
     156             :         /* Pinned in Xen as a read-only pagetable page. */
     157             :         PG_pinned = PG_owner_priv_1,
     158             :         /* Pinned as part of domain save (see xen_mm_pin_all()). */
     159             :         PG_savepinned = PG_dirty,
     160             :         /* Has a grant mapping of another (foreign) domain's page. */
     161             :         PG_foreign = PG_owner_priv_1,
     162             :         /* Remapped by swiotlb-xen. */
     163             :         PG_xen_remapped = PG_owner_priv_1,
     164             : 
     165             :         /* SLOB */
     166             :         PG_slob_free = PG_private,
     167             : 
     168             :         /* Compound pages. Stored in first tail page's flags */
     169             :         PG_double_map = PG_workingset,
     170             : 
     171             :         /* non-lru isolated movable page */
     172             :         PG_isolated = PG_reclaim,
     173             : 
     174             :         /* Only valid for buddy pages. Used to track pages that are reported */
     175             :         PG_reported = PG_uptodate,
     176             : };
     177             : 
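/*
 * Editorial note (not part of this header): the aliases above mean that a
 * single hardware bit can carry different meanings depending on who owns the
 * page.  PG_owner_priv_1, for instance, is read as PG_checked on filesystem
 * pagecache pages, as PG_swapcache on swap-backed pages, and as
 * PG_pinned/PG_foreign/PG_xen_remapped on Xen pages; the respective owners
 * ensure that only one interpretation is live for a given page at a time.
 */
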
     178             : #ifndef __GENERATING_BOUNDS_H
     179             : 
     180             : struct page;    /* forward declaration */
     181             : 
     182    19901558 : static inline struct page *compound_head(struct page *page)
     183             : {
     184    19901558 :         unsigned long head = READ_ONCE(page->compound_head);
     185             : 
     186    11748467 :         if (unlikely(head & 1))
     187     2219363 :                 return (struct page *) (head - 1);
     188             :         return page;
     189             : }
     190             : 
     191     2569831 : static __always_inline int PageTail(struct page *page)
     192             : {
     193     2547225 :         return READ_ONCE(page->compound_head) & 1;
     194             : }
     195             : 
     196     2093068 : static __always_inline int PageCompound(struct page *page)
     197             : {
     198     2093049 :         return test_bit(PG_head, &page->flags) || PageTail(page);
     199             : }
     200             : 
     201             : #define PAGE_POISON_PATTERN     -1l
     202           0 : static inline int PagePoisoned(const struct page *page)
     203             : {
     204           0 :         return page->flags == PAGE_POISON_PATTERN;
     205             : }
     206             : 
     207             : #ifdef CONFIG_DEBUG_VM
     208             : void page_init_poison(struct page *page, size_t size);
     209             : #else
     210             : static inline void page_init_poison(struct page *page, size_t size)
     211             : {
     212             : }
     213             : #endif
     214             : 
     215             : /*
     216             :  * Page flags policies wrt compound pages
     217             :  *
      218             :  * PF_POISONED_CHECK:
      219             :  *     check if this struct page is poisoned/uninitialized
     220             :  *
     221             :  * PF_ANY:
     222             :  *     the page flag is relevant for small, head and tail pages.
     223             :  *
     224             :  * PF_HEAD:
      225             :  *     for compound pages, all operations related to the page flag are
      226             :  *     applied to the head page.
     227             :  *
     228             :  * PF_ONLY_HEAD:
      229             :  *     for compound pages, callers only ever operate on the head page.
     230             :  *
     231             :  * PF_NO_TAIL:
      232             :  *     modifications of the page flag must be done on small or head pages;
     233             :  *     checks can be done on tail pages too.
     234             :  *
     235             :  * PF_NO_COMPOUND:
     236             :  *     the page flag is not relevant for compound pages.
     237             :  *
     238             :  * PF_SECOND:
     239             :  *     the page flag is stored in the first tail page.
     240             :  */
     241             : #define PF_POISONED_CHECK(page) ({                                      \
     242             :                 VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);            \
     243             :                 page; })
     244             : #define PF_ANY(page, enforce)   PF_POISONED_CHECK(page)
     245             : #define PF_HEAD(page, enforce)  PF_POISONED_CHECK(compound_head(page))
     246             : #define PF_ONLY_HEAD(page, enforce) ({                                  \
     247             :                 VM_BUG_ON_PGFLAGS(PageTail(page), page);                \
     248             :                 PF_POISONED_CHECK(page); })
     249             : #define PF_NO_TAIL(page, enforce) ({                                    \
     250             :                 VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);     \
     251             :                 PF_POISONED_CHECK(compound_head(page)); })
     252             : #define PF_NO_COMPOUND(page, enforce) ({                                \
     253             :                 VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
     254             :                 PF_POISONED_CHECK(page); })
     255             : #define PF_SECOND(page, enforce) ({                                     \
     256             :                 VM_BUG_ON_PGFLAGS(!PageHead(page), page);               \
     257             :                 PF_POISONED_CHECK(&page[1]); })
     258             : 
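/*
 * Illustrative example (not part of this header): for a compound page with
 * head H and first tail T, the policies above mean, with the accessors
 * generated further down:
 *
 *   PageDirty(T), SetPageDirty(T)  PF_HEAD:        operate on H's PG_dirty
 *   PageLocked(T)                  PF_NO_TAIL:     the test is redirected to
 *                                                  H, but __SetPageLocked(T)
 *                                                  would trip the VM_BUG_ON
 *   SetPageReserved(H)             PF_NO_COMPOUND: trips the VM_BUG_ON, the
 *                                                  flag is per small page
 *   PageDoubleMap(H)               PF_SECOND:      reads the bit from H[1],
 *                                                  i.e. the first tail page
 *
 * The VM_BUG_ON_PGFLAGS() checks only fire with CONFIG_DEBUG_VM_PGFLAGS.
 */
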
     259             : /*
     260             :  * Macros to create function definitions for page flags
     261             :  */
     262             : #define TESTPAGEFLAG(uname, lname, policy)                              \
     263             : static __always_inline int Page##uname(struct page *page)               \
     264             :         { return test_bit(PG_##lname, &policy(page, 0)->flags); }
     265             : 
     266             : #define SETPAGEFLAG(uname, lname, policy)                               \
     267             : static __always_inline void SetPage##uname(struct page *page)           \
     268             :         { set_bit(PG_##lname, &policy(page, 1)->flags); }
     269             : 
     270             : #define CLEARPAGEFLAG(uname, lname, policy)                             \
     271             : static __always_inline void ClearPage##uname(struct page *page)         \
     272             :         { clear_bit(PG_##lname, &policy(page, 1)->flags); }
     273             : 
     274             : #define __SETPAGEFLAG(uname, lname, policy)                             \
     275             : static __always_inline void __SetPage##uname(struct page *page)         \
     276             :         { __set_bit(PG_##lname, &policy(page, 1)->flags); }
     277             : 
     278             : #define __CLEARPAGEFLAG(uname, lname, policy)                           \
     279             : static __always_inline void __ClearPage##uname(struct page *page)       \
     280             :         { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
     281             : 
     282             : #define TESTSETFLAG(uname, lname, policy)                               \
     283             : static __always_inline int TestSetPage##uname(struct page *page)        \
     284             :         { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
     285             : 
     286             : #define TESTCLEARFLAG(uname, lname, policy)                             \
     287             : static __always_inline int TestClearPage##uname(struct page *page)      \
     288             :         { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
     289             : 
     290             : #define PAGEFLAG(uname, lname, policy)                                  \
     291             :         TESTPAGEFLAG(uname, lname, policy)                              \
     292             :         SETPAGEFLAG(uname, lname, policy)                               \
     293             :         CLEARPAGEFLAG(uname, lname, policy)
     294             : 
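/*
 * Illustrative expansion (not part of this header): given the definitions
 * above, PAGEFLAG(Dirty, dirty, PF_HEAD) generates roughly
 *
 *   static __always_inline int PageDirty(struct page *page)
 *       { return test_bit(PG_dirty, &compound_head(page)->flags); }
 *   static __always_inline void SetPageDirty(struct page *page)
 *       { set_bit(PG_dirty, &compound_head(page)->flags); }
 *   static __always_inline void ClearPageDirty(struct page *page)
 *       { clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * with the PF_POISONED_CHECK() wrapper around compound_head() omitted for
 * brevity.
 */
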
     295             : #define __PAGEFLAG(uname, lname, policy)                                \
     296             :         TESTPAGEFLAG(uname, lname, policy)                              \
     297             :         __SETPAGEFLAG(uname, lname, policy)                             \
     298             :         __CLEARPAGEFLAG(uname, lname, policy)
     299             : 
     300             : #define TESTSCFLAG(uname, lname, policy)                                \
     301             :         TESTSETFLAG(uname, lname, policy)                               \
     302             :         TESTCLEARFLAG(uname, lname, policy)
     303             : 
     304             : #define TESTPAGEFLAG_FALSE(uname)                                       \
     305             : static inline int Page##uname(const struct page *page) { return 0; }
     306             : 
     307             : #define SETPAGEFLAG_NOOP(uname)                                         \
     308             : static inline void SetPage##uname(struct page *page) {  }
     309             : 
     310             : #define CLEARPAGEFLAG_NOOP(uname)                                       \
     311             : static inline void ClearPage##uname(struct page *page) {  }
     312             : 
     313             : #define __CLEARPAGEFLAG_NOOP(uname)                                     \
     314             : static inline void __ClearPage##uname(struct page *page) {  }
     315             : 
     316             : #define TESTSETFLAG_FALSE(uname)                                        \
     317             : static inline int TestSetPage##uname(struct page *page) { return 0; }
     318             : 
     319             : #define TESTCLEARFLAG_FALSE(uname)                                      \
     320             : static inline int TestClearPage##uname(struct page *page) { return 0; }
     321             : 
     322             : #define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)                 \
     323             :         SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
     324             : 
     325             : #define TESTSCFLAG_FALSE(uname)                                         \
     326             :         TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
     327             : 
     328       53497 : __PAGEFLAG(Locked, locked, PF_NO_TAIL)
     329      161957 : PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
     330       21545 : PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
     331      803935 : PAGEFLAG(Referenced, referenced, PF_HEAD)
     332           0 :         TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
     333        2694 :         __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
     334       16812 : PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
     335             :         __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
     336      545626 : PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
     337       17394 :         TESTCLEARFLAG(LRU, lru, PF_HEAD)
     338     2071475 : PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
     339           0 :         TESTCLEARFLAG(Active, active, PF_HEAD)
     340       12588 : PAGEFLAG(Workingset, workingset, PF_HEAD)
     341             :         TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
     342     1666758 : __PAGEFLAG(Slab, slab, PF_NO_TAIL)
     343             : __PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
     344         496 : PAGEFLAG(Checked, checked, PF_NO_COMPOUND)         /* Used by some filesystems */
     345             : 
     346             : /* Xen */
     347             : PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
     348             :         TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
     349             : PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
     350             : PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
     351             : PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
     352             :         TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
     353             : 
     354        1078 : PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
     355      405384 :         __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
     356      381164 :         __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
     357     1745627 : PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
     358             :         __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
     359      140248 :         __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
     360             : 
     361             : /*
     362             :  * Private page markings that may be used by the filesystem that owns the page
     363             :  * for its own purposes.
     364             :  * - PG_private and PG_private_2 cause releasepage() and co to be invoked
     365             :  */
     366       65696 : PAGEFLAG(Private, private, PF_ANY)
     367             : PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
     368             : PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
     369             :         TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
     370             : 
     371             : /*
      372             :  * Only test-and-set operations exist for PG_writeback.  The unconditional
      373             :  * operators are risky: they bypass page accounting.
     374             :  */
     375        7901 : TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
     376        2568 :         TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
     377       23525 : PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
     378             : 
     379             : /* PG_readahead is only used for reads; PG_reclaim is only for writes */
     380        3061 : PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
     381             :         TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
     382        1817 : PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
     383             :         TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
     384             : 
     385             : #ifdef CONFIG_HIGHMEM
     386             : /*
     387             :  * Must use a macro here due to header dependency issues. page_zone() is not
     388             :  * available at this point.
     389             :  */
     390             : #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
     391             : #else
     392      501999 : PAGEFLAG_FALSE(HighMem)
     393             : #endif
     394             : 
     395             : #ifdef CONFIG_SWAP
     396             : static __always_inline int PageSwapCache(struct page *page)
     397             : {
     398             : #ifdef CONFIG_THP_SWAP
     399             :         page = compound_head(page);
     400             : #endif
     401             :         return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
     402             : 
     403             : }
     404             : SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
     405             : CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
     406             : #else
     407      116307 : PAGEFLAG_FALSE(SwapCache)
     408             : #endif
     409             : 
     410      979296 : PAGEFLAG(Unevictable, unevictable, PF_HEAD)
     411      129996 :         __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
     412       96343 :         TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
     413             : 
     414             : #ifdef CONFIG_MMU
     415      259463 : PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
     416             :         __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
     417          47 :         TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
     418             : #else
     419             : PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
     420             :         TESTSCFLAG_FALSE(Mlocked)
     421             : #endif
     422             : 
     423             : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
     424             : PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
     425             : #else
     426             : PAGEFLAG_FALSE(Uncached)
     427             : #endif
     428             : 
     429             : #ifdef CONFIG_MEMORY_FAILURE
     430             : PAGEFLAG(HWPoison, hwpoison, PF_ANY)
     431             : TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
     432             : #define __PG_HWPOISON (1UL << PG_hwpoison)
     433             : extern bool take_page_off_buddy(struct page *page);
     434             : #else
     435      158924 : PAGEFLAG_FALSE(HWPoison)
     436             : #define __PG_HWPOISON 0
     437             : #endif
     438             : 
     439             : #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
     440             : TESTPAGEFLAG(Young, young, PF_ANY)
     441             : SETPAGEFLAG(Young, young, PF_ANY)
     442             : TESTCLEARFLAG(Young, young, PF_ANY)
     443             : PAGEFLAG(Idle, idle, PF_ANY)
     444             : #endif
     445             : 
     446             : /*
     447             :  * PageReported() is used to track reported free pages within the Buddy
     448             :  * allocator. We can use the non-atomic version of the test and set
     449             :  * operations as both should be shielded with the zone lock to prevent
     450             :  * any possible races on the setting or clearing of the bit.
     451             :  */
     452       97180 : __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
     453             : 
     454             : /*
     455             :  * On an anonymous page mapped into a user virtual memory area,
     456             :  * page->mapping points to its anon_vma, not to a struct address_space;
     457             :  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
     458             :  *
     459             :  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
     460             :  * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
     461             :  * bit; and then page->mapping points, not to an anon_vma, but to a private
     462             :  * structure which KSM associates with that merged page.  See ksm.h.
     463             :  *
      464             :  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
      465             :  * pages, and then page->mapping points to a struct address_space.
     466             :  *
     467             :  * Please note that, confusingly, "page_mapping" refers to the inode
     468             :  * address_space which maps the page from disk; whereas "page_mapped"
     469             :  * refers to user virtual address space into which the page is mapped.
     470             :  */
     471             : #define PAGE_MAPPING_ANON       0x1
     472             : #define PAGE_MAPPING_MOVABLE    0x2
     473             : #define PAGE_MAPPING_KSM        (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
     474             : #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
     475             : 
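/*
 * Illustrative summary (not part of this header) of the low two bits of
 * page->mapping, as described above:
 *
 *   bits 1:0  flag combination                   page->mapping points to
 *   00        (none)                             struct address_space or NULL
 *   01        PAGE_MAPPING_ANON                  anon_vma
 *   10        PAGE_MAPPING_MOVABLE               struct address_space of a
 *                                                non-lru movable page
 *   11        PAGE_MAPPING_KSM (ANON | MOVABLE)  KSM private structure (the
 *                                                stable tree node, see ksm.h)
 */
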
     476      148341 : static __always_inline int PageMappingFlags(struct page *page)
     477             : {
     478      148341 :         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
     479             : }
     480             : 
     481     2906455 : static __always_inline int PageAnon(struct page *page)
     482             : {
     483     2906455 :         page = compound_head(page);
     484     2906465 :         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
     485             : }
     486             : 
     487           0 : static __always_inline int __PageMovable(struct page *page)
     488             : {
     489           0 :         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
     490             :                                 PAGE_MAPPING_MOVABLE;
     491             : }
     492             : 
     493             : #ifdef CONFIG_KSM
     494             : /*
     495             :  * A KSM page is one of those write-protected "shared pages" or "merged pages"
     496             :  * which KSM maps into multiple mms, wherever identical anonymous page content
     497             :  * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
     498             :  * anon_vma, but to that page's node of the stable tree.
     499             :  */
     500       50600 : static __always_inline int PageKsm(struct page *page)
     501             : {
     502       50600 :         page = compound_head(page);
     503       50600 :         return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
     504             :                                 PAGE_MAPPING_KSM;
     505             : }
     506             : #else
     507             : TESTPAGEFLAG_FALSE(Ksm)
     508             : #endif
     509             : 
     510             : u64 stable_page_flags(struct page *page);
     511             : 
     512       35254 : static inline int PageUptodate(struct page *page)
     513             : {
     514       35254 :         int ret;
     515       35254 :         page = compound_head(page);
     516       35254 :         ret = test_bit(PG_uptodate, &(page)->flags);
     517             :         /*
     518             :          * Must ensure that the data we read out of the page is loaded
     519             :          * _after_ we've loaded page->flags to check for PageUptodate.
     520             :          * We can skip the barrier if the page is not uptodate, because
     521             :          * we wouldn't be reading anything from it.
     522             :          *
     523             :          * See SetPageUptodate() for the other side of the story.
     524             :          */
     525       35254 :         if (ret)
     526        3855 :                 smp_rmb();
     527             : 
     528       35254 :         return ret;
     529             : }
     530             : 
     531       68316 : static __always_inline void __SetPageUptodate(struct page *page)
     532             : {
     533       68316 :         VM_BUG_ON_PAGE(PageTail(page), page);
     534       68316 :         smp_wmb();
     535      136632 :         __set_bit(PG_uptodate, &page->flags);
     536             : }
     537             : 
     538       24369 : static __always_inline void SetPageUptodate(struct page *page)
     539             : {
     540       24369 :         VM_BUG_ON_PAGE(PageTail(page), page);
     541             :         /*
     542             :          * Memory barrier must be issued before setting the PG_uptodate bit,
     543             :          * so that all previous stores issued in order to bring the page
     544             :          * uptodate are actually visible before PageUptodate becomes true.
     545             :          */
     546       24369 :         smp_wmb();
     547       24369 :         set_bit(PG_uptodate, &page->flags);
     548       24322 : }
     549             : 
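/*
 * Illustrative pairing (not part of this header) of the barriers used above:
 *
 *   writer (e.g. on read completion):
 *     copy data into the page;
 *     SetPageUptodate(page);     - smp_wmb(), then set_bit(PG_uptodate)
 *
 *   reader:
 *     if (PageUptodate(page))    - test_bit(PG_uptodate), then smp_rmb()
 *             use the page data;
 *
 * The smp_wmb() makes the data stores visible before PG_uptodate can be
 * observed set; the smp_rmb() orders the flag load before the subsequent
 * data loads, so a reader that sees the flag set also sees the data.
 */
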
     550           0 : CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
     551             : 
     552             : int test_clear_page_writeback(struct page *page);
     553             : int __test_set_page_writeback(struct page *page, bool keep_write);
     554             : 
     555             : #define test_set_page_writeback(page)                   \
     556             :         __test_set_page_writeback(page, false)
     557             : #define test_set_page_writeback_keepwrite(page) \
     558             :         __test_set_page_writeback(page, true)
     559             : 
     560        1386 : static inline void set_page_writeback(struct page *page)
     561             : {
     562        1386 :         test_set_page_writeback(page);
     563        1386 : }
     564             : 
     565           0 : static inline void set_page_writeback_keepwrite(struct page *page)
     566             : {
     567           0 :         test_set_page_writeback_keepwrite(page);
     568           0 : }
     569             : 
     570      610552 : __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
     571             : 
     572       85991 : static __always_inline void set_compound_head(struct page *page, struct page *head)
     573             : {
     574       85991 :         WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
     575             : }
     576             : 
     577       68885 : static __always_inline void clear_compound_head(struct page *page)
     578             : {
     579       68885 :         WRITE_ONCE(page->compound_head, 0);
     580             : }
     581             : 
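/*
 * Illustrative sketch (not part of this header): a hypothetical helper
 * showing how the bit-0 encoding in page->compound_head round-trips through
 * the helpers above and compound_head()/PageTail() near the top of this file.
 */
static inline void compound_head_encoding_example(struct page *head,
                                                  struct page *tail)
{
        set_compound_head(tail, head);  /* stores head + 1, i.e. bit 0 set */
        BUG_ON(!PageTail(tail));        /* bit 0 marks a tail page */
        BUG_ON(compound_head(tail) != head);    /* (head + 1) - 1 == head */
        clear_compound_head(tail);      /* back to 0: no longer a tail */
        BUG_ON(compound_head(tail) != tail);    /* non-tail maps to itself */
}
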
     582             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     583           0 : static inline void ClearPageCompound(struct page *page)
     584             : {
     585           0 :         BUG_ON(!PageHead(page));
     586           0 :         ClearPageHead(page);
     587           0 : }
     588             : #endif
     589             : 
     590             : #define PG_head_mask ((1UL << PG_head))
     591             : 
     592             : #ifdef CONFIG_HUGETLB_PAGE
     593             : int PageHuge(struct page *page);
     594             : int PageHeadHuge(struct page *page);
     595             : #else
     596      776511 : TESTPAGEFLAG_FALSE(Huge)
     597         136 : TESTPAGEFLAG_FALSE(HeadHuge)
     598             : #endif
     599             : 
     600             : 
     601             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     602             : /*
      603             :  * PageHuge() returns true only for hugetlbfs pages, but not for
      604             :  * normal or transparent huge pages.
      605             :  *
      606             :  * PageTransHuge() returns true for both transparent huge and
      607             :  * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
      608             :  * called in the core VM paths where hugetlbfs pages can't exist.
     609             :  */
     610        6045 : static inline int PageTransHuge(struct page *page)
     611             : {
     612        6045 :         VM_BUG_ON_PAGE(PageTail(page), page);
     613        6045 :         return PageHead(page);
     614             : }
     615             : 
     616             : /*
     617             :  * PageTransCompound returns true for both transparent huge pages
     618             :  * and hugetlbfs pages, so it should only be called when it's known
     619             :  * that hugetlbfs pages aren't involved.
     620             :  */
     621      935505 : static inline int PageTransCompound(struct page *page)
     622             : {
     623      935505 :         return PageCompound(page);
     624             : }
     625             : 
     626             : /*
     627             :  * PageTransCompoundMap is the same as PageTransCompound, but it also
     628             :  * guarantees the primary MMU has the entire compound page mapped
     629             :  * through pmd_trans_huge, which in turn guarantees the secondary MMUs
     630             :  * can also map the entire compound page. This allows the secondary
     631             :  * MMUs to call get_user_pages() only once for each compound page and
     632             :  * to immediately map the entire compound page with a single secondary
     633             :  * MMU fault. If there will be a pmd split later, the secondary MMUs
     634             :  * will get an update through the MMU notifier invalidation through
     635             :  * split_huge_pmd().
     636             :  *
      637             :  * Unlike PageTransCompound, this is only safe to call while
      638             :  * split_huge_pmd() cannot run from under us, e.g. if protected by the
      639             :  * MMU notifier; otherwise it may result in page->_mapcount check false
      640             :  * positives.
      641             :  *
      642             :  * We have to treat page cache THP differently since every subpage of it
      643             :  * would get _mapcount inc'ed once it is PMD mapped.  But it may be PTE
      644             :  * mapped in the current process, so we compare the subpage's _mapcount
      645             :  * to compound_mapcount to filter out the PTE mapped case.
     646             :  */
     647             : static inline int PageTransCompoundMap(struct page *page)
     648             : {
     649             :         struct page *head;
     650             : 
     651             :         if (!PageTransCompound(page))
     652             :                 return 0;
     653             : 
     654             :         if (PageAnon(page))
     655             :                 return atomic_read(&page->_mapcount) < 0;
     656             : 
     657             :         head = compound_head(page);
     658             :         /* File THP is PMD mapped and not PTE mapped */
     659             :         return atomic_read(&page->_mapcount) ==
     660             :                atomic_read(compound_mapcount_ptr(head));
     661             : }
     662             : 
     663             : /*
     664             :  * PageTransTail returns true for both transparent huge pages
     665             :  * and hugetlbfs pages, so it should only be called when it's known
     666             :  * that hugetlbfs pages aren't involved.
     667             :  */
     668         138 : static inline int PageTransTail(struct page *page)
     669             : {
     670         138 :         return PageTail(page);
     671             : }
     672             : 
     673             : /*
     674             :  * PageDoubleMap indicates that the compound page is mapped with PTEs as well
     675             :  * as PMDs.
     676             :  *
     677             :  * This is required for optimization of rmap operations for THP: we can postpone
     678             :  * per small page mapcount accounting (and its overhead from atomic operations)
     679             :  * until the first PMD split.
     680             :  *
      681             :  * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is
      682             :  * offset up by one. This reference goes away with the last compound_mapcount.
     683             :  *
     684             :  * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
     685             :  */
     686       13494 : PAGEFLAG(DoubleMap, double_map, PF_SECOND)
     687          19 :         TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
     688             : #else
     689             : TESTPAGEFLAG_FALSE(TransHuge)
     690             : TESTPAGEFLAG_FALSE(TransCompound)
     691             : TESTPAGEFLAG_FALSE(TransCompoundMap)
     692             : TESTPAGEFLAG_FALSE(TransTail)
     693             : PAGEFLAG_FALSE(DoubleMap)
     694             :         TESTSCFLAG_FALSE(DoubleMap)
     695             : #endif
     696             : 
     697             : /*
     698             :  * For pages that are never mapped to userspace (and aren't PageSlab),
     699             :  * page_type may be used.  Because it is initialised to -1, we invert the
     700             :  * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
     701             :  * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
     702             :  * low bits so that an underflow or overflow of page_mapcount() won't be
     703             :  * mistaken for a page type value.
     704             :  */
     705             : 
     706             : #define PAGE_TYPE_BASE  0xf0000000
     707             : /* Reserve              0x0000007f to catch underflows of page_mapcount */
     708             : #define PAGE_MAPCOUNT_RESERVE   -128
     709             : #define PG_buddy        0x00000080
     710             : #define PG_offline      0x00000100
     711             : #define PG_table        0x00000200
     712             : #define PG_guard        0x00000400
     713             : 
     714             : #define PageType(page, flag)                                            \
     715             :         ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
     716             : 
     717           0 : static inline int page_has_type(struct page *page)
     718             : {
     719           0 :         return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
     720             : }
     721             : 
     722             : #define PAGE_TYPE_OPS(uname, lname)                                     \
     723             : static __always_inline int Page##uname(struct page *page)               \
     724             : {                                                                       \
     725             :         return PageType(page, PG_##lname);                              \
     726             : }                                                                       \
     727             : static __always_inline void __SetPage##uname(struct page *page)         \
     728             : {                                                                       \
     729             :         VM_BUG_ON_PAGE(!PageType(page, 0), page);                       \
     730             :         page->page_type &= ~PG_##lname;                                  \
     731             : }                                                                       \
     732             : static __always_inline void __ClearPage##uname(struct page *page)       \
     733             : {                                                                       \
     734             :         VM_BUG_ON_PAGE(!Page##uname(page), page);                       \
     735             :         page->page_type |= PG_##lname;                                       \
     736             : }
     737             : 
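/*
 * Illustrative walk-through (not part of this header): page_type starts at
 * -1 (all bits set), so PAGE_TYPE_BASE is already present and the page has
 * no type.  Marking and unmarking a free buddy page then proceeds as:
 *
 *   __SetPageBuddy(page);    page_type &= ~PG_buddy   -> 0xffffff7f
 *   PageBuddy(page);         (page_type & 0xf0000080) == 0xf0000000 -> true
 *   __ClearPageBuddy(page);  page_type |= PG_buddy    -> 0xffffffff again
 *
 * The reserved high and low bits keep an underflowed or overflowed
 * page_mapcount() from being mistaken for a page type, as noted above.
 */
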
     738             : /*
     739             :  * PageBuddy() indicates that the page is free and in the buddy system
     740             :  * (see mm/page_alloc.c).
     741             :  */
     742      283243 : PAGE_TYPE_OPS(Buddy, buddy)
     743             : 
     744             : /*
     745             :  * PageOffline() indicates that the page is logically offline although the
     746             :  * containing section is online. (e.g. inflated in a balloon driver or
     747             :  * not onlined when onlining the section).
     748             :  * The content of these pages is effectively stale. Such pages should not
     749             :  * be touched (read/write/dump/save) except by their owner.
     750             :  *
      751             :  * If a driver wants to allow offlining of unmovable PageOffline() pages without
     752             :  * putting them back to the buddy, it can do so via the memory notifier by
     753             :  * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
     754             :  * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
     755             :  * pages (now with a reference count of zero) are treated like free pages,
     756             :  * allowing the containing memory block to get offlined. A driver that
     757             :  * relies on this feature is aware that re-onlining the memory block will
      758             :  * require re-setting the pages PageOffline() and not giving them to the
     759             :  * buddy via online_page_callback_t.
     760             :  */
     761           0 : PAGE_TYPE_OPS(Offline, offline)
     762             : 
     763             : /*
     764             :  * Marks pages in use as page tables.
     765             :  */
     766      118699 : PAGE_TYPE_OPS(Table, table)
     767             : 
     768             : /*
     769             :  * Marks guardpages used with debug_pagealloc.
     770             :  */
     771             : PAGE_TYPE_OPS(Guard, guard)
     772             : 
     773             : extern bool is_free_buddy_page(struct page *page);
     774             : 
     775           0 : __PAGEFLAG(Isolated, isolated, PF_ANY);
     776             : 
     777             : /*
     778             :  * If network-based swap is enabled, sl*b must keep track of whether pages
     779             :  * were allocated from pfmemalloc reserves.
     780             :  */
     781      646409 : static inline int PageSlabPfmemalloc(struct page *page)
     782             : {
     783     1292839 :         VM_BUG_ON_PAGE(!PageSlab(page), page);
     784      646430 :         return PageActive(page);
     785             : }
     786             : 
     787           0 : static inline void SetPageSlabPfmemalloc(struct page *page)
     788             : {
     789           0 :         VM_BUG_ON_PAGE(!PageSlab(page), page);
     790           0 :         SetPageActive(page);
     791           0 : }
     792             : 
     793       17351 : static inline void __ClearPageSlabPfmemalloc(struct page *page)
     794             : {
     795       34702 :         VM_BUG_ON_PAGE(!PageSlab(page), page);
     796       17351 :         __ClearPageActive(page);
     797       17351 : }
     798             : 
     799             : static inline void ClearPageSlabPfmemalloc(struct page *page)
     800             : {
     801             :         VM_BUG_ON_PAGE(!PageSlab(page), page);
     802             :         ClearPageActive(page);
     803             : }
     804             : 
     805             : #ifdef CONFIG_MMU
     806             : #define __PG_MLOCKED            (1UL << PG_mlocked)
     807             : #else
     808             : #define __PG_MLOCKED            0
     809             : #endif
     810             : 
     811             : /*
     812             :  * Flags checked when a page is freed.  Pages being freed should not have
     813             :  * these flags set.  If they are, there is a problem.
     814             :  */
     815             : #define PAGE_FLAGS_CHECK_AT_FREE                                \
     816             :         (1UL << PG_lru            | 1UL << PG_locked        |       \
     817             :          1UL << PG_private        | 1UL << PG_private_2     |       \
     818             :          1UL << PG_writeback      | 1UL << PG_reserved      |       \
     819             :          1UL << PG_slab           | 1UL << PG_active        |       \
     820             :          1UL << PG_unevictable    | __PG_MLOCKED)
     821             : 
     822             : /*
     823             :  * Flags checked when a page is prepped for return by the page allocator.
     824             :  * Pages being prepped should not have these flags set.  If they are set,
     825             :  * there has been a kernel bug or struct page corruption.
     826             :  *
     827             :  * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
     828             :  * alloc-free cycle to prevent from reusing the page.
     829             :  */
     830             : #define PAGE_FLAGS_CHECK_AT_PREP        \
     831             :         (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
     832             : 
     833             : #define PAGE_FLAGS_PRIVATE                              \
     834             :         (1UL << PG_private | 1UL << PG_private_2)
     835             : /**
     836             :  * page_has_private - Determine if page has private stuff
     837             :  * @page: The page to be checked
     838             :  *
     839             :  * Determine if a page has private stuff, indicating that release routines
     840             :  * should be invoked upon it.
     841             :  */
     842        1981 : static inline int page_has_private(struct page *page)
     843             : {
     844        1981 :         return !!(page->flags & PAGE_FLAGS_PRIVATE);
     845             : }
     846             : 
     847             : #undef PF_ANY
     848             : #undef PF_HEAD
     849             : #undef PF_ONLY_HEAD
     850             : #undef PF_NO_TAIL
     851             : #undef PF_NO_COMPOUND
     852             : #undef PF_SECOND
     853             : #endif /* !__GENERATING_BOUNDS_H */
     854             : 
     855             : #endif  /* PAGE_FLAGS_H */

Generated by: LCOV version 1.14