LCOV - code coverage report
Current view: top level - include/linux - mm_inline.h (source / functions) Hit Total Coverage
Test: landlock.info Lines: 28 33 84.8 %
Date: 2021-04-22 12:43:58 Functions: 1 1 100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef LINUX_MM_INLINE_H
       3             : #define LINUX_MM_INLINE_H
       4             : 
       5             : #include <linux/huge_mm.h>
       6             : #include <linux/swap.h>
       7             : 
       8             : /**
       9             :  * page_is_file_lru - should the page be on a file LRU or anon LRU?
      10             :  * @page: the page to test
      11             :  *
      12             :  * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
      13             :  * freed anonymous page (e.g. via MADV_FREE).  Returns 0 if @page is a normal
      14             :  * anonymous page, a tmpfs page or otherwise ram or swap backed page.  Used by
      15             :  * functions that manipulate the LRU lists, to sort a page onto the right LRU
      16             :  * list.
      17             :  *
      18             :  * We would like to get this info without a page flag, but the state
      19             :  * needs to survive until the page is last deleted from the LRU, which
      20             :  * could be as far down as __page_cache_release.
      21             :  */
       22      195610 : static inline int page_is_file_lru(struct page *page)
       23             : {
       24      195610 :         return !PageSwapBacked(page); /* file cache and MADV_FREE'd anon have SwapBacked clear */
       25             : }
      26             : 
                      : /*
                      :  * update_lru_size - account @nr_pages added to (or, when @nr_pages is
                      :  * negative, removed from) the LRU list @lru in zone @zid.  Updates the
                      :  * node-level lruvec counter, the per-zone LRU counter and, when
                      :  * CONFIG_MEMCG is enabled, the memcg's per-zone LRU size.
                      :  */
       27      195622 : static __always_inline void update_lru_size(struct lruvec *lruvec,
       28             :                                 enum lru_list lru, enum zone_type zid,
       29             :                                 int nr_pages)
       30             : {
       31      113478 :         struct pglist_data *pgdat = lruvec_pgdat(lruvec);
       32             : 
       33      195622 :         __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
       34      195622 :         __mod_zone_page_state(&pgdat->node_zones[zid],
       35      195622 :                                 NR_ZONE_LRU_BASE + lru, nr_pages);
       36             : #ifdef CONFIG_MEMCG
       37             :         mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
       38             : #endif
       39           0 : }
      40             : 
      41             : /**
      42             :  * __clear_page_lru_flags - clear page lru flags before releasing a page
      43             :  * @page: the page that was on lru and now has a zero reference
      44             :  */
       45       64998 : static __always_inline void __clear_page_lru_flags(struct page *page)
       46             : {
       47      129996 :         VM_BUG_ON_PAGE(!PageLRU(page), page);
       48             : 
                      :         /* non-atomic clear is safe: the page's refcount is already zero */
       49       64998 :         __ClearPageLRU(page);
       50             : 
       51             :         /* this shouldn't happen, so leave the flags to bad_page() */
       52      132729 :         if (PageActive(page) && PageUnevictable(page))
       53             :                 return;
       54             : 
       55       64998 :         __ClearPageActive(page);
       56      129996 :         __ClearPageUnevictable(page);
       57             : }
      58             : 
      59             : /**
      60             :  * page_lru - which LRU list should a page be on?
      61             :  * @page: the page to test
      62             :  *
      63             :  * Returns the LRU list a page should be on, as an index
      64             :  * into the array of LRU lists.
      65             :  */
       66      195622 : static __always_inline enum lru_list page_lru(struct page *page)
       67             : {
       68      195622 :         enum lru_list lru;
       69             : 
       70      303870 :         VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
       71             : 
                      :         /* unevictable takes precedence over the file/anon distinction */
       72      391244 :         if (PageUnevictable(page))
       73             :                 return LRU_UNEVICTABLE;
       74             : 
       75      195590 :         lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
       76      391180 :         if (PageActive(page))
       77       26088 :                 lru += LRU_ACTIVE; /* active list is a fixed enum offset from its inactive list */
       78             : 
       79             :         return lru;
       80             : }
      81             : 
                      : /*
                      :  * add_page_to_lru_list - account @page and link it at the head of the
                      :  * LRU list selected by page_lru().  Accounting uses thp_nr_pages(),
                      :  * presumably so compound pages are counted in full — TODO confirm.
                      :  */
       82      113462 : static __always_inline void add_page_to_lru_list(struct page *page,
       83             :                                 struct lruvec *lruvec)
       84             : {
       85      113462 :         enum lru_list lru = page_lru(page);
       86             : 
       87      226924 :         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
       88      113462 :         list_add(&page->lru, &lruvec->lists[lru]);
       89             : }
      90             : 
                      : /*
                      :  * add_page_to_lru_list_tail - like add_page_to_lru_list() but links
                      :  * @page at the tail of its LRU list instead of the head.
                      :  */
       91           0 : static __always_inline void add_page_to_lru_list_tail(struct page *page,
       92             :                                 struct lruvec *lruvec)
       93             : {
       94           0 :         enum lru_list lru = page_lru(page);
       95             : 
       96           0 :         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
       97           0 :         list_add_tail(&page->lru, &lruvec->lists[lru]);
       98             : }
      99             : 
                      : /*
                      :  * del_page_from_lru_list - unlink @page from its LRU list and subtract
                      :  * it from the LRU counters (negative nr_pages to update_lru_size()).
                      :  * NOTE(review): page_lru() is evaluated after list_del(), so the LRU
                      :  * page flags must still be set at this point.
                      :  */
      100       82160 : static __always_inline void del_page_from_lru_list(struct page *page,
      101             :                                 struct lruvec *lruvec)
      102             : {
      103       82160 :         list_del(&page->lru);
      104      246480 :         update_lru_size(lruvec, page_lru(page), page_zonenum(page),
      105       82160 :                         -thp_nr_pages(page));
      106             : }
     107             : #endif

Generated by: LCOV version 1.14