LCOV - code coverage report
Current view: top level - include/linux - memcontrol.h (source / functions)
Test: landlock.info
                     Hit     Total    Coverage
Lines:                89       147      60.5 %
Functions:             6         8      75.0 %
Date: 2021-04-22 12:43:58

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* memcontrol.h - Memory Controller
       3             :  *
       4             :  * Copyright IBM Corporation, 2007
       5             :  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
       6             :  *
       7             :  * Copyright 2007 OpenVZ SWsoft Inc
       8             :  * Author: Pavel Emelianov <xemul@openvz.org>
       9             :  */
      10             : 
      11             : #ifndef _LINUX_MEMCONTROL_H
      12             : #define _LINUX_MEMCONTROL_H
      13             : #include <linux/cgroup.h>
      14             : #include <linux/vm_event_item.h>
      15             : #include <linux/hardirq.h>
      16             : #include <linux/jump_label.h>
      17             : #include <linux/page_counter.h>
      18             : #include <linux/vmpressure.h>
      19             : #include <linux/eventfd.h>
      20             : #include <linux/mm.h>
      21             : #include <linux/vmstat.h>
      22             : #include <linux/writeback.h>
      23             : #include <linux/page-flags.h>
      24             : 
      25             : struct mem_cgroup;
      26             : struct obj_cgroup;
      27             : struct page;
      28             : struct mm_struct;
      29             : struct kmem_cache;
      30             : 
      31             : /* Cgroup-specific page state, on top of universal node page state */
      32             : enum memcg_stat_item {
      33             :         MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
      34             :         MEMCG_SOCK,
      35             :         MEMCG_PERCPU_B,
      36             :         MEMCG_NR_STAT,
      37             : };
      38             : 
      39             : enum memcg_memory_event {
      40             :         MEMCG_LOW,
      41             :         MEMCG_HIGH,
      42             :         MEMCG_MAX,
      43             :         MEMCG_OOM,
      44             :         MEMCG_OOM_KILL,
      45             :         MEMCG_SWAP_HIGH,
      46             :         MEMCG_SWAP_MAX,
      47             :         MEMCG_SWAP_FAIL,
      48             :         MEMCG_NR_MEMORY_EVENTS,
      49             : };
      50             : 
      51             : struct mem_cgroup_reclaim_cookie {
      52             :         pg_data_t *pgdat;
      53             :         unsigned int generation;
      54             : };
      55             : 
      56             : #ifdef CONFIG_MEMCG
      57             : 
      58             : #define MEM_CGROUP_ID_SHIFT     16
      59             : #define MEM_CGROUP_ID_MAX       USHRT_MAX
      60             : 
      61             : struct mem_cgroup_id {
      62             :         int id;
      63             :         refcount_t ref;
      64             : };
      65             : 
      66             : /*
       67             :  * The per-memcg event counter is incremented on every pagein/pageout. With
       68             :  * THP, it is incremented by the number of pages. This counter is used
       69             :  * to trigger some periodic events, which is straightforward and better
       70             :  * than using jiffies etc. to handle periodic memcg events.
      71             :  */
      72             : enum mem_cgroup_events_target {
      73             :         MEM_CGROUP_TARGET_THRESH,
      74             :         MEM_CGROUP_TARGET_SOFTLIMIT,
      75             :         MEM_CGROUP_NTARGETS,
      76             : };
      77             : 
      78             : struct memcg_vmstats_percpu {
      79             :         long stat[MEMCG_NR_STAT];
      80             :         unsigned long events[NR_VM_EVENT_ITEMS];
      81             :         unsigned long nr_page_events;
      82             :         unsigned long targets[MEM_CGROUP_NTARGETS];
      83             : };
      84             : 
      85             : struct mem_cgroup_reclaim_iter {
      86             :         struct mem_cgroup *position;
      87             :         /* scan generation, increased every round-trip */
      88             :         unsigned int generation;
      89             : };
      90             : 
      91             : struct lruvec_stat {
      92             :         long count[NR_VM_NODE_STAT_ITEMS];
      93             : };
      94             : 
      95             : struct batched_lruvec_stat {
      96             :         s32 count[NR_VM_NODE_STAT_ITEMS];
      97             : };
      98             : 
      99             : /*
     100             :  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
     101             :  * which have elements charged to this memcg.
     102             :  */
     103             : struct memcg_shrinker_map {
     104             :         struct rcu_head rcu;
     105             :         unsigned long map[];
     106             : };
     107             : 
     108             : /*
     109             :  * per-node information in memory controller.
     110             :  */
     111             : struct mem_cgroup_per_node {
     112             :         struct lruvec           lruvec;
     113             : 
     114             :         /*
      115             :          * Legacy local VM stats. This must stay struct lruvec_stat and
      116             :          * cannot be optimized to struct batched_lruvec_stat, because
      117             :          * the threshold of lruvec_stat_cpu is at most
      118             :          * MEMCG_CHARGE_BATCH * PAGE_SIZE, which fits into an s32, while
      119             :          * this field has no upper limit.
     120             :          */
     121             :         struct lruvec_stat __percpu *lruvec_stat_local;
     122             : 
     123             :         /* Subtree VM stats (batched updates) */
     124             :         struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
     125             :         atomic_long_t           lruvec_stat[NR_VM_NODE_STAT_ITEMS];
     126             : 
     127             :         unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
     128             : 
     129             :         struct mem_cgroup_reclaim_iter  iter;
     130             : 
     131             :         struct memcg_shrinker_map __rcu *shrinker_map;
     132             : 
     133             :         struct rb_node          tree_node;      /* RB tree node */
     134             :         unsigned long           usage_in_excess;/* Set to the value by which */
      135             :                                                 /* the soft limit is exceeded */
     136             :         bool                    on_tree;
     137             :         struct mem_cgroup       *memcg;         /* Back pointer, we cannot */
     138             :                                                 /* use container_of        */
     139             : };
     140             : 
     141             : struct mem_cgroup_threshold {
     142             :         struct eventfd_ctx *eventfd;
     143             :         unsigned long threshold;
     144             : };
     145             : 
     146             : /* For threshold */
     147             : struct mem_cgroup_threshold_ary {
     148             :         /* An array index points to threshold just below or equal to usage. */
     149             :         int current_threshold;
     150             :         /* Size of entries[] */
     151             :         unsigned int size;
     152             :         /* Array of thresholds */
     153             :         struct mem_cgroup_threshold entries[];
     154             : };
     155             : 
     156             : struct mem_cgroup_thresholds {
     157             :         /* Primary thresholds array */
     158             :         struct mem_cgroup_threshold_ary *primary;
     159             :         /*
     160             :          * Spare threshold array.
     161             :          * This is needed to make mem_cgroup_unregister_event() "never fail".
     162             :          * It must be able to store at least primary->size - 1 entries.
     163             :          */
     164             :         struct mem_cgroup_threshold_ary *spare;
     165             : };
     166             : 
     167             : enum memcg_kmem_state {
     168             :         KMEM_NONE,
     169             :         KMEM_ALLOCATED,
     170             :         KMEM_ONLINE,
     171             : };
     172             : 
     173             : #if defined(CONFIG_SMP)
     174             : struct memcg_padding {
     175             :         char x[0];
     176             : } ____cacheline_internodealigned_in_smp;
     177             : #define MEMCG_PADDING(name)      struct memcg_padding name;
     178             : #else
     179             : #define MEMCG_PADDING(name)
     180             : #endif
     181             : 
     182             : /*
      183             :  * Remember the four most recent foreign writebacks with dirty pages in this
     184             :  * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
     185             :  * one in a given round, we're likely to catch it later if it keeps
     186             :  * foreign-dirtying, so a fairly low count should be enough.
     187             :  *
     188             :  * See mem_cgroup_track_foreign_dirty_slowpath() for details.
     189             :  */
     190             : #define MEMCG_CGWB_FRN_CNT      4
     191             : 
     192             : struct memcg_cgwb_frn {
     193             :         u64 bdi_id;                     /* bdi->id of the foreign inode */
     194             :         int memcg_id;                   /* memcg->css.id of foreign inode */
     195             :         u64 at;                         /* jiffies_64 at the time of dirtying */
     196             :         struct wb_completion done;      /* tracks in-flight foreign writebacks */
     197             : };
     198             : 
     199             : /*
     200             :  * Bucket for arbitrarily byte-sized objects charged to a memory
     201             :  * cgroup. The bucket can be reparented in one piece when the cgroup
     202             :  * is destroyed, without having to round up the individual references
     203             :  * of all live memory objects in the wild.
     204             :  */
     205             : struct obj_cgroup {
     206             :         struct percpu_ref refcnt;
     207             :         struct mem_cgroup *memcg;
     208             :         atomic_t nr_charged_bytes;
     209             :         union {
     210             :                 struct list_head list;
     211             :                 struct rcu_head rcu;
     212             :         };
     213             : };
     214             : 
     215             : /*
     216             :  * The memory controller data structure. The memory controller controls both
     217             :  * page cache and RSS per cgroup. We would eventually like to provide
     218             :  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
     219             :  * to help the administrator determine what knobs to tune.
     220             :  */
     221             : struct mem_cgroup {
     222             :         struct cgroup_subsys_state css;
     223             : 
     224             :         /* Private memcg ID. Used to ID objects that outlive the cgroup */
     225             :         struct mem_cgroup_id id;
     226             : 
     227             :         /* Accounted resources */
     228             :         struct page_counter memory;             /* Both v1 & v2 */
     229             : 
     230             :         union {
     231             :                 struct page_counter swap;       /* v2 only */
     232             :                 struct page_counter memsw;      /* v1 only */
     233             :         };
     234             : 
     235             :         /* Legacy consumer-oriented counters */
     236             :         struct page_counter kmem;               /* v1 only */
     237             :         struct page_counter tcpmem;             /* v1 only */
     238             : 
     239             :         /* Range enforcement for interrupt charges */
     240             :         struct work_struct high_work;
     241             : 
     242             :         unsigned long soft_limit;
     243             : 
     244             :         /* vmpressure notifications */
     245             :         struct vmpressure vmpressure;
     246             : 
     247             :         /*
      248             :          * Should the OOM killer kill all tasks in this cgroup if it kills one?
     249             :          */
     250             :         bool oom_group;
     251             : 
     252             :         /* protected by memcg_oom_lock */
     253             :         bool            oom_lock;
     254             :         int             under_oom;
     255             : 
     256             :         int     swappiness;
     257             :         /* OOM-Killer disable */
     258             :         int             oom_kill_disable;
     259             : 
     260             :         /* memory.events and memory.events.local */
     261             :         struct cgroup_file events_file;
     262             :         struct cgroup_file events_local_file;
     263             : 
     264             :         /* handle for "memory.swap.events" */
     265             :         struct cgroup_file swap_events_file;
     266             : 
     267             :         /* protect arrays of thresholds */
     268             :         struct mutex thresholds_lock;
     269             : 
     270             :         /* thresholds for memory usage. RCU-protected */
     271             :         struct mem_cgroup_thresholds thresholds;
     272             : 
     273             :         /* thresholds for mem+swap usage. RCU-protected */
     274             :         struct mem_cgroup_thresholds memsw_thresholds;
     275             : 
     276             :         /* For oom notifier event fd */
     277             :         struct list_head oom_notify;
     278             : 
     279             :         /*
     280             :          * Should we move charges of a task when a task is moved into this
      281             :          * mem_cgroup? And what type of charges should we move?
     282             :          */
     283             :         unsigned long move_charge_at_immigrate;
     284             :         /* taken only while moving_account > 0 */
     285             :         spinlock_t              move_lock;
     286             :         unsigned long           move_lock_flags;
     287             : 
     288             :         MEMCG_PADDING(_pad1_);
     289             : 
     290             :         atomic_long_t           vmstats[MEMCG_NR_STAT];
     291             :         atomic_long_t           vmevents[NR_VM_EVENT_ITEMS];
     292             : 
     293             :         /* memory.events */
     294             :         atomic_long_t           memory_events[MEMCG_NR_MEMORY_EVENTS];
     295             :         atomic_long_t           memory_events_local[MEMCG_NR_MEMORY_EVENTS];
     296             : 
     297             :         unsigned long           socket_pressure;
     298             : 
     299             :         /* Legacy tcp memory accounting */
     300             :         bool                    tcpmem_active;
     301             :         int                     tcpmem_pressure;
     302             : 
     303             : #ifdef CONFIG_MEMCG_KMEM
     304             :         int kmemcg_id;
     305             :         enum memcg_kmem_state kmem_state;
     306             :         struct obj_cgroup __rcu *objcg;
     307             :         struct list_head objcg_list; /* list of inherited objcgs */
     308             : #endif
     309             : 
     310             :         MEMCG_PADDING(_pad2_);
     311             : 
     312             :         /*
      313             :          * set > 0 if pages under this cgroup are moving to another cgroup.
     314             :          */
     315             :         atomic_t                moving_account;
     316             :         struct task_struct      *move_lock_task;
     317             : 
     318             :         /* Legacy local VM stats and events */
     319             :         struct memcg_vmstats_percpu __percpu *vmstats_local;
     320             : 
     321             :         /* Subtree VM stats and events (batched updates) */
     322             :         struct memcg_vmstats_percpu __percpu *vmstats_percpu;
     323             : 
     324             : #ifdef CONFIG_CGROUP_WRITEBACK
     325             :         struct list_head cgwb_list;
     326             :         struct wb_domain cgwb_domain;
     327             :         struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
     328             : #endif
     329             : 
      330             :         /* List of events which userspace wants to receive */
     331             :         struct list_head event_list;
     332             :         spinlock_t event_list_lock;
     333             : 
     334             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     335             :         struct deferred_split deferred_split_queue;
     336             : #endif
     337             : 
     338             :         struct mem_cgroup_per_node *nodeinfo[0];
     339             :         /* WARNING: nodeinfo must be the last member here */
     340             : };
     341             : 
     342             : /*
      343             :  * Size of the first charge trial. "32" comes from vmscan.c's magic value.
      344             :  * TODO: it may be necessary to use bigger numbers on big-iron machines.
     345             :  */
     346             : #define MEMCG_CHARGE_BATCH 32U
     347             : 
     348             : extern struct mem_cgroup *root_mem_cgroup;
     349             : 
     350             : enum page_memcg_data_flags {
     351             :         /* page->memcg_data is a pointer to an objcgs vector */
     352             :         MEMCG_DATA_OBJCGS = (1UL << 0),
     353             :         /* page has been accounted as a non-slab kernel page */
     354             :         MEMCG_DATA_KMEM = (1UL << 1),
     355             :         /* the next bit after the last actual flag */
     356             :         __NR_MEMCG_DATA_FLAGS  = (1UL << 2),
     357             : };
     358             : 
     359             : #define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
     360             : 
     361             : /*
     362             :  * page_memcg - get the memory cgroup associated with a page
     363             :  * @page: a pointer to the page struct
     364             :  *
     365             :  * Returns a pointer to the memory cgroup associated with the page,
     366             :  * or NULL. This function assumes that the page is known to have a
     367             :  * proper memory cgroup pointer. It's not safe to call this function
      368             :  * against some types of pages, e.g. slab pages or ex-slab pages.
     369             :  *
     370             :  * Any of the following ensures page and memcg binding stability:
     371             :  * - the page lock
     372             :  * - LRU isolation
     373             :  * - lock_page_memcg()
     374             :  * - exclusive reference
     375             :  */
     376             : static inline struct mem_cgroup *page_memcg(struct page *page)
     377             : {
     378             :         unsigned long memcg_data = page->memcg_data;
     379             : 
     380             :         VM_BUG_ON_PAGE(PageSlab(page), page);
     381             :         VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
     382             : 
     383             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     384             : }
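
/*
 * Illustrative sketch (editor-added, not part of memcontrol.h): one way a
 * caller might use page_memcg() safely, taking the page lock as one of the
 * binding stabilizers listed above. The helper name is hypothetical;
 * lock_page()/unlock_page() come from <linux/pagemap.h>.
 */
static inline void report_page_memcg(struct page *page)
{
        struct mem_cgroup *memcg;

        lock_page(page);                /* stabilizes the page<->memcg binding */
        memcg = page_memcg(page);       /* may be NULL for uncharged pages */
        if (memcg)
                pr_debug("page is charged to memcg %p\n", memcg);
        unlock_page(page);
}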
     385             : 
     386             : /*
     387             :  * page_memcg_rcu - locklessly get the memory cgroup associated with a page
     388             :  * @page: a pointer to the page struct
     389             :  *
     390             :  * Returns a pointer to the memory cgroup associated with the page,
     391             :  * or NULL. This function assumes that the page is known to have a
     392             :  * proper memory cgroup pointer. It's not safe to call this function
      393             :  * against some types of pages, e.g. slab pages or ex-slab pages.
     394             :  */
     395             : static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
     396             : {
     397             :         VM_BUG_ON_PAGE(PageSlab(page), page);
     398             :         WARN_ON_ONCE(!rcu_read_lock_held());
     399             : 
     400             :         return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) &
     401             :                                      ~MEMCG_DATA_FLAGS_MASK);
     402             : }
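
/*
 * Illustrative sketch (editor-added): lockless lookup via page_memcg_rcu().
 * The returned pointer is only stable inside the RCU read-side critical
 * section; the helper name below is hypothetical.
 */
static inline bool page_has_memcg_rcu(struct page *page)
{
        bool charged;

        rcu_read_lock();
        charged = page_memcg_rcu(page) != NULL;
        rcu_read_unlock();

        return charged;
}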
     403             : 
     404             : /*
     405             :  * page_memcg_check - get the memory cgroup associated with a page
     406             :  * @page: a pointer to the page struct
     407             :  *
     408             :  * Returns a pointer to the memory cgroup associated with the page,
      409             :  * or NULL. Unlike page_memcg(), this function can take any page
      410             :  * as an argument. It has to be used when it's not known whether a page
     411             :  * has an associated memory cgroup pointer or an object cgroups vector.
     412             :  *
     413             :  * Any of the following ensures page and memcg binding stability:
     414             :  * - the page lock
     415             :  * - LRU isolation
     416             :  * - lock_page_memcg()
     417             :  * - exclusive reference
     418             :  */
     419             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
     420             : {
     421             :         /*
     422             :          * Because page->memcg_data might be changed asynchronously
     423             :          * for slab pages, READ_ONCE() should be used here.
     424             :          */
     425             :         unsigned long memcg_data = READ_ONCE(page->memcg_data);
     426             : 
     427             :         if (memcg_data & MEMCG_DATA_OBJCGS)
     428             :                 return NULL;
     429             : 
     430             :         return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     431             : }
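
/*
 * Illustrative sketch (editor-added): page_memcg_check() is the variant to
 * use when the page may carry an object cgroups vector instead of a plain
 * memcg pointer (e.g. slab pages), in which case it returns NULL. The
 * helper name is hypothetical.
 */
static inline bool page_charged_to_memcg(struct page *page)
{
        return page_memcg_check(page) != NULL;
}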
     432             : 
     433             : /*
     434             :  * PageMemcgKmem - check if the page has MemcgKmem flag set
     435             :  * @page: a pointer to the page struct
     436             :  *
      437             :  * Checks if the page has the MemcgKmem flag set. The caller must ensure that
     438             :  * the page has an associated memory cgroup. It's not safe to call this function
     439             :  * against some types of pages, e.g. slab pages.
     440             :  */
     441             : static inline bool PageMemcgKmem(struct page *page)
     442             : {
     443             :         VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
     444             :         return page->memcg_data & MEMCG_DATA_KMEM;
     445             : }
     446             : 
     447             : #ifdef CONFIG_MEMCG_KMEM
     448             : /*
     449             :  * page_objcgs - get the object cgroups vector associated with a page
     450             :  * @page: a pointer to the page struct
     451             :  *
     452             :  * Returns a pointer to the object cgroups vector associated with the page,
     453             :  * or NULL. This function assumes that the page is known to have an
     454             :  * associated object cgroups vector. It's not safe to call this function
      455             :  * against pages which might have an associated memory cgroup, e.g.
     456             :  * kernel stack pages.
     457             :  */
     458             : static inline struct obj_cgroup **page_objcgs(struct page *page)
     459             : {
     460             :         unsigned long memcg_data = READ_ONCE(page->memcg_data);
     461             : 
     462             :         VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
     463             :         VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
     464             : 
     465             :         return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     466             : }
     467             : 
     468             : /*
     469             :  * page_objcgs_check - get the object cgroups vector associated with a page
     470             :  * @page: a pointer to the page struct
     471             :  *
     472             :  * Returns a pointer to the object cgroups vector associated with the page,
     473             :  * or NULL. This function is safe to use if the page can be directly associated
     474             :  * with a memory cgroup.
     475             :  */
     476             : static inline struct obj_cgroup **page_objcgs_check(struct page *page)
     477             : {
     478             :         unsigned long memcg_data = READ_ONCE(page->memcg_data);
     479             : 
     480             :         if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
     481             :                 return NULL;
     482             : 
     483             :         VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
     484             : 
     485             :         return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
     486             : }
     487             : 
     488             : #else
     489             : static inline struct obj_cgroup **page_objcgs(struct page *page)
     490             : {
     491             :         return NULL;
     492             : }
     493             : 
     494             : static inline struct obj_cgroup **page_objcgs_check(struct page *page)
     495             : {
     496             :         return NULL;
     497             : }
     498             : #endif
     499             : 
     500             : static __always_inline bool memcg_stat_item_in_bytes(int idx)
     501             : {
     502             :         if (idx == MEMCG_PERCPU_B)
     503             :                 return true;
     504             :         return vmstat_item_in_bytes(idx);
     505             : }
     506             : 
     507             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
     508             : {
     509             :         return (memcg == root_mem_cgroup);
     510             : }
     511             : 
     512             : static inline bool mem_cgroup_disabled(void)
     513             : {
     514             :         return !cgroup_subsys_enabled(memory_cgrp_subsys);
     515             : }
     516             : 
     517             : static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
     518             :                                                   struct mem_cgroup *memcg,
     519             :                                                   bool in_low_reclaim)
     520             : {
     521             :         if (mem_cgroup_disabled())
     522             :                 return 0;
     523             : 
     524             :         /*
     525             :          * There is no reclaim protection applied to a targeted reclaim.
      526             :          * We are special-casing targeted reclaim here because
     527             :          * mem_cgroup_protected calculation is not robust enough to keep
     528             :          * the protection invariant for calculated effective values for
     529             :          * parallel reclaimers with different reclaim target. This is
     530             :          * especially a problem for tail memcgs (as they have pages on LRU)
     531             :          * which would want to have effective values 0 for targeted reclaim
     532             :          * but a different value for external reclaim.
     533             :          *
     534             :          * Example
     535             :          * Let's have global and A's reclaim in parallel:
     536             :          *  |
     537             :          *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
     538             :          *  |\
     539             :          *  | C (low = 1G, usage = 2.5G)
     540             :          *  B (low = 1G, usage = 0.5G)
     541             :          *
     542             :          * For the global reclaim
     543             :          * A.elow = A.low
     544             :          * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
     545             :          * C.elow = min(C.usage, C.low)
     546             :          *
     547             :          * With the effective values resetting we have A reclaim
     548             :          * A.elow = 0
     549             :          * B.elow = B.low
     550             :          * C.elow = C.low
     551             :          *
     552             :          * If the global reclaim races with A's reclaim then
      553             :          * B.elow = C.elow = 0 (because children_low_usage > A.elow)
     554             :          * is possible and reclaiming B would be violating the protection.
     555             :          *
     556             :          */
     557             :         if (root == memcg)
     558             :                 return 0;
     559             : 
     560             :         if (in_low_reclaim)
     561             :                 return READ_ONCE(memcg->memory.emin);
     562             : 
     563             :         return max(READ_ONCE(memcg->memory.emin),
     564             :                    READ_ONCE(memcg->memory.elow));
     565             : }
     566             : 
     567             : void mem_cgroup_calculate_protection(struct mem_cgroup *root,
     568             :                                      struct mem_cgroup *memcg);
     569             : 
     570             : static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
     571             : {
     572             :         /*
     573             :          * The root memcg doesn't account charges, and doesn't support
     574             :          * protection.
     575             :          */
     576             :         return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
     577             : 
     578             : }
     579             : 
     580             : static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
     581             : {
     582             :         if (!mem_cgroup_supports_protection(memcg))
     583             :                 return false;
     584             : 
     585             :         return READ_ONCE(memcg->memory.elow) >=
     586             :                 page_counter_read(&memcg->memory);
     587             : }
     588             : 
     589             : static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
     590             : {
     591             :         if (!mem_cgroup_supports_protection(memcg))
     592             :                 return false;
     593             : 
     594             :         return READ_ONCE(memcg->memory.emin) >=
     595             :                 page_counter_read(&memcg->memory);
     596             : }
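
/*
 * Illustrative sketch (editor-added), loosely modelled on how a reclaimer
 * would consult the two helpers above: memory.min is an absolute floor,
 * while memory.low is only overridden once low-protected groups have
 * already been scanned. "memcg_skip_reclaim" and "memcg_low_reclaim" are
 * hypothetical names.
 */
static inline bool memcg_skip_reclaim(struct mem_cgroup *memcg,
                                      bool memcg_low_reclaim)
{
        if (mem_cgroup_below_min(memcg))
                return true;    /* never reclaim below memory.min */

        if (mem_cgroup_below_low(memcg) && !memcg_low_reclaim)
                return true;    /* respect memory.low on the first pass */

        return false;
}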
     597             : 
     598             : int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
     599             : 
     600             : void mem_cgroup_uncharge(struct page *page);
     601             : void mem_cgroup_uncharge_list(struct list_head *page_list);
     602             : 
     603             : void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
     604             : 
     605             : static struct mem_cgroup_per_node *
     606             : mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
     607             : {
     608             :         return memcg->nodeinfo[nid];
     609             : }
     610             : 
     611             : /**
     612             :  * mem_cgroup_lruvec - get the lru list vector for a memcg & node
     613             :  * @memcg: memcg of the wanted lruvec
     614             :  * @pgdat: pglist_data
     615             :  *
     616             :  * Returns the lru list vector holding pages for a given @memcg &
     617             :  * @pgdat combination. This can be the node lruvec, if the memory
     618             :  * controller is disabled.
     619             :  */
     620             : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
     621             :                                                struct pglist_data *pgdat)
     622             : {
     623             :         struct mem_cgroup_per_node *mz;
     624             :         struct lruvec *lruvec;
     625             : 
     626             :         if (mem_cgroup_disabled()) {
     627             :                 lruvec = &pgdat->__lruvec;
     628             :                 goto out;
     629             :         }
     630             : 
     631             :         if (!memcg)
     632             :                 memcg = root_mem_cgroup;
     633             : 
     634             :         mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
     635             :         lruvec = &mz->lruvec;
     636             : out:
     637             :         /*
     638             :          * Since a node can be onlined after the mem_cgroup was created,
     639             :          * we have to be prepared to initialize lruvec->pgdat here;
     640             :          * and if offlined then reonlined, we need to reinitialize it.
     641             :          */
     642             :         if (unlikely(lruvec->pgdat != pgdat))
     643             :                 lruvec->pgdat = pgdat;
     644             :         return lruvec;
     645             : }
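
/*
 * Illustrative sketch (editor-added): resolving the lruvec of a memcg on a
 * given node, e.g. before taking its lru_lock or reading per-node stats.
 * The helper name is hypothetical; NODE_DATA() comes from the mmzone/NUMA
 * headers.
 */
static inline struct lruvec *memcg_lruvec_on_node(struct mem_cgroup *memcg,
                                                  int nid)
{
        return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
}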
     646             : 
     647             : /**
     648             :  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
     649             :  * @page: the page
     650             :  * @pgdat: pgdat of the page
     651             :  *
      652             :  * This function relies on the page's memcg binding being stable.
     653             :  */
     654             : static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
     655             :                                                 struct pglist_data *pgdat)
     656             : {
     657             :         struct mem_cgroup *memcg = page_memcg(page);
     658             : 
     659             :         VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
     660             :         return mem_cgroup_lruvec(memcg, pgdat);
     661             : }
     662             : 
     663             : static inline bool lruvec_holds_page_lru_lock(struct page *page,
     664             :                                               struct lruvec *lruvec)
     665             : {
     666             :         pg_data_t *pgdat = page_pgdat(page);
     667             :         const struct mem_cgroup *memcg;
     668             :         struct mem_cgroup_per_node *mz;
     669             : 
     670             :         if (mem_cgroup_disabled())
     671             :                 return lruvec == &pgdat->__lruvec;
     672             : 
     673             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     674             :         memcg = page_memcg(page) ? : root_mem_cgroup;
     675             : 
     676             :         return lruvec->pgdat == pgdat && mz->memcg == memcg;
     677             : }
     678             : 
     679             : struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
     680             : 
     681             : struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
     682             : 
     683             : struct lruvec *lock_page_lruvec(struct page *page);
     684             : struct lruvec *lock_page_lruvec_irq(struct page *page);
     685             : struct lruvec *lock_page_lruvec_irqsave(struct page *page,
     686             :                                                 unsigned long *flags);
     687             : 
     688             : #ifdef CONFIG_DEBUG_VM
     689             : void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
     690             : #else
     691             : static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
     692             : {
     693             : }
     694             : #endif
     695             : 
     696             : static inline
     697             : struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
     698             :         return css ? container_of(css, struct mem_cgroup, css) : NULL;
     699             : }
     700             : 
     701             : static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
     702             : {
     703             :         return percpu_ref_tryget(&objcg->refcnt);
     704             : }
     705             : 
     706             : static inline void obj_cgroup_get(struct obj_cgroup *objcg)
     707             : {
     708             :         percpu_ref_get(&objcg->refcnt);
     709             : }
     710             : 
     711             : static inline void obj_cgroup_put(struct obj_cgroup *objcg)
     712             : {
     713             :         percpu_ref_put(&objcg->refcnt);
     714             : }
     715             : 
     716             : /*
      717             :  * After initialization, objcg->memcg always points at a valid memcg,
      718             :  * but it can be atomically swapped to the parent memcg.
     719             :  *
     720             :  * The caller must ensure that the returned memcg won't be released:
     721             :  * e.g. acquire the rcu_read_lock or css_set_lock.
     722             :  */
     723             : static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
     724             : {
     725             :         return READ_ONCE(objcg->memcg);
     726             : }
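
/*
 * Illustrative sketch (editor-added) of the rule documented above: the
 * memcg derived from an objcg is only guaranteed to stay alive while the
 * RCU read lock (or an extra css reference) is held. The helper name is
 * hypothetical.
 */
static inline bool obj_cgroup_is_root(struct obj_cgroup *objcg)
{
        bool is_root;

        rcu_read_lock();
        is_root = mem_cgroup_is_root(obj_cgroup_memcg(objcg));
        rcu_read_unlock();

        return is_root;
}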
     727             : 
     728             : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
     729             : {
     730             :         if (memcg)
     731             :                 css_put(&memcg->css);
     732             : }
     733             : 
     734             : #define mem_cgroup_from_counter(counter, member)        \
     735             :         container_of(counter, struct mem_cgroup, member)
     736             : 
     737             : struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
     738             :                                    struct mem_cgroup *,
     739             :                                    struct mem_cgroup_reclaim_cookie *);
     740             : void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
     741             : int mem_cgroup_scan_tasks(struct mem_cgroup *,
     742             :                           int (*)(struct task_struct *, void *), void *);
     743             : 
     744             : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
     745             : {
     746             :         if (mem_cgroup_disabled())
     747             :                 return 0;
     748             : 
     749             :         return memcg->id.id;
     750             : }
     751             : struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
     752             : 
     753             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
     754             : {
     755             :         return mem_cgroup_from_css(seq_css(m));
     756             : }
     757             : 
     758             : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
     759             : {
     760             :         struct mem_cgroup_per_node *mz;
     761             : 
     762             :         if (mem_cgroup_disabled())
     763             :                 return NULL;
     764             : 
     765             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     766             :         return mz->memcg;
     767             : }
     768             : 
     769             : /**
     770             :  * parent_mem_cgroup - find the accounting parent of a memcg
     771             :  * @memcg: memcg whose parent to find
     772             :  *
     773             :  * Returns the parent memcg, or NULL if this is the root or the memory
     774             :  * controller is in legacy no-hierarchy mode.
     775             :  */
     776             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
     777             : {
     778             :         if (!memcg->memory.parent)
     779             :                 return NULL;
     780             :         return mem_cgroup_from_counter(memcg->memory.parent, memory);
     781             : }
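
/*
 * Illustrative sketch (editor-added): walking from a memcg up to the root
 * through the page_counter parent chain that parent_mem_cgroup() exposes.
 * The helper name is hypothetical.
 */
static inline int mem_cgroup_hierarchy_depth(struct mem_cgroup *memcg)
{
        int depth = 0;

        while ((memcg = parent_mem_cgroup(memcg)))
                depth++;

        return depth;
}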
     782             : 
     783             : static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
     784             :                               struct mem_cgroup *root)
     785             : {
     786             :         if (root == memcg)
     787             :                 return true;
     788             :         return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
     789             : }
     790             : 
     791             : static inline bool mm_match_cgroup(struct mm_struct *mm,
     792             :                                    struct mem_cgroup *memcg)
     793             : {
     794             :         struct mem_cgroup *task_memcg;
     795             :         bool match = false;
     796             : 
     797             :         rcu_read_lock();
     798             :         task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
     799             :         if (task_memcg)
     800             :                 match = mem_cgroup_is_descendant(task_memcg, memcg);
     801             :         rcu_read_unlock();
     802             :         return match;
     803             : }
     804             : 
     805             : struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
     806             : ino_t page_cgroup_ino(struct page *page);
     807             : 
     808             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
     809             : {
     810             :         if (mem_cgroup_disabled())
     811             :                 return true;
     812             :         return !!(memcg->css.flags & CSS_ONLINE);
     813             : }
     814             : 
     815             : /*
     816             :  * For memory reclaim.
     817             :  */
     818             : int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
     819             : 
     820             : void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
     821             :                 int zid, int nr_pages);
     822             : 
     823             : static inline
     824             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
     825             :                 enum lru_list lru, int zone_idx)
     826             : {
     827             :         struct mem_cgroup_per_node *mz;
     828             : 
     829             :         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     830             :         return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
     831             : }
     832             : 
     833             : void mem_cgroup_handle_over_high(void);
     834             : 
     835             : unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
     836             : 
     837             : unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
     838             : 
     839             : void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
     840             :                                 struct task_struct *p);
     841             : 
     842             : void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
     843             : 
     844             : static inline void mem_cgroup_enter_user_fault(void)
     845             : {
     846             :         WARN_ON(current->in_user_fault);
     847             :         current->in_user_fault = 1;
     848             : }
     849             : 
     850             : static inline void mem_cgroup_exit_user_fault(void)
     851             : {
     852             :         WARN_ON(!current->in_user_fault);
     853             :         current->in_user_fault = 0;
     854             : }
     855             : 
     856             : static inline bool task_in_memcg_oom(struct task_struct *p)
     857             : {
     858             :         return p->memcg_in_oom;
     859             : }
     860             : 
     861             : bool mem_cgroup_oom_synchronize(bool wait);
     862             : struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
     863             :                                             struct mem_cgroup *oom_domain);
     864             : void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
     865             : 
     866             : #ifdef CONFIG_MEMCG_SWAP
     867             : extern bool cgroup_memory_noswap;
     868             : #endif
     869             : 
     870             : struct mem_cgroup *lock_page_memcg(struct page *page);
     871             : void __unlock_page_memcg(struct mem_cgroup *memcg);
     872             : void unlock_page_memcg(struct page *page);
     873             : 
     874             : /*
     875             :  * idx can be of type enum memcg_stat_item or node_stat_item.
     876             :  * Keep in sync with memcg_exact_page_state().
     877             :  */
     878             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
     879             : {
     880             :         long x = atomic_long_read(&memcg->vmstats[idx]);
     881             : #ifdef CONFIG_SMP
     882             :         if (x < 0)
     883             :                 x = 0;
     884             : #endif
     885             :         return x;
     886             : }
     887             : 
     888             : /*
     889             :  * idx can be of type enum memcg_stat_item or node_stat_item.
     890             :  * Keep in sync with memcg_exact_page_state().
     891             :  */
     892             : static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
     893             :                                                    int idx)
     894             : {
     895             :         long x = 0;
     896             :         int cpu;
     897             : 
     898             :         for_each_possible_cpu(cpu)
     899             :                 x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
     900             : #ifdef CONFIG_SMP
     901             :         if (x < 0)
     902             :                 x = 0;
     903             : #endif
     904             :         return x;
     905             : }
     906             : 
     907             : void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
     908             : 
     909             : /* idx can be of type enum memcg_stat_item or node_stat_item */
     910             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
     911             :                                    int idx, int val)
     912             : {
     913             :         unsigned long flags;
     914             : 
     915             :         local_irq_save(flags);
     916             :         __mod_memcg_state(memcg, idx, val);
     917             :         local_irq_restore(flags);
     918             : }
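
/*
 * Illustrative sketch (editor-added): mod_memcg_state() is the irq-safe
 * wrapper shown above; callers that already run with interrupts disabled
 * call __mod_memcg_state() directly. The helper and its use of MEMCG_SOCK
 * are hypothetical.
 */
static inline void memcg_account_sock_pages(struct mem_cgroup *memcg,
                                            int nr_pages)
{
        /* account (or unaccount, if negative) socket buffer pages */
        mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
}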
     919             : 
     920             : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
     921             :                                               enum node_stat_item idx)
     922             : {
     923             :         struct mem_cgroup_per_node *pn;
     924             :         long x;
     925             : 
     926             :         if (mem_cgroup_disabled())
     927             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
     928             : 
     929             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     930             :         x = atomic_long_read(&pn->lruvec_stat[idx]);
     931             : #ifdef CONFIG_SMP
     932             :         if (x < 0)
     933             :                 x = 0;
     934             : #endif
     935             :         return x;
     936             : }
     937             : 
     938             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
     939             :                                                     enum node_stat_item idx)
     940             : {
     941             :         struct mem_cgroup_per_node *pn;
     942             :         long x = 0;
     943             :         int cpu;
     944             : 
     945             :         if (mem_cgroup_disabled())
     946             :                 return node_page_state(lruvec_pgdat(lruvec), idx);
     947             : 
     948             :         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
     949             :         for_each_possible_cpu(cpu)
     950             :                 x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
     951             : #ifdef CONFIG_SMP
     952             :         if (x < 0)
     953             :                 x = 0;
     954             : #endif
     955             :         return x;
     956             : }
     957             : 
     958             : void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
     959             :                               int val);
     960             : void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
     961             : 
     962             : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
     963             :                                          int val)
     964             : {
     965             :         unsigned long flags;
     966             : 
     967             :         local_irq_save(flags);
     968             :         __mod_lruvec_kmem_state(p, idx, val);
     969             :         local_irq_restore(flags);
     970             : }
     971             : 
     972             : static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
     973             :                                           enum node_stat_item idx, int val)
     974             : {
     975             :         unsigned long flags;
     976             : 
     977             :         local_irq_save(flags);
     978             :         __mod_memcg_lruvec_state(lruvec, idx, val);
     979             :         local_irq_restore(flags);
     980             : }
     981             : 
     982             : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
     983             :                                                 gfp_t gfp_mask,
     984             :                                                 unsigned long *total_scanned);
     985             : 
     986             : void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
     987             :                           unsigned long count);
     988             : 
     989             : static inline void count_memcg_events(struct mem_cgroup *memcg,
     990             :                                       enum vm_event_item idx,
     991             :                                       unsigned long count)
     992             : {
     993             :         unsigned long flags;
     994             : 
     995             :         local_irq_save(flags);
     996             :         __count_memcg_events(memcg, idx, count);
     997             :         local_irq_restore(flags);
     998             : }
     999             : 
    1000             : static inline void count_memcg_page_event(struct page *page,
    1001             :                                           enum vm_event_item idx)
    1002             : {
    1003             :         struct mem_cgroup *memcg = page_memcg(page);
    1004             : 
    1005             :         if (memcg)
    1006             :                 count_memcg_events(memcg, idx, 1);
    1007             : }
    1008             : 
    1009             : static inline void count_memcg_event_mm(struct mm_struct *mm,
    1010             :                                         enum vm_event_item idx)
    1011             : {
    1012             :         struct mem_cgroup *memcg;
    1013             : 
    1014             :         if (mem_cgroup_disabled())
    1015             :                 return;
    1016             : 
    1017             :         rcu_read_lock();
    1018             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1019             :         if (likely(memcg))
    1020             :                 count_memcg_events(memcg, idx, 1);
    1021             :         rcu_read_unlock();
    1022             : }
    1023             : 
    1024             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1025             :                                       enum memcg_memory_event event)
    1026             : {
    1027             :         bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
    1028             :                           event == MEMCG_SWAP_FAIL;
    1029             : 
    1030             :         atomic_long_inc(&memcg->memory_events_local[event]);
    1031             :         if (!swap_event)
    1032             :                 cgroup_file_notify(&memcg->events_local_file);
    1033             : 
    1034             :         do {
    1035             :                 atomic_long_inc(&memcg->memory_events[event]);
    1036             :                 if (swap_event)
    1037             :                         cgroup_file_notify(&memcg->swap_events_file);
    1038             :                 else
    1039             :                         cgroup_file_notify(&memcg->events_file);
    1040             : 
    1041             :                 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
    1042             :                         break;
    1043             :                 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
    1044             :                         break;
    1045             :         } while ((memcg = parent_mem_cgroup(memcg)) &&
    1046             :                  !mem_cgroup_is_root(memcg));
    1047             : }
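
Note: memcg_memory_event() first bumps the cgroup-local counter (and pokes the
local events file for non-swap events), then walks up the hierarchy, incrementing
each level's hierarchical counter and notifying memory.events or
memory.swap.events as appropriate. The walk stops at the root memcg, on cgroup
v1, or when CGRP_ROOT_MEMORY_LOCAL_EVENTS is set on the default hierarchy. A
hedged caller sketch (the wrapper is hypothetical; MEMCG_MAX is defined earlier
in this header):

        /* Illustrative only: note that a cgroup's usage ran into memory.max. */
        static void note_limit_hit(struct mem_cgroup *memcg)
        {
                memcg_memory_event(memcg, MEMCG_MAX);
        }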
    1048             : 
    1049             : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1050             :                                          enum memcg_memory_event event)
    1051             : {
    1052             :         struct mem_cgroup *memcg;
    1053             : 
    1054             :         if (mem_cgroup_disabled())
    1055             :                 return;
    1056             : 
    1057             :         rcu_read_lock();
    1058             :         memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
    1059             :         if (likely(memcg))
    1060             :                 memcg_memory_event(memcg, event);
    1061             :         rcu_read_unlock();
    1062             : }
    1063             : 
    1064             : void split_page_memcg(struct page *head, unsigned int nr);
    1065             : 
    1066             : #else /* CONFIG_MEMCG */
    1067             : 
    1068             : #define MEM_CGROUP_ID_SHIFT     0
    1069             : #define MEM_CGROUP_ID_MAX       0
    1070             : 
    1071             : struct mem_cgroup;
    1072             : 
    1073        5842 : static inline struct mem_cgroup *page_memcg(struct page *page)
    1074             : {
    1075        5842 :         return NULL;
    1076             : }
    1077             : 
    1078       24154 : static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
    1079             : {
    1080       24154 :         WARN_ON_ONCE(!rcu_read_lock_held());
    1081       24150 :         return NULL;
    1082             : }
    1083             : 
    1084             : static inline struct mem_cgroup *page_memcg_check(struct page *page)
    1085             : {
    1086             :         return NULL;
    1087             : }
    1088             : 
    1089             : static inline bool PageMemcgKmem(struct page *page)
    1090             : {
    1091             :         return false;
    1092             : }
    1093             : 
    1094             : static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
    1095             : {
    1096             :         return true;
    1097             : }
    1098             : 
    1099       24150 : static inline bool mem_cgroup_disabled(void)
    1100             : {
    1101       24150 :         return true;
    1102             : }
    1103             : 
    1104             : static inline void memcg_memory_event(struct mem_cgroup *memcg,
    1105             :                                       enum memcg_memory_event event)
    1106             : {
    1107             : }
    1108             : 
    1109           0 : static inline void memcg_memory_event_mm(struct mm_struct *mm,
    1110             :                                          enum memcg_memory_event event)
    1111             : {
    1112           0 : }
    1113             : 
    1114           0 : static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
    1115             :                                                   struct mem_cgroup *memcg,
    1116             :                                                   bool in_low_reclaim)
    1117             : {
    1118           0 :         return 0;
    1119             : }
    1120             : 
    1121           0 : static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
    1122             :                                                    struct mem_cgroup *memcg)
    1123             : {
    1124           0 : }
    1125             : 
    1126           0 : static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
    1127             : {
    1128           0 :         return false;
    1129             : }
    1130             : 
    1131           0 : static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
    1132             : {
    1133           0 :         return false;
    1134             : }
    1135             : 
    1136       70455 : static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
    1137             :                                     gfp_t gfp_mask)
    1138             : {
    1139       70455 :         return 0;
    1140             : }
    1141             : 
    1142       32142 : static inline void mem_cgroup_uncharge(struct page *page)
    1143             : {
    1144       32142 : }
    1145             : 
    1146       26287 : static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
    1147             : {
    1148       26287 : }
    1149             : 
    1150           0 : static inline void mem_cgroup_migrate(struct page *old, struct page *new)
    1151             : {
    1152           0 : }
    1153             : 
    1154           2 : static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
    1155             :                                                struct pglist_data *pgdat)
    1156             : {
    1157           2 :         return &pgdat->__lruvec;
    1158             : }
    1159             : 
    1160       25434 : static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
    1161             :                                                     struct pglist_data *pgdat)
    1162             : {
    1163       25434 :         return &pgdat->__lruvec;
    1164             : }
    1165             : 
    1166      151036 : static inline bool lruvec_holds_page_lru_lock(struct page *page,
    1167             :                                               struct lruvec *lruvec)
    1168             : {
    1169      151036 :         pg_data_t *pgdat = page_pgdat(page);
    1170             : 
    1171      151036 :         return lruvec == &pgdat->__lruvec;
    1172             : }
    1173             : 
    1174             : static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
    1175             : {
    1176             :         return NULL;
    1177             : }
    1178             : 
    1179           0 : static inline bool mm_match_cgroup(struct mm_struct *mm,
    1180             :                 struct mem_cgroup *memcg)
    1181             : {
    1182           0 :         return true;
    1183             : }
    1184             : 
    1185          20 : static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
    1186             : {
    1187          20 :         return NULL;
    1188             : }
    1189             : 
    1190           7 : static inline void mem_cgroup_put(struct mem_cgroup *memcg)
    1191             : {
    1192           7 : }
    1193             : 
    1194           0 : static inline struct lruvec *lock_page_lruvec(struct page *page)
    1195             : {
    1196           0 :         struct pglist_data *pgdat = page_pgdat(page);
    1197             : 
    1198           0 :         spin_lock(&pgdat->__lruvec.lru_lock);
    1199           0 :         return &pgdat->__lruvec;
    1200             : }
    1201             : 
    1202          13 : static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
    1203             : {
    1204          13 :         struct pglist_data *pgdat = page_pgdat(page);
    1205             : 
    1206          13 :         spin_lock_irq(&pgdat->__lruvec.lru_lock);
    1207          13 :         return &pgdat->__lruvec;
    1208             : }
    1209             : 
    1210       27669 : static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
    1211             :                 unsigned long *flagsp)
    1212             : {
    1213       27669 :         struct pglist_data *pgdat = page_pgdat(page);
    1214             : 
    1215       27669 :         spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
    1216       27669 :         return &pgdat->__lruvec;
    1217             : }
    1218             : 
    1219             : static inline struct mem_cgroup *
    1220           0 : mem_cgroup_iter(struct mem_cgroup *root,
    1221             :                 struct mem_cgroup *prev,
    1222             :                 struct mem_cgroup_reclaim_cookie *reclaim)
    1223             : {
    1224           0 :         return NULL;
    1225             : }
    1226             : 
    1227             : static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
    1228             :                                          struct mem_cgroup *prev)
    1229             : {
    1230             : }
    1231             : 
    1232             : static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
    1233             :                 int (*fn)(struct task_struct *, void *), void *arg)
    1234             : {
    1235             :         return 0;
    1236             : }
    1237             : 
    1238           0 : static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
    1239             : {
    1240           0 :         return 0;
    1241             : }
    1242             : 
    1243           0 : static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
    1244             : {
    1245           0 :         WARN_ON_ONCE(id);
    1246             :         /* XXX: This should always return root_mem_cgroup */
    1247           0 :         return NULL;
    1248             : }
    1249             : 
    1250             : static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
    1251             : {
    1252             :         return NULL;
    1253             : }
    1254             : 
    1255       41295 : static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
    1256             : {
    1257       17135 :         return NULL;
    1258             : }
    1259             : 
    1260             : static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
    1261             : {
    1262             :         return true;
    1263             : }
    1264             : 
    1265             : static inline
    1266             : unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
    1267             :                 enum lru_list lru, int zone_idx)
    1268             : {
    1269             :         return 0;
    1270             : }
    1271             : 
    1272           0 : static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
    1273             : {
    1274           0 :         return 0;
    1275             : }
    1276             : 
    1277             : static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
    1278             : {
    1279             :         return 0;
    1280             : }
    1281             : 
    1282             : static inline void
    1283           0 : mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
    1284             : {
    1285           0 : }
    1286             : 
    1287             : static inline void
    1288             : mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
    1289             : {
    1290             : }
    1291             : 
    1292     1728636 : static inline struct mem_cgroup *lock_page_memcg(struct page *page)
    1293             : {
    1294     1728636 :         return NULL;
    1295             : }
    1296             : 
    1297        1284 : static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
    1298             : {
    1299        1284 : }
    1300             : 
    1301     1726892 : static inline void unlock_page_memcg(struct page *page)
    1302             : {
    1303     1725608 : }
    1304             : 
    1305       51836 : static inline void mem_cgroup_handle_over_high(void)
    1306             : {
    1307       51836 : }
    1308             : 
    1309             : static inline void mem_cgroup_enter_user_fault(void)
    1310             : {
    1311             : }
    1312             : 
    1313      162524 : static inline void mem_cgroup_exit_user_fault(void)
    1314             : {
    1315      162524 : }
    1316             : 
    1317      162524 : static inline bool task_in_memcg_oom(struct task_struct *p)
    1318             : {
    1319      162524 :         return false;
    1320             : }
    1321             : 
    1322           0 : static inline bool mem_cgroup_oom_synchronize(bool wait)
    1323             : {
    1324           0 :         return false;
    1325             : }
    1326             : 
    1327           0 : static inline struct mem_cgroup *mem_cgroup_get_oom_group(
    1328             :         struct task_struct *victim, struct mem_cgroup *oom_domain)
    1329             : {
    1330           0 :         return NULL;
    1331             : }
    1332             : 
    1333             : static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
    1334             : {
    1335             : }
    1336             : 
    1337             : static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
    1338             : {
    1339             :         return 0;
    1340             : }
    1341             : 
    1342             : static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
    1343             :                                                    int idx)
    1344             : {
    1345             :         return 0;
    1346             : }
    1347             : 
    1348             : static inline void __mod_memcg_state(struct mem_cgroup *memcg,
    1349             :                                      int idx,
    1350             :                                      int nr)
    1351             : {
    1352             : }
    1353             : 
    1354             : static inline void mod_memcg_state(struct mem_cgroup *memcg,
    1355             :                                    int idx,
    1356             :                                    int nr)
    1357             : {
    1358             : }
    1359             : 
    1360           0 : static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
    1361             :                                               enum node_stat_item idx)
    1362             : {
    1363           0 :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1364             : }
    1365             : 
    1366             : static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
    1367             :                                                     enum node_stat_item idx)
    1368             : {
    1369             :         return node_page_state(lruvec_pgdat(lruvec), idx);
    1370             : }
    1371             : 
    1372             : static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
    1373             :                                             enum node_stat_item idx, int val)
    1374             : {
    1375             : }
    1376             : 
    1377           0 : static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1378             :                                            int val)
    1379             : {
    1380           0 :         struct page *page = virt_to_head_page(p);
    1381             : 
    1382           0 :         __mod_node_page_state(page_pgdat(page), idx, val);
    1383           0 : }
    1384             : 
    1385        1752 : static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
    1386             :                                          int val)
    1387             : {
    1388        1752 :         struct page *page = virt_to_head_page(p);
    1389             : 
    1390        1752 :         mod_node_page_state(page_pgdat(page), idx, val);
    1391        1752 : }
    1392             : 
    1393             : static inline
    1394           0 : unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1395             :                                             gfp_t gfp_mask,
    1396             :                                             unsigned long *total_scanned)
    1397             : {
    1398           0 :         return 0;
    1399             : }
    1400             : 
    1401          12 : static inline void split_page_memcg(struct page *head, unsigned int nr)
    1402             : {
    1403          12 : }
    1404             : 
    1405             : static inline void count_memcg_events(struct mem_cgroup *memcg,
    1406             :                                       enum vm_event_item idx,
    1407             :                                       unsigned long count)
    1408             : {
    1409             : }
    1410             : 
    1411       17135 : static inline void __count_memcg_events(struct mem_cgroup *memcg,
    1412             :                                         enum vm_event_item idx,
    1413             :                                         unsigned long count)
    1414             : {
    1415           0 : }
    1416             : 
    1417           2 : static inline void count_memcg_page_event(struct page *page,
    1418             :                                           int idx)
    1419             : {
    1420           2 : }
    1421             : 
    1422             : static inline
    1423      171951 : void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
    1424             : {
    1425      171951 : }
    1426             : 
    1427           0 : static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
    1428             : {
    1429           0 : }
    1430             : #endif /* CONFIG_MEMCG */
    1431             : 
    1432           0 : static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1433             : {
    1434           0 :         __mod_lruvec_kmem_state(p, idx, 1);
    1435           0 : }
    1436             : 
    1437           0 : static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
    1438             : {
    1439           0 :         __mod_lruvec_kmem_state(p, idx, -1);
    1440           0 : }
    1441             : 
    1442       24160 : static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
    1443             : {
    1444       24160 :         struct mem_cgroup *memcg;
    1445             : 
    1446       24160 :         memcg = lruvec_memcg(lruvec);
    1447       24160 :         if (!memcg)
    1448       24160 :                 return NULL;
    1449             :         memcg = parent_mem_cgroup(memcg);
    1450             :         if (!memcg)
    1451             :                 return NULL;
    1452             :         return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
    1453             : }
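
Note: parent_lruvec() maps a lruvec to the lruvec of its memcg's parent on the
same node, returning NULL at the root; because it is built from the
lruvec_memcg()/parent_mem_cgroup()/mem_cgroup_lruvec() helpers, the same caller
code works whether CONFIG_MEMCG is enabled or not (in the latter case the walk
covers only the node's own lruvec). A hedged sketch of the usual bottom-up walk
(the function is hypothetical and only counts levels; real callers apply their
update at every level of the hierarchy):

        /* Illustrative: how many lruvecs lie on the path up to the root? */
        static int lruvec_depth(struct lruvec *lruvec)
        {
                int depth = 0;

                do {
                        depth++;
                } while ((lruvec = parent_lruvec(lruvec)));

                return depth;
        }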
    1454             : 
    1455           0 : static inline void unlock_page_lruvec(struct lruvec *lruvec)
    1456             : {
    1457           0 :         spin_unlock(&lruvec->lru_lock);
    1458             : }
    1459             : 
    1460          13 : static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
    1461             : {
    1462          13 :         spin_unlock_irq(&lruvec->lru_lock);
    1463           2 : }
    1464             : 
    1465       27669 : static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
    1466             :                 unsigned long flags)
    1467             : {
    1468       27669 :         spin_unlock_irqrestore(&lruvec->lru_lock, flags);
    1469       21887 : }
    1470             : 
     1471             : /* Don't lock again iff the page's lruvec is already locked */
    1472          16 : static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
    1473             :                 struct lruvec *locked_lruvec)
    1474             : {
    1475          16 :         if (locked_lruvec) {
    1476          14 :                 if (lruvec_holds_page_lru_lock(page, locked_lruvec))
    1477             :                         return locked_lruvec;
    1478             : 
    1479           0 :                 unlock_page_lruvec_irq(locked_lruvec);
    1480             :         }
    1481             : 
    1482           2 :         return lock_page_lruvec_irq(page);
    1483             : }
    1484             : 
     1485             : /* Don't lock again iff the page's lruvec is already locked */
    1486      178601 : static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
    1487             :                 struct lruvec *locked_lruvec, unsigned long *flags)
    1488             : {
    1489      178601 :         if (locked_lruvec) {
    1490      151022 :                 if (lruvec_holds_page_lru_lock(page, locked_lruvec))
    1491             :                         return locked_lruvec;
    1492             : 
    1493           0 :                 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
    1494             :         }
    1495             : 
    1496       27579 :         return lock_page_lruvec_irqsave(page, flags);
    1497             : }
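
Note: the relock helpers implement a lock-batching pattern: while a caller walks
a sequence of pages, the lru_lock is kept as long as consecutive pages map to
the same lruvec and is only dropped and retaken when the lruvec changes. A
hedged sketch of the pattern (hypothetical function over a caller-private page
list; the real users are the bulk LRU paths such as page freeing):

        /* Walk a private list of pages, taking each page's lru_lock lazily. */
        static void walk_pages(struct list_head *pages)
        {
                struct lruvec *lruvec = NULL;
                unsigned long flags;
                struct page *page;

                list_for_each_entry(page, pages, lru) {
                        lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                        /* ... operate on the page under lruvec->lru_lock ... */
                }
                if (lruvec)
                        unlock_page_lruvec_irqrestore(lruvec, flags);
        }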
    1498             : 
    1499             : #ifdef CONFIG_CGROUP_WRITEBACK
    1500             : 
    1501             : struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
    1502             : void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
    1503             :                          unsigned long *pheadroom, unsigned long *pdirty,
    1504             :                          unsigned long *pwriteback);
    1505             : 
    1506             : void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
    1507             :                                              struct bdi_writeback *wb);
    1508             : 
    1509             : static inline void mem_cgroup_track_foreign_dirty(struct page *page,
    1510             :                                                   struct bdi_writeback *wb)
    1511             : {
    1512             :         if (mem_cgroup_disabled())
    1513             :                 return;
    1514             : 
    1515             :         if (unlikely(&page_memcg(page)->css != wb->memcg_css))
    1516             :                 mem_cgroup_track_foreign_dirty_slowpath(page, wb);
    1517             : }
    1518             : 
    1519             : void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
    1520             : 
    1521             : #else   /* CONFIG_CGROUP_WRITEBACK */
    1522             : 
    1523        1284 : static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
    1524             : {
    1525        1284 :         return NULL;
    1526             : }
    1527             : 
    1528             : static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
    1529             :                                        unsigned long *pfilepages,
    1530             :                                        unsigned long *pheadroom,
    1531             :                                        unsigned long *pdirty,
    1532             :                                        unsigned long *pwriteback)
    1533             : {
    1534             : }
    1535             : 
    1536        2493 : static inline void mem_cgroup_track_foreign_dirty(struct page *page,
    1537             :                                                   struct bdi_writeback *wb)
    1538             : {
    1539             : }
    1540             : 
    1541           0 : static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
    1542             : {
    1543           0 : }
    1544             : 
    1545             : #endif  /* CONFIG_CGROUP_WRITEBACK */
    1546             : 
    1547             : struct sock;
    1548             : bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
    1549             : void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
    1550             : #ifdef CONFIG_MEMCG
    1551             : extern struct static_key_false memcg_sockets_enabled_key;
    1552             : #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
    1553             : void mem_cgroup_sk_alloc(struct sock *sk);
    1554             : void mem_cgroup_sk_free(struct sock *sk);
    1555             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1556             : {
    1557             :         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
    1558             :                 return true;
    1559             :         do {
    1560             :                 if (time_before(jiffies, memcg->socket_pressure))
    1561             :                         return true;
    1562             :         } while ((memcg = parent_mem_cgroup(memcg)));
    1563             :         return false;
    1564             : }
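
Note: this reports pressure when, on cgroup v1, the memcg's tcp-memory pressure
flag is set, or when the socket_pressure timestamp of the memcg or any ancestor
is still in the future. A hedged caller sketch (hypothetical helper; it assumes
<net/sock.h> for the full struct sock definition, where sk_memcg is the socket's
owning memcg):

        /* Illustrative: should this socket back off its memory consumption? */
        static bool sk_memcg_pressure(struct sock *sk)
        {
                return mem_cgroup_sockets_enabled && sk->sk_memcg &&
                       mem_cgroup_under_socket_pressure(sk->sk_memcg);
        }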
    1565             : 
    1566             : extern int memcg_expand_shrinker_maps(int new_id);
    1567             : 
    1568             : extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
    1569             :                                    int nid, int shrinker_id);
    1570             : #else
    1571             : #define mem_cgroup_sockets_enabled 0
    1572         833 : static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
    1573         738 : static inline void mem_cgroup_sk_free(struct sock *sk) { };
    1574             : static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
    1575             : {
    1576             :         return false;
    1577             : }
    1578             : 
    1579             : static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
    1580             :                                           int nid, int shrinker_id)
    1581             : {
    1582             : }
    1583             : #endif
    1584             : 
    1585             : #ifdef CONFIG_MEMCG_KMEM
    1586             : int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
    1587             : void __memcg_kmem_uncharge_page(struct page *page, int order);
    1588             : 
    1589             : struct obj_cgroup *get_obj_cgroup_from_current(void);
    1590             : 
    1591             : int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
    1592             : void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
    1593             : 
    1594             : extern struct static_key_false memcg_kmem_enabled_key;
    1595             : 
    1596             : extern int memcg_nr_cache_ids;
    1597             : void memcg_get_cache_ids(void);
    1598             : void memcg_put_cache_ids(void);
    1599             : 
    1600             : /*
    1601             :  * Helper macro to loop through all memcg-specific caches. Callers must still
    1602             :  * check if the cache is valid (it is either valid or NULL).
     1603             :  * The slab_mutex must be held when looping through those caches.
    1604             :  */
    1605             : #define for_each_memcg_cache_index(_idx)        \
    1606             :         for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
    1607             : 
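
Note: for_each_memcg_cache_index() simply iterates the index range
0..memcg_nr_cache_ids-1; per the comment above, each slot may be valid or NULL
and slab_mutex must be held across the walk. A hedged sketch (the per-index
lookup helper is hypothetical; real users live inside the slab allocator):

        /* Illustrative walk over the per-memcg cache slots of one root cache.
         * lookup_memcg_cache() is a made-up helper; the caller is assumed to
         * hold slab_mutex for the duration of the loop.                        */
        static void visit_memcg_caches(struct kmem_cache *root)
        {
                int idx;

                for_each_memcg_cache_index(idx) {
                        struct kmem_cache *c = lookup_memcg_cache(root, idx); /* hypothetical */

                        if (!c)
                                continue;
                        /* ... inspect or shrink c ... */
                }
        }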
    1608             : static inline bool memcg_kmem_enabled(void)
    1609             : {
    1610             :         return static_branch_likely(&memcg_kmem_enabled_key);
    1611             : }
    1612             : 
    1613             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1614             :                                          int order)
    1615             : {
    1616             :         if (memcg_kmem_enabled())
    1617             :                 return __memcg_kmem_charge_page(page, gfp, order);
    1618             :         return 0;
    1619             : }
    1620             : 
    1621             : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1622             : {
    1623             :         if (memcg_kmem_enabled())
    1624             :                 __memcg_kmem_uncharge_page(page, order);
    1625             : }
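
Note: these are static-branch-guarded wrappers: when kmem accounting is enabled
they charge or uncharge an order-N kernel page against the current task's memcg,
otherwise they compile away. Most allocation sites get the same behaviour simply
by passing __GFP_ACCOUNT to the page allocator; the sketch below (hypothetical
helper) calls the wrapper directly to show the error handling:

        /* Illustrative: allocate an order-0 page and charge it to the current
         * task's memcg, freeing the page again if the charge fails.           */
        static struct page *alloc_accounted_page(gfp_t gfp)
        {
                struct page *page = alloc_pages(gfp, 0);

                if (page && memcg_kmem_charge_page(page, gfp, 0)) {
                        __free_pages(page, 0);
                        return NULL;
                }
                return page;
        }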
    1626             : 
    1627             : /*
    1628             :  * A helper for accessing memcg's kmem_id, used for getting
    1629             :  * corresponding LRU lists.
    1630             :  */
    1631             : static inline int memcg_cache_id(struct mem_cgroup *memcg)
    1632             : {
    1633             :         return memcg ? memcg->kmemcg_id : -1;
    1634             : }
    1635             : 
    1636             : struct mem_cgroup *mem_cgroup_from_obj(void *p);
    1637             : 
    1638             : #else
    1639             : 
    1640             : static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1641             :                                          int order)
    1642             : {
    1643             :         return 0;
    1644             : }
    1645             : 
    1646           0 : static inline void memcg_kmem_uncharge_page(struct page *page, int order)
    1647             : {
    1648           0 : }
    1649             : 
    1650             : static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
    1651             :                                            int order)
    1652             : {
    1653             :         return 0;
    1654             : }
    1655             : 
    1656             : static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
    1657             : {
    1658             : }
    1659             : 
    1660             : #define for_each_memcg_cache_index(_idx)        \
    1661             :         for (; NULL; )
    1662             : 
    1663      192956 : static inline bool memcg_kmem_enabled(void)
    1664             : {
    1665      192956 :         return false;
    1666             : }
    1667             : 
    1668           3 : static inline int memcg_cache_id(struct mem_cgroup *memcg)
    1669             : {
    1670           3 :         return -1;
    1671             : }
    1672             : 
    1673         445 : static inline void memcg_get_cache_ids(void)
    1674             : {
    1675         445 : }
    1676             : 
    1677         198 : static inline void memcg_put_cache_ids(void)
    1678             : {
    1679         247 : }
    1680             : 
    1681             : static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
    1682             : {
    1683             :        return NULL;
    1684             : }
    1685             : 
    1686             : #endif /* CONFIG_MEMCG_KMEM */
    1687             : 
    1688             : #endif /* _LINUX_MEMCONTROL_H */

Generated by: LCOV version 1.14