LCOV - code coverage report
Current view: top level - mm - vmstat.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
                 Hit    Total    Coverage
Lines:           316      770      41.0 %
Functions:        32       79      40.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  *  linux/mm/vmstat.c
       4             :  *
       5             :  *  Manages VM statistics
       6             :  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
       7             :  *
       8             :  *  zoned VM statistics
       9             :  *  Copyright (C) 2006 Silicon Graphics, Inc.,
      10             :  *              Christoph Lameter <christoph@lameter.com>
      11             :  *  Copyright (C) 2008-2014 Christoph Lameter
      12             :  */
      13             : #include <linux/fs.h>
      14             : #include <linux/mm.h>
      15             : #include <linux/err.h>
      16             : #include <linux/module.h>
      17             : #include <linux/slab.h>
      18             : #include <linux/cpu.h>
      19             : #include <linux/cpumask.h>
      20             : #include <linux/vmstat.h>
      21             : #include <linux/proc_fs.h>
      22             : #include <linux/seq_file.h>
      23             : #include <linux/debugfs.h>
      24             : #include <linux/sched.h>
      25             : #include <linux/math64.h>
      26             : #include <linux/writeback.h>
      27             : #include <linux/compaction.h>
      28             : #include <linux/mm_inline.h>
      29             : #include <linux/page_ext.h>
      30             : #include <linux/page_owner.h>
      31             : 
      32             : #include "internal.h"
      33             : 
      34             : #define NUMA_STATS_THRESHOLD (U16_MAX - 2)
      35             : 
      36             : #ifdef CONFIG_NUMA
      37             : int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
      38             : 
      39             : /* zero numa counters within a zone */
      40           0 : static void zero_zone_numa_counters(struct zone *zone)
      41             : {
      42           0 :         int item, cpu;
      43             : 
      44           0 :         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
      45           0 :                 atomic_long_set(&zone->vm_numa_stat[item], 0);
      46           0 :                 for_each_online_cpu(cpu)
      47           0 :                         per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
      48           0 :                                                 = 0;
      49             :         }
      50           0 : }
      51             : 
      52             : /* zero numa counters of all the populated zones */
      53           0 : static void zero_zones_numa_counters(void)
      54             : {
      55           0 :         struct zone *zone;
      56             : 
      57           0 :         for_each_populated_zone(zone)
      58           0 :                 zero_zone_numa_counters(zone);
      59           0 : }
      60             : 
      61             : /* zero global numa counters */
      62           0 : static void zero_global_numa_counters(void)
      63             : {
      64           0 :         int item;
      65             : 
      66           0 :         for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
      67           0 :                 atomic_long_set(&vm_numa_stat[item], 0);
      68           0 : }
      69             : 
      70           0 : static void invalid_numa_statistics(void)
      71             : {
      72           0 :         zero_zones_numa_counters();
      73           0 :         zero_global_numa_counters();
      74             : }
      75             : 
      76             : static DEFINE_MUTEX(vm_numa_stat_lock);
      77             : 
      78           0 : int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
      79             :                 void *buffer, size_t *length, loff_t *ppos)
      80             : {
      81           0 :         int ret, oldval;
      82             : 
      83           0 :         mutex_lock(&vm_numa_stat_lock);
      84           0 :         if (write)
      85           0 :                 oldval = sysctl_vm_numa_stat;
      86           0 :         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
      87           0 :         if (ret || !write)
      88           0 :                 goto out;
      89             : 
      90           0 :         if (oldval == sysctl_vm_numa_stat)
      91           0 :                 goto out;
      92           0 :         else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
      93           0 :                 static_branch_enable(&vm_numa_stat_key);
      94           0 :                 pr_info("enable numa statistics\n");
      95             :         } else {
      96           0 :                 static_branch_disable(&vm_numa_stat_key);
      97           0 :                 invalid_numa_statistics();
      98           0 :                 pr_info("disable numa statistics, and clear numa counters\n");
      99             :         }
     100             : 
     101           0 : out:
     102           0 :         mutex_unlock(&vm_numa_stat_lock);
     103           0 :         return ret;
     104             : }
     105             : #endif
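
As an orientation aid (not part of the instrumented file above): the handler is reached through a "numa_stat" entry in the vm sysctl table in kernel/sysctl.c. The sketch below is an approximation from memory, and the array name numa_stat_ctl_sketch is invented for illustration; consult the real vm_table for the exact entry in this kernel version.

        static struct ctl_table numa_stat_ctl_sketch[] = {
                {
                        .procname       = "numa_stat",
                        .data           = &sysctl_vm_numa_stat,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = sysctl_vm_numa_stat_handler,
                        .extra1         = SYSCTL_ZERO,
                        .extra2         = SYSCTL_ONE,
                },
                { }
        };

Writing 0 to /proc/sys/vm/numa_stat thus disables the static branch and clears all NUMA counters via invalid_numa_statistics(); writing 1 re-enables collection without restoring the cleared history.
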
     106             : 
     107             : #ifdef CONFIG_VM_EVENT_COUNTERS
     108             : DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
     109             : EXPORT_PER_CPU_SYMBOL(vm_event_states);
     110             : 
     111           0 : static void sum_vm_events(unsigned long *ret)
     112             : {
     113           0 :         int cpu;
     114           0 :         int i;
     115             : 
     116           0 :         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
     117             : 
     118           0 :         for_each_online_cpu(cpu) {
     119           0 :                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
     120             : 
     121           0 :                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
     122           0 :                         ret[i] += this->event[i];
     123             :         }
     124           0 : }
     125             : 
     126             : /*
     127             :  * Accumulate the vm event counters across all CPUs.
     128             :  * The result is unavoidably approximate - it can change
     129             :  * during and after execution of this function.
      130             :  */
     131           0 : void all_vm_events(unsigned long *ret)
     132             : {
     133           0 :         get_online_cpus();
     134           0 :         sum_vm_events(ret);
     135           0 :         put_online_cpus();
     136           0 : }
     137             : EXPORT_SYMBOL_GPL(all_vm_events);
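
A minimal caller sketch (illustrative only, not part of the file above): a reader takes a full snapshot into an array indexed by enum vm_event_item. The totals are approximate for the reason given in the comment, since other CPUs keep counting while we sum.

        unsigned long ev[NR_VM_EVENT_ITEMS];

        all_vm_events(ev);
        pr_info("page faults so far: %lu\n", ev[PGFAULT]);
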
     138             : 
     139             : /*
     140             :  * Fold the foreign cpu events into our own.
     141             :  *
     142             :  * This is adding to the events on one processor
     143             :  * but keeps the global counts constant.
     144             :  */
     145           0 : void vm_events_fold_cpu(int cpu)
     146             : {
     147           0 :         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
     148           0 :         int i;
     149             : 
     150           0 :         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
     151           0 :                 count_vm_events(i, fold_state->event[i]);
     152           0 :                 fold_state->event[i] = 0;
     153             :         }
     154           0 : }
     155             : 
     156             : #endif /* CONFIG_VM_EVENT_COUNTERS */
     157             : 
     158             : /*
     159             :  * Manage combined zone based / global counters
     160             :  *
     161             :  * vm_stat contains the global counters
     162             :  */
     163             : atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
     164             : atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
     165             : atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
     166             : EXPORT_SYMBOL(vm_zone_stat);
     167             : EXPORT_SYMBOL(vm_numa_stat);
     168             : EXPORT_SYMBOL(vm_node_stat);
     169             : 
     170             : #ifdef CONFIG_SMP
     171             : 
     172           0 : int calculate_pressure_threshold(struct zone *zone)
     173             : {
     174           0 :         int threshold;
     175           0 :         int watermark_distance;
     176             : 
     177             :         /*
     178             :          * As vmstats are not up to date, there is drift between the estimated
     179             :          * and real values. For high thresholds and a high number of CPUs, it
     180             :          * is possible for the min watermark to be breached while the estimated
     181             :          * value looks fine. The pressure threshold is a reduced value such
     182             :          * that even the maximum amount of drift will not accidentally breach
     183             :          * the min watermark
     184             :          */
     185           0 :         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
     186           0 :         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
     187             : 
     188             :         /*
     189             :          * Maximum threshold is 125
     190             :          */
     191           0 :         threshold = min(125, threshold);
     192             : 
     193           0 :         return threshold;
     194             : }
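
A worked example with made-up watermark values: if low_wmark_pages() exceeds min_wmark_pages() by 256 pages and 8 CPUs are online,

        threshold = min(125, max(1, 256 / 8)) = 32

so the worst-case combined drift of 8 * 32 = 256 pages is bounded by exactly the watermark gap the reduced threshold is meant to protect.
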
     195             : 
     196           5 : int calculate_normal_threshold(struct zone *zone)
     197             : {
     198           5 :         int threshold;
     199           5 :         int mem;        /* memory in 128 MB units */
     200             : 
     201             :         /*
     202             :          * The threshold scales with the number of processors and the amount
     203             :          * of memory per zone. More memory means that we can defer updates for
     204             :          * longer, more processors could lead to more contention.
     205             :          * fls() is used to have a cheap way of logarithmic scaling.
     206             :          *
     207             :          * Some sample thresholds:
     208             :          *
     209             :          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
     210             :          * ------------------------------------------------------------------
     211             :          * 8            1               1       0.9-1 GB        4
     212             :          * 16           2               2       0.9-1 GB        4
     213             :          * 20           2               2       1-2 GB          5
     214             :          * 24           2               2       2-4 GB          6
     215             :          * 28           2               2       4-8 GB          7
     216             :          * 32           2               2       8-16 GB         8
     217             :          * 4            2               2       <128M                1
     218             :          * 30           4               3       2-4 GB          5
     219             :          * 48           4               3       8-16 GB         8
     220             :          * 32           8               4       1-2 GB          4
     221             :          * 32           8               4       0.9-1GB         4
     222             :          * 10           16              5       <128M                1
     223             :          * 40           16              5       900M            4
     224             :          * 70           64              7       2-4 GB          5
     225             :          * 84           64              7       4-8 GB          6
     226             :          * 108          512             9       4-8 GB          6
     227             :          * 125          1024            10      8-16 GB         8
     228             :          * 125          1024            10      16-32 GB        9
     229             :          */
     230             : 
     231           5 :         mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
     232             : 
     233           5 :         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
     234             : 
     235             :         /*
     236             :          * Maximum threshold is 125
     237             :          */
     238           5 :         threshold = min(125, threshold);
     239             : 
     240           5 :         return threshold;
     241             : }
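
A worked example of the formula (illustrative numbers, assuming PAGE_SHIFT == 12 and 4 online CPUs): a zone managing about 1.5 GB has 393216 pages, so

        mem       = 393216 >> (27 - 12) = 12        /* 128 MB units */
        threshold = 2 * fls(4) * (1 + fls(12))
                  = 2 * 3 * (1 + 4) = 30

which is well under the 125 cap, so the uncapped value is used.
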
     242             : 
     243             : /*
     244             :  * Refresh the thresholds for each zone.
     245             :  */
     246           4 : void refresh_zone_stat_thresholds(void)
     247             : {
     248           4 :         struct pglist_data *pgdat;
     249           4 :         struct zone *zone;
     250           4 :         int cpu;
     251           4 :         int threshold;
     252             : 
     253             :         /* Zero current pgdat thresholds */
     254           8 :         for_each_online_pgdat(pgdat) {
     255          17 :                 for_each_online_cpu(cpu) {
     256          13 :                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
     257             :                 }
     258             :         }
     259             : 
     260          16 :         for_each_populated_zone(zone) {
     261           4 :                 struct pglist_data *pgdat = zone->zone_pgdat;
     262           4 :                 unsigned long max_drift, tolerate_drift;
     263             : 
     264           4 :                 threshold = calculate_normal_threshold(zone);
     265             : 
     266          21 :                 for_each_online_cpu(cpu) {
     267          13 :                         int pgdat_threshold;
     268             : 
     269          13 :                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
     270          13 :                                                         = threshold;
     271             : 
     272             :                         /* Base nodestat threshold on the largest populated zone. */
     273          13 :                         pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
     274          13 :                         per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
     275          13 :                                 = max(threshold, pgdat_threshold);
     276             :                 }
     277             : 
     278             :                 /*
     279             :                  * Only set percpu_drift_mark if there is a danger that
     280             :                  * NR_FREE_PAGES reports the low watermark is ok when in fact
     281             :                  * the min watermark could be breached by an allocation
     282             :                  */
     283           4 :                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
     284           4 :                 max_drift = num_online_cpus() * threshold;
     285           4 :                 if (max_drift > tolerate_drift)
     286           3 :                         zone->percpu_drift_mark = high_wmark_pages(zone) +
     287             :                                         max_drift;
     288             :         }
     289           4 : }
     290             : 
     291           1 : void set_pgdat_percpu_threshold(pg_data_t *pgdat,
     292             :                                 int (*calculate_pressure)(struct zone *))
     293             : {
     294           1 :         struct zone *zone;
     295           1 :         int cpu;
     296           1 :         int threshold;
     297           1 :         int i;
     298             : 
     299           2 :         for (i = 0; i < pgdat->nr_zones; i++) {
     300           1 :                 zone = &pgdat->node_zones[i];
     301           1 :                 if (!zone->percpu_drift_mark)
     302           0 :                         continue;
     303             : 
     304           1 :                 threshold = (*calculate_pressure)(zone);
     305           6 :                 for_each_online_cpu(cpu)
     306           4 :                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
     307           4 :                                                         = threshold;
     308             :         }
     309           1 : }
     310             : 
     311             : /*
     312             :  * For use when we know that interrupts are disabled,
     313             :  * or when we know that preemption is disabled and that
     314             :  * particular counter cannot be updated from interrupt context.
     315             :  */
     316      257179 : void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
     317             :                            long delta)
     318             : {
     319      257179 :         struct per_cpu_pageset __percpu *pcp = zone->pageset;
     320      257179 :         s8 __percpu *p = pcp->vm_stat_diff + item;
     321      257179 :         long x;
     322      257179 :         long t;
     323             : 
     324      257179 :         x = delta + __this_cpu_read(*p);
     325             : 
     326      257179 :         t = __this_cpu_read(pcp->stat_threshold);
     327             : 
     328      257179 :         if (unlikely(abs(x) > t)) {
     329       12587 :                 zone_page_state_add(x, zone, item);
     330       12587 :                 x = 0;
     331             :         }
     332      257179 :         __this_cpu_write(*p, x);
     333      257179 : }
     334             : EXPORT_SYMBOL(__mod_zone_page_state);
     335             : 
     336      561713 : void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
     337             :                                 long delta)
     338             : {
     339      561713 :         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
     340      561713 :         s8 __percpu *p = pcp->vm_node_stat_diff + item;
     341      561713 :         long x;
     342      561713 :         long t;
     343             : 
     344      561713 :         if (vmstat_item_in_bytes(item)) {
     345             :                 /*
     346             :                  * Only cgroups use subpage accounting right now; at
     347             :                  * the global level, these items still change in
     348             :                  * multiples of whole pages. Store them as pages
     349             :                  * internally to keep the per-cpu counters compact.
     350             :                  */
     351           0 :                 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
     352           0 :                 delta >>= PAGE_SHIFT;
     353             :         }
     354             : 
     355      561713 :         x = delta + __this_cpu_read(*p);
     356             : 
     357      561713 :         t = __this_cpu_read(pcp->stat_threshold);
     358             : 
     359      561713 :         if (unlikely(abs(x) > t)) {
     360       15270 :                 node_page_state_add(x, pgdat, item);
     361       15270 :                 x = 0;
     362             :         }
     363      561713 :         __this_cpu_write(*p, x);
     364      561713 : }
     365             : EXPORT_SYMBOL(__mod_node_page_state);
     366             : 
     367             : /*
     368             :  * Optimized increment and decrement functions.
     369             :  *
     370             :  * These are only for a single page and therefore can take a struct page *
     371             :  * argument instead of struct zone *. This allows the inclusion of the code
     372             :  * generated for page_zone(page) into the optimized functions.
     373             :  *
     374             :  * No overflow check is necessary and therefore the differential can be
     375             :  * incremented or decremented in place which may allow the compilers to
     376             :  * generate better code.
     377             :  * The increment or decrement is known and therefore one boundary check can
     378             :  * be omitted.
     379             :  *
     380             :  * NOTE: These functions are very performance sensitive. Change only
     381             :  * with care.
     382             :  *
     383             :  * Some processors have inc/dec instructions that are atomic vs an interrupt.
     384             :  * However, the code must first determine the differential location in a zone
     385             :  * based on the processor number and then inc/dec the counter. There is no
     386             :  * guarantee without disabling preemption that the processor will not change
     387             :  * in between and therefore the atomicity vs. interrupt cannot be exploited
     388             :  * in a useful way here.
     389             :  */
     390        2515 : void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
     391             : {
     392        2515 :         struct per_cpu_pageset __percpu *pcp = zone->pageset;
     393        2515 :         s8 __percpu *p = pcp->vm_stat_diff + item;
     394        2515 :         s8 v, t;
     395             : 
     396        2515 :         v = __this_cpu_inc_return(*p);
     397        2515 :         t = __this_cpu_read(pcp->stat_threshold);
     398        2515 :         if (unlikely(v > t)) {
     399          43 :                 s8 overstep = t >> 1;
     400             : 
     401          43 :                 zone_page_state_add(v + overstep, zone, item);
     402        2515 :                 __this_cpu_write(*p, -overstep);
     403             :         }
     404        2515 : }
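
An illustrative walkthrough of the overstep trick, assuming the maximum threshold of 125: the increment that pushes the per-cpu diff to 126 computes overstep = 125 >> 1 = 62, folds 126 + 62 = 188 pages into the zone counter and leaves the per-cpu diff at -62. The counter is deliberately overstepped, so the next 187 increments complete without touching the shared atomic at all.
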
     405             : 
     406        2515 : void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
     407             : {
     408        2515 :         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
     409        2515 :         s8 __percpu *p = pcp->vm_node_stat_diff + item;
     410        2515 :         s8 v, t;
     411             : 
     412        2515 :         VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
     413             : 
     414        2515 :         v = __this_cpu_inc_return(*p);
     415        2515 :         t = __this_cpu_read(pcp->stat_threshold);
     416        2515 :         if (unlikely(v > t)) {
     417          54 :                 s8 overstep = t >> 1;
     418             : 
     419          54 :                 node_page_state_add(v + overstep, pgdat, item);
     420        2515 :                 __this_cpu_write(*p, -overstep);
     421             :         }
     422        2515 : }
     423             : 
     424        2515 : void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
     425             : {
     426        2515 :         __inc_zone_state(page_zone(page), item);
     427        2515 : }
     428             : EXPORT_SYMBOL(__inc_zone_page_state);
     429             : 
     430        2515 : void __inc_node_page_state(struct page *page, enum node_stat_item item)
     431             : {
     432        2515 :         __inc_node_state(page_pgdat(page), item);
     433        2515 : }
     434             : EXPORT_SYMBOL(__inc_node_page_state);
     435             : 
     436           0 : void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
     437             : {
     438           0 :         struct per_cpu_pageset __percpu *pcp = zone->pageset;
     439           0 :         s8 __percpu *p = pcp->vm_stat_diff + item;
     440           0 :         s8 v, t;
     441             : 
     442           0 :         v = __this_cpu_dec_return(*p);
     443           0 :         t = __this_cpu_read(pcp->stat_threshold);
     444           0 :         if (unlikely(v < - t)) {
     445           0 :                 s8 overstep = t >> 1;
     446             : 
     447           0 :                 zone_page_state_add(v - overstep, zone, item);
     448           0 :                 __this_cpu_write(*p, overstep);
     449             :         }
     450           0 : }
     451             : 
     452           0 : void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
     453             : {
     454           0 :         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
     455           0 :         s8 __percpu *p = pcp->vm_node_stat_diff + item;
     456           0 :         s8 v, t;
     457             : 
     458           0 :         VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
     459             : 
     460           0 :         v = __this_cpu_dec_return(*p);
     461           0 :         t = __this_cpu_read(pcp->stat_threshold);
     462           0 :         if (unlikely(v < - t)) {
     463           0 :                 s8 overstep = t >> 1;
     464             : 
     465           0 :                 node_page_state_add(v - overstep, pgdat, item);
     466           0 :                 __this_cpu_write(*p, overstep);
     467             :         }
     468           0 : }
     469             : 
     470           0 : void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
     471             : {
     472           0 :         __dec_zone_state(page_zone(page), item);
     473           0 : }
     474             : EXPORT_SYMBOL(__dec_zone_page_state);
     475             : 
     476           0 : void __dec_node_page_state(struct page *page, enum node_stat_item item)
     477             : {
     478           0 :         __dec_node_state(page_pgdat(page), item);
     479           0 : }
     480             : EXPORT_SYMBOL(__dec_node_page_state);
     481             : 
     482             : #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
     483             : /*
     484             :  * If we have cmpxchg_local support then we do not need to incur the overhead
     485             :  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
     486             :  *
     487             :  * mod_state() modifies the zone counter state through atomic per cpu
     488             :  * operations.
     489             :  *
      490             :  * Overstep mode specifies how overstep should be handled:
     491             :  *     0       No overstepping
     492             :  *     1       Overstepping half of threshold
     493             :  *     -1      Overstepping minus half of threshold
      494             :  */
     495        4351 : static inline void mod_zone_state(struct zone *zone,
     496             :        enum zone_stat_item item, long delta, int overstep_mode)
     497             : {
     498        4351 :         struct per_cpu_pageset __percpu *pcp = zone->pageset;
     499        4351 :         s8 __percpu *p = pcp->vm_stat_diff + item;
     500        4351 :         long o, n, t, z;
     501             : 
     502        4351 :         do {
     503        4351 :                 z = 0;  /* overflow to zone counters */
     504             : 
     505             :                 /*
     506             :                  * The fetching of the stat_threshold is racy. We may apply
      507             :                  * a counter threshold to the wrong cpu if we get
     508             :                  * rescheduled while executing here. However, the next
     509             :                  * counter update will apply the threshold again and
     510             :                  * therefore bring the counter under the threshold again.
     511             :                  *
      512             :                  * Most of the time the thresholds are the same anyway
     513             :                  * for all cpus in a zone.
     514             :                  */
     515        4351 :                 t = this_cpu_read(pcp->stat_threshold);
     516             : 
     517        4351 :                 o = this_cpu_read(*p);
     518        4351 :                 n = delta + o;
     519             : 
     520        4351 :                 if (abs(n) > t) {
      521          34 :                         int os = overstep_mode * (t >> 1);
     522             : 
     523             :                         /* Overflow must be added to zone counters */
     524          34 :                         z = n + os;
     525          34 :                         n = -os;
     526             :                 }
     527        4351 :         } while (this_cpu_cmpxchg(*p, o, n) != o);
     528             : 
     529        4351 :         if (z)
     530          34 :                 zone_page_state_add(z, zone, item);
     531        4351 : }
     532             : 
     533           1 : void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
     534             :                          long delta)
     535             : {
     536           1 :         mod_zone_state(zone, item, delta, 0);
     537           1 : }
     538             : EXPORT_SYMBOL(mod_zone_page_state);
     539             : 
     540        1284 : void inc_zone_page_state(struct page *page, enum zone_stat_item item)
     541             : {
     542        1284 :         mod_zone_state(page_zone(page), item, 1, 1);
     543        1284 : }
     544             : EXPORT_SYMBOL(inc_zone_page_state);
     545             : 
     546        3066 : void dec_zone_page_state(struct page *page, enum zone_stat_item item)
     547             : {
     548        3066 :         mod_zone_state(page_zone(page), item, -1, -1);
     549        3066 : }
     550             : EXPORT_SYMBOL(dec_zone_page_state);
     551             : 
     552      107430 : static inline void mod_node_state(struct pglist_data *pgdat,
     553             :        enum node_stat_item item, int delta, int overstep_mode)
     554             : {
     555      107430 :         struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
     556      107430 :         s8 __percpu *p = pcp->vm_node_stat_diff + item;
     557      107430 :         long o, n, t, z;
     558             : 
     559      107430 :         if (vmstat_item_in_bytes(item)) {
     560             :                 /*
     561             :                  * Only cgroups use subpage accounting right now; at
     562             :                  * the global level, these items still change in
     563             :                  * multiples of whole pages. Store them as pages
     564             :                  * internally to keep the per-cpu counters compact.
     565             :                  */
     566       44785 :                 VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
     567       44785 :                 delta >>= PAGE_SHIFT;
     568             :         }
     569             : 
     570      107430 :         do {
     571      107430 :                 z = 0;  /* overflow to node counters */
     572             : 
     573             :                 /*
     574             :                  * The fetching of the stat_threshold is racy. We may apply
      575             :                  * a counter threshold to the wrong cpu if we get
     576             :                  * rescheduled while executing here. However, the next
     577             :                  * counter update will apply the threshold again and
     578             :                  * therefore bring the counter under the threshold again.
     579             :                  *
      580             :                  * Most of the time the thresholds are the same anyway
     581             :                  * for all cpus in a node.
     582             :                  */
     583      107430 :                 t = this_cpu_read(pcp->stat_threshold);
     584             : 
     585      107430 :                 o = this_cpu_read(*p);
     586      107431 :                 n = delta + o;
     587             : 
     588      107431 :                 if (abs(n) > t) {
      589        8370 :                         int os = overstep_mode * (t >> 1);
     590             : 
     591             :                         /* Overflow must be added to node counters */
     592        8370 :                         z = n + os;
     593        8370 :                         n = -os;
     594             :                 }
     595      107431 :         } while (this_cpu_cmpxchg(*p, o, n) != o);
     596             : 
     597      107431 :         if (z)
     598        8370 :                 node_page_state_add(z, pgdat, item);
     599      107431 : }
     600             : 
     601      106142 : void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
     602             :                                         long delta)
     603             : {
     604      106142 :         mod_node_state(pgdat, item, delta, 0);
     605      106146 : }
     606             : EXPORT_SYMBOL(mod_node_page_state);
     607             : 
     608           0 : void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
     609             : {
     610           0 :         mod_node_state(pgdat, item, 1, 1);
     611           0 : }
     612             : 
     613        1284 : void inc_node_page_state(struct page *page, enum node_stat_item item)
     614             : {
     615        1284 :         mod_node_state(page_pgdat(page), item, 1, 1);
     616        1284 : }
     617             : EXPORT_SYMBOL(inc_node_page_state);
     618             : 
     619           2 : void dec_node_page_state(struct page *page, enum node_stat_item item)
     620             : {
     621           2 :         mod_node_state(page_pgdat(page), item, -1, -1);
     622           2 : }
     623             : EXPORT_SYMBOL(dec_node_page_state);
     624             : #else
     625             : /*
     626             :  * Use interrupt disable to serialize counter updates
     627             :  */
     628             : void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
     629             :                          long delta)
     630             : {
     631             :         unsigned long flags;
     632             : 
     633             :         local_irq_save(flags);
     634             :         __mod_zone_page_state(zone, item, delta);
     635             :         local_irq_restore(flags);
     636             : }
     637             : EXPORT_SYMBOL(mod_zone_page_state);
     638             : 
     639             : void inc_zone_page_state(struct page *page, enum zone_stat_item item)
     640             : {
     641             :         unsigned long flags;
     642             :         struct zone *zone;
     643             : 
     644             :         zone = page_zone(page);
     645             :         local_irq_save(flags);
     646             :         __inc_zone_state(zone, item);
     647             :         local_irq_restore(flags);
     648             : }
     649             : EXPORT_SYMBOL(inc_zone_page_state);
     650             : 
     651             : void dec_zone_page_state(struct page *page, enum zone_stat_item item)
     652             : {
     653             :         unsigned long flags;
     654             : 
     655             :         local_irq_save(flags);
     656             :         __dec_zone_page_state(page, item);
     657             :         local_irq_restore(flags);
     658             : }
     659             : EXPORT_SYMBOL(dec_zone_page_state);
     660             : 
     661             : void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
     662             : {
     663             :         unsigned long flags;
     664             : 
     665             :         local_irq_save(flags);
     666             :         __inc_node_state(pgdat, item);
     667             :         local_irq_restore(flags);
     668             : }
     669             : EXPORT_SYMBOL(inc_node_state);
     670             : 
     671             : void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
     672             :                                         long delta)
     673             : {
     674             :         unsigned long flags;
     675             : 
     676             :         local_irq_save(flags);
     677             :         __mod_node_page_state(pgdat, item, delta);
     678             :         local_irq_restore(flags);
     679             : }
     680             : EXPORT_SYMBOL(mod_node_page_state);
     681             : 
     682             : void inc_node_page_state(struct page *page, enum node_stat_item item)
     683             : {
     684             :         unsigned long flags;
     685             :         struct pglist_data *pgdat;
     686             : 
     687             :         pgdat = page_pgdat(page);
     688             :         local_irq_save(flags);
     689             :         __inc_node_state(pgdat, item);
     690             :         local_irq_restore(flags);
     691             : }
     692             : EXPORT_SYMBOL(inc_node_page_state);
     693             : 
     694             : void dec_node_page_state(struct page *page, enum node_stat_item item)
     695             : {
     696             :         unsigned long flags;
     697             : 
     698             :         local_irq_save(flags);
     699             :         __dec_node_page_state(page, item);
     700             :         local_irq_restore(flags);
     701             : }
     702             : EXPORT_SYMBOL(dec_node_page_state);
     703             : #endif
     704             : 
     705             : /*
     706             :  * Fold a differential into the global counters.
     707             :  * Returns the number of counters updated.
     708             :  */
     709             : #ifdef CONFIG_NUMA
     710         412 : static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
     711             : {
     712         412 :         int i;
     713         412 :         int changes = 0;
     714             : 
     715        4532 :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
     716        4120 :                 if (zone_diff[i]) {
     717        1231 :                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
     718        1231 :                         changes++;
     719             :         }
     720             : 
     721        2884 :         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
     722        2472 :                 if (numa_diff[i]) {
     723         797 :                         atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
     724         797 :                         changes++;
     725             :         }
     726             : 
     727       16061 :         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
     728       15649 :                 if (node_diff[i]) {
     729        2618 :                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
     730        2618 :                         changes++;
     731             :         }
     732         412 :         return changes;
     733             : }
     734             : #else
     735             : static int fold_diff(int *zone_diff, int *node_diff)
     736             : {
     737             :         int i;
     738             :         int changes = 0;
     739             : 
     740             :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
     741             :                 if (zone_diff[i]) {
     742             :                         atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
     743             :                         changes++;
     744             :         }
     745             : 
     746             :         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
     747             :                 if (node_diff[i]) {
     748             :                         atomic_long_add(node_diff[i], &vm_node_stat[i]);
     749             :                         changes++;
     750             :         }
     751             :         return changes;
     752             : }
     753             : #endif /* CONFIG_NUMA */
     754             : 
     755             : /*
     756             :  * Update the zone counters for the current cpu.
     757             :  *
     758             :  * Note that refresh_cpu_vm_stats strives to only access
     759             :  * node local memory. The per cpu pagesets on remote zones are placed
     760             :  * in the memory local to the processor using that pageset. So the
     761             :  * loop over all zones will access a series of cachelines local to
     762             :  * the processor.
     763             :  *
     764             :  * The call to zone_page_state_add updates the cachelines with the
     765             :  * statistics in the remote zone struct as well as the global cachelines
     766             :  * with the global counters. These could cause remote node cache line
     767             :  * bouncing and will have to be only done when necessary.
     768             :  *
     769             :  * The function returns the number of global counters updated.
     770             :  */
     771         412 : static int refresh_cpu_vm_stats(bool do_pagesets)
     772             : {
     773         412 :         struct pglist_data *pgdat;
     774         412 :         struct zone *zone;
     775         412 :         int i;
     776         412 :         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
     777             : #ifdef CONFIG_NUMA
     778         412 :         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
     779             : #endif
     780         412 :         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
     781         412 :         int changes = 0;
     782             : 
     783        1648 :         for_each_populated_zone(zone) {
     784         412 :                 struct per_cpu_pageset __percpu *p = zone->pageset;
     785             : 
     786        4532 :                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
     787        4120 :                         int v;
     788             : 
     789        4120 :                         v = this_cpu_xchg(p->vm_stat_diff[i], 0);
     790        4120 :                         if (v) {
     791             : 
     792        1231 :                                 atomic_long_add(v, &zone->vm_stat[i]);
     793        1231 :                                 global_zone_diff[i] += v;
     794             : #ifdef CONFIG_NUMA
     795             :                                 /* 3 seconds idle till flush */
     796        4120 :                                 __this_cpu_write(p->expire, 3);
     797             : #endif
     798             :                         }
     799             :                 }
     800             : #ifdef CONFIG_NUMA
     801        2884 :                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
     802        2472 :                         int v;
     803             : 
     804        2472 :                         v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
     805        2472 :                         if (v) {
     806             : 
     807         797 :                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
     808         797 :                                 global_numa_diff[i] += v;
     809        2472 :                                 __this_cpu_write(p->expire, 3);
     810             :                         }
     811             :                 }
     812             : 
     813         412 :                 if (do_pagesets) {
     814         157 :                         cond_resched();
     815             :                         /*
     816             :                          * Deal with draining the remote pageset of this
     817             :                          * processor
     818             :                          *
      819             :                          * Check if there are pages remaining in this pageset;
      820             :                          * if not, there is nothing to expire.
     821             :                          */
     822         157 :                         if (!__this_cpu_read(p->expire) ||
     823         155 :                                !__this_cpu_read(p->pcp.count))
     824           2 :                                 continue;
     825             : 
     826             :                         /*
     827             :                          * We never drain zones local to this processor.
     828             :                          */
     829         155 :                         if (zone_to_nid(zone) == numa_node_id()) {
     830         155 :                                 __this_cpu_write(p->expire, 0);
     831         155 :                                 continue;
     832             :                         }
     833             : 
     834           0 :                         if (__this_cpu_dec_return(p->expire))
     835           0 :                                 continue;
     836             : 
     837           0 :                         if (__this_cpu_read(p->pcp.count)) {
     838           0 :                                 drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
     839           0 :                                 changes++;
     840             :                         }
     841             :                 }
     842             : #endif
     843             :         }
     844             : 
     845         824 :         for_each_online_pgdat(pgdat) {
     846         412 :                 struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
     847             : 
     848       16065 :                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
     849       15653 :                         int v;
     850             : 
     851       15653 :                         v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
     852       15653 :                         if (v) {
     853        2619 :                                 atomic_long_add(v, &pgdat->vm_stat[i]);
     854        2619 :                                 global_node_diff[i] += v;
     855             :                         }
     856             :                 }
     857             :         }
     858             : 
     859             : #ifdef CONFIG_NUMA
     860         412 :         changes += fold_diff(global_zone_diff, global_numa_diff,
     861             :                              global_node_diff);
     862             : #else
     863             :         changes += fold_diff(global_zone_diff, global_node_diff);
     864             : #endif
     865         412 :         return changes;
     866             : }
     867             : 
     868             : /*
     869             :  * Fold the data for an offline cpu into the global array.
     870             :  * There cannot be any access by the offline cpu and therefore
     871             :  * synchronization is simplified.
     872             :  */
     873           0 : void cpu_vm_stats_fold(int cpu)
     874             : {
     875           0 :         struct pglist_data *pgdat;
     876           0 :         struct zone *zone;
     877           0 :         int i;
     878           0 :         int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
     879             : #ifdef CONFIG_NUMA
     880           0 :         int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
     881             : #endif
     882           0 :         int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
     883             : 
     884           0 :         for_each_populated_zone(zone) {
     885           0 :                 struct per_cpu_pageset *p;
     886             : 
     887           0 :                 p = per_cpu_ptr(zone->pageset, cpu);
     888             : 
     889           0 :                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
     890           0 :                         if (p->vm_stat_diff[i]) {
     891           0 :                                 int v;
     892             : 
     893           0 :                                 v = p->vm_stat_diff[i];
     894           0 :                                 p->vm_stat_diff[i] = 0;
     895           0 :                                 atomic_long_add(v, &zone->vm_stat[i]);
     896           0 :                                 global_zone_diff[i] += v;
     897             :                         }
     898             : 
     899             : #ifdef CONFIG_NUMA
     900           0 :                 for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
     901           0 :                         if (p->vm_numa_stat_diff[i]) {
     902           0 :                                 int v;
     903             : 
     904           0 :                                 v = p->vm_numa_stat_diff[i];
     905           0 :                                 p->vm_numa_stat_diff[i] = 0;
     906           0 :                                 atomic_long_add(v, &zone->vm_numa_stat[i]);
     907           0 :                                 global_numa_diff[i] += v;
     908             :                         }
     909             : #endif
     910             :         }
     911             : 
     912           0 :         for_each_online_pgdat(pgdat) {
     913           0 :                 struct per_cpu_nodestat *p;
     914             : 
     915           0 :                 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
     916             : 
     917           0 :                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
     918           0 :                         if (p->vm_node_stat_diff[i]) {
     919           0 :                                 int v;
     920             : 
     921           0 :                                 v = p->vm_node_stat_diff[i];
     922           0 :                                 p->vm_node_stat_diff[i] = 0;
     923           0 :                                 atomic_long_add(v, &pgdat->vm_stat[i]);
     924           0 :                                 global_node_diff[i] += v;
     925             :                         }
     926             :         }
     927             : 
     928             : #ifdef CONFIG_NUMA
     929           0 :         fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
     930             : #else
     931             :         fold_diff(global_zone_diff, global_node_diff);
     932             : #endif
     933           0 : }
     934             : 
     935             : /*
      936             :  * This is only called if !populated_zone(zone), which implies no other users of
      937             :  * pset->vm_stat_diff[] exist.
     938             :  */
     939           0 : void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
     940             : {
     941           0 :         int i;
     942             : 
     943           0 :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
     944           0 :                 if (pset->vm_stat_diff[i]) {
     945           0 :                         int v = pset->vm_stat_diff[i];
     946           0 :                         pset->vm_stat_diff[i] = 0;
     947           0 :                         atomic_long_add(v, &zone->vm_stat[i]);
     948           0 :                         atomic_long_add(v, &vm_zone_stat[i]);
     949             :                 }
     950             : 
     951             : #ifdef CONFIG_NUMA
     952           0 :         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
     953           0 :                 if (pset->vm_numa_stat_diff[i]) {
     954           0 :                         int v = pset->vm_numa_stat_diff[i];
     955             : 
     956           0 :                         pset->vm_numa_stat_diff[i] = 0;
     957           0 :                         atomic_long_add(v, &zone->vm_numa_stat[i]);
     958           0 :                         atomic_long_add(v, &vm_numa_stat[i]);
     959             :                 }
     960             : #endif
     961           0 : }
     962             : #endif
     963             : 
     964             : #ifdef CONFIG_NUMA
     965      375138 : void __inc_numa_state(struct zone *zone,
     966             :                                  enum numa_stat_item item)
     967             : {
     968      375138 :         struct per_cpu_pageset __percpu *pcp = zone->pageset;
     969      375138 :         u16 __percpu *p = pcp->vm_numa_stat_diff + item;
     970      375138 :         u16 v;
     971             : 
     972      375138 :         v = __this_cpu_inc_return(*p);
     973             : 
     974      375138 :         if (unlikely(v > NUMA_STATS_THRESHOLD)) {
     975           0 :                 zone_numa_state_add(v, zone, item);
     976      375138 :                 __this_cpu_write(*p, 0);
     977             :         }
     978      375138 : }
     979             : 
     980             : /*
     981             :  * Determine the per node value of a stat item. This function
     982             :  * is called frequently in a NUMA machine, so try to be as
     983             :  * frugal as possible.
     984             :  */
     985           0 : unsigned long sum_zone_node_page_state(int node,
     986             :                                  enum zone_stat_item item)
     987             : {
     988           0 :         struct zone *zones = NODE_DATA(node)->node_zones;
     989           0 :         int i;
     990           0 :         unsigned long count = 0;
     991             : 
     992           0 :         for (i = 0; i < MAX_NR_ZONES; i++)
     993           0 :                 count += zone_page_state(zones + i, item);
     994             : 
     995           0 :         return count;
     996             : }
     997             : 
     998             : /*
     999             :  * Determine the per node value of a numa stat item. To avoid deviation,
    1000             :  * the per cpu stat number in vm_numa_stat_diff[] is also included.
    1001             :  */
    1002           0 : unsigned long sum_zone_numa_state(int node,
    1003             :                                  enum numa_stat_item item)
    1004             : {
    1005           0 :         struct zone *zones = NODE_DATA(node)->node_zones;
    1006           0 :         int i;
    1007           0 :         unsigned long count = 0;
    1008             : 
    1009           0 :         for (i = 0; i < MAX_NR_ZONES; i++)
    1010           0 :                 count += zone_numa_state_snapshot(zones + i, item);
    1011             : 
    1012           0 :         return count;
    1013             : }
    1014             : 
    1015             : /*
    1016             :  * Determine the per node value of a stat item.
    1017             :  */
    1018        6124 : unsigned long node_page_state_pages(struct pglist_data *pgdat,
    1019             :                                     enum node_stat_item item)
    1020             : {
    1021           0 :         long x = atomic_long_read(&pgdat->vm_stat[item]);
    1022             : #ifdef CONFIG_SMP
    1023        6124 :         if (x < 0)
    1024             :                 x = 0;
    1025             : #endif
    1026        6124 :         return x;
    1027             : }
    1028             : 
    1029        6124 : unsigned long node_page_state(struct pglist_data *pgdat,
    1030             :                               enum node_stat_item item)
    1031             : {
    1032        6124 :         VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
    1033             : 
    1034        6124 :         return node_page_state_pages(pgdat, item);
    1035             : }
    1036             : #endif
    1037             : 
    1038             : #ifdef CONFIG_COMPACTION
    1039             : 
    1040             : struct contig_page_info {
    1041             :         unsigned long free_pages;
    1042             :         unsigned long free_blocks_total;
    1043             :         unsigned long free_blocks_suitable;
    1044             : };
    1045             : 
    1046             : /*
    1047             :  * Calculate the number of free pages in a zone, how many contiguous
    1048             :  * pages are free and how many are large enough to satisfy an allocation of
    1049             :  * the target size. Note that this function makes no attempt to estimate
    1050             :  * how many suitable free blocks there *might* be if MOVABLE pages were
    1051             :  * migrated. Calculating that is possible, but expensive and can be
     1052             :  * figured out from userspace.
    1053             :  */
    1054         234 : static void fill_contig_page_info(struct zone *zone,
    1055             :                                 unsigned int suitable_order,
    1056             :                                 struct contig_page_info *info)
    1057             : {
    1058         234 :         unsigned int order;
    1059             : 
    1060         234 :         info->free_pages = 0;
    1061         234 :         info->free_blocks_total = 0;
    1062         234 :         info->free_blocks_suitable = 0;
    1063             : 
    1064        2808 :         for (order = 0; order < MAX_ORDER; order++) {
    1065        2574 :                 unsigned long blocks;
    1066             : 
    1067             :                 /* Count number of free blocks */
    1068        2574 :                 blocks = zone->free_area[order].nr_free;
    1069        2574 :                 info->free_blocks_total += blocks;
    1070             : 
    1071             :                 /* Count free base pages */
    1072        2574 :                 info->free_pages += blocks << order;
    1073             : 
    1074             :                 /* Count the suitable free blocks */
    1075        2574 :                 if (order >= suitable_order)
    1076         468 :                         info->free_blocks_suitable += blocks <<
    1077         468 :                                                 (order - suitable_order);
    1078             :         }
    1079         234 : }
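
/*
 * [Illustrative example, not part of vmstat.c] For a hypothetical zone whose
 * buddy lists hold nr_free = {8, 4, 2, 1} blocks at orders 0..3, and a
 * request of suitable_order = 2, the loop above yields:
 *   free_blocks_total    = 8 + 4 + 2 + 1          = 15
 *   free_pages           = 8*1 + 4*2 + 2*4 + 1*8  = 32
 *   free_blocks_suitable = (2 << 0) + (1 << 1)    = 4
 * A standalone recomputation of those numbers:
 */
#include <stdio.h>

int main(void)
{
        unsigned long nr_free[] = { 8, 4, 2, 1 };   /* hypothetical buddy lists */
        unsigned int suitable_order = 2, order;
        unsigned long pages = 0, total = 0, suitable = 0;

        for (order = 0; order < 4; order++) {
                total += nr_free[order];
                pages += nr_free[order] << order;
                if (order >= suitable_order)
                        suitable += nr_free[order] << (order - suitable_order);
        }
        printf("free_pages=%lu total=%lu suitable=%lu\n", pages, total, suitable);
        return 0;
}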
    1080             : 
    1081             : /*
    1082             :  * A fragmentation index only makes sense if an allocation of a requested
    1083             :  * size would fail. If that is true, the fragmentation index indicates
    1084             :  * whether external fragmentation or a lack of memory was the problem.
    1085             :  * The value can be used to determine if page reclaim or compaction
     1086             :  * should be used.
    1087             :  */
    1088           0 : static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
    1089             : {
    1090           0 :         unsigned long requested = 1UL << order;
    1091             : 
    1092           0 :         if (WARN_ON_ONCE(order >= MAX_ORDER))
    1093             :                 return 0;
    1094             : 
    1095           0 :         if (!info->free_blocks_total)
    1096             :                 return 0;
    1097             : 
    1098             :         /* Fragmentation index only makes sense when a request would fail */
    1099           0 :         if (info->free_blocks_suitable)
    1100             :                 return -1000;
    1101             : 
    1102             :         /*
    1103             :          * Index is between 0 and 1 so return within 3 decimal places
    1104             :          *
    1105             :          * 0 => allocation would fail due to lack of memory
    1106             :          * 1 => allocation would fail due to fragmentation
    1107             :          */
     1108           0 :         return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
    1109             : }
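
/*
 * [Illustrative example, not part of vmstat.c] For the hypothetical zone used
 * above (free_pages = 32 spread over free_blocks_total = 15 blocks, largest
 * block of order 3), an order-4 request cannot be satisfied, so the expression
 * evaluates as
 *   1000 - (1000 + 32*1000/16) / 15 = 1000 - 3000/15 = 800
 * i.e. the failure is dominated by fragmentation rather than lack of memory.
 * A userspace recomputation:
 */
#include <stdio.h>

int main(void)
{
        unsigned long requested = 1UL << 4;     /* order-4 request          */
        unsigned long free_pages = 32;          /* hypothetical zone totals */
        unsigned long free_blocks_total = 15;

        long index = 1000 - (1000 + free_pages * 1000 / requested)
                            / free_blocks_total;
        printf("fragmentation index = %ld/1000\n", index);
        return 0;
}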
    1110             : 
    1111             : /*
     1112             :  * Calculates external fragmentation within a zone with respect to the given
     1112             :  * order.
    1113             :  * It is defined as the percentage of pages found in blocks of size
    1114             :  * less than 1 << order. It returns values in range [0, 100].
    1115             :  */
    1116         234 : unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
    1117             : {
    1118         234 :         struct contig_page_info info;
    1119             : 
    1120         234 :         fill_contig_page_info(zone, order, &info);
    1121         234 :         if (info.free_pages == 0)
    1122             :                 return 0;
    1123             : 
    1124          78 :         return div_u64((info.free_pages -
    1125          78 :                         (info.free_blocks_suitable << order)) * 100,
    1126             :                         info.free_pages);
    1127             : }
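
/*
 * [Illustrative example, not part of vmstat.c] Continuing the hypothetical
 * zone above (free_pages = 32, free_blocks_suitable = 4 at order = 2):
 *   extfrag = (32 - (4 << 2)) * 100 / 32 = 50
 * so half of the free memory sits in blocks too small for an order-2 request.
 */
#include <stdio.h>

int main(void)
{
        unsigned int order = 2;
        unsigned long free_pages = 32, free_blocks_suitable = 4;

        printf("extfrag_for_order = %lu%%\n",
               (free_pages - (free_blocks_suitable << order)) * 100 / free_pages);
        return 0;
}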
    1128             : 
    1129             : /* Same as __fragmentation index but allocs contig_page_info on stack */
    1130           0 : int fragmentation_index(struct zone *zone, unsigned int order)
    1131             : {
    1132           0 :         struct contig_page_info info;
    1133             : 
    1134           0 :         fill_contig_page_info(zone, order, &info);
    1135           0 :         return __fragmentation_index(order, &info);
    1136             : }
    1137             : #endif
    1138             : 
    1139             : #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
    1140             :     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
    1141             : #ifdef CONFIG_ZONE_DMA
    1142             : #define TEXT_FOR_DMA(xx) xx "_dma",
    1143             : #else
    1144             : #define TEXT_FOR_DMA(xx)
    1145             : #endif
    1146             : 
    1147             : #ifdef CONFIG_ZONE_DMA32
    1148             : #define TEXT_FOR_DMA32(xx) xx "_dma32",
    1149             : #else
    1150             : #define TEXT_FOR_DMA32(xx)
    1151             : #endif
    1152             : 
    1153             : #ifdef CONFIG_HIGHMEM
    1154             : #define TEXT_FOR_HIGHMEM(xx) xx "_high",
    1155             : #else
    1156             : #define TEXT_FOR_HIGHMEM(xx)
    1157             : #endif
    1158             : 
    1159             : #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
    1160             :                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
    1161             : 
    1162             : const char * const vmstat_text[] = {
    1163             :         /* enum zone_stat_item counters */
    1164             :         "nr_free_pages",
    1165             :         "nr_zone_inactive_anon",
    1166             :         "nr_zone_active_anon",
    1167             :         "nr_zone_inactive_file",
    1168             :         "nr_zone_active_file",
    1169             :         "nr_zone_unevictable",
    1170             :         "nr_zone_write_pending",
    1171             :         "nr_mlock",
    1172             :         "nr_bounce",
    1173             : #if IS_ENABLED(CONFIG_ZSMALLOC)
    1174             :         "nr_zspages",
    1175             : #endif
    1176             :         "nr_free_cma",
    1177             : 
    1178             :         /* enum numa_stat_item counters */
    1179             : #ifdef CONFIG_NUMA
    1180             :         "numa_hit",
    1181             :         "numa_miss",
    1182             :         "numa_foreign",
    1183             :         "numa_interleave",
    1184             :         "numa_local",
    1185             :         "numa_other",
    1186             : #endif
    1187             : 
    1188             :         /* enum node_stat_item counters */
    1189             :         "nr_inactive_anon",
    1190             :         "nr_active_anon",
    1191             :         "nr_inactive_file",
    1192             :         "nr_active_file",
    1193             :         "nr_unevictable",
    1194             :         "nr_slab_reclaimable",
    1195             :         "nr_slab_unreclaimable",
    1196             :         "nr_isolated_anon",
    1197             :         "nr_isolated_file",
    1198             :         "workingset_nodes",
    1199             :         "workingset_refault_anon",
    1200             :         "workingset_refault_file",
    1201             :         "workingset_activate_anon",
    1202             :         "workingset_activate_file",
    1203             :         "workingset_restore_anon",
    1204             :         "workingset_restore_file",
    1205             :         "workingset_nodereclaim",
    1206             :         "nr_anon_pages",
    1207             :         "nr_mapped",
    1208             :         "nr_file_pages",
    1209             :         "nr_dirty",
    1210             :         "nr_writeback",
    1211             :         "nr_writeback_temp",
    1212             :         "nr_shmem",
    1213             :         "nr_shmem_hugepages",
    1214             :         "nr_shmem_pmdmapped",
    1215             :         "nr_file_hugepages",
    1216             :         "nr_file_pmdmapped",
    1217             :         "nr_anon_transparent_hugepages",
    1218             :         "nr_vmscan_write",
    1219             :         "nr_vmscan_immediate_reclaim",
    1220             :         "nr_dirtied",
    1221             :         "nr_written",
    1222             :         "nr_kernel_misc_reclaimable",
    1223             :         "nr_foll_pin_acquired",
    1224             :         "nr_foll_pin_released",
    1225             :         "nr_kernel_stack",
    1226             : #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
    1227             :         "nr_shadow_call_stack",
    1228             : #endif
    1229             :         "nr_page_table_pages",
    1230             : #ifdef CONFIG_SWAP
    1231             :         "nr_swapcached",
    1232             : #endif
    1233             : 
    1234             :         /* enum writeback_stat_item counters */
    1235             :         "nr_dirty_threshold",
    1236             :         "nr_dirty_background_threshold",
    1237             : 
    1238             : #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
    1239             :         /* enum vm_event_item counters */
    1240             :         "pgpgin",
    1241             :         "pgpgout",
    1242             :         "pswpin",
    1243             :         "pswpout",
    1244             : 
    1245             :         TEXTS_FOR_ZONES("pgalloc")
    1246             :         TEXTS_FOR_ZONES("allocstall")
    1247             :         TEXTS_FOR_ZONES("pgskip")
    1248             : 
    1249             :         "pgfree",
    1250             :         "pgactivate",
    1251             :         "pgdeactivate",
    1252             :         "pglazyfree",
    1253             : 
    1254             :         "pgfault",
    1255             :         "pgmajfault",
    1256             :         "pglazyfreed",
    1257             : 
    1258             :         "pgrefill",
    1259             :         "pgreuse",
    1260             :         "pgsteal_kswapd",
    1261             :         "pgsteal_direct",
    1262             :         "pgscan_kswapd",
    1263             :         "pgscan_direct",
    1264             :         "pgscan_direct_throttle",
    1265             :         "pgscan_anon",
    1266             :         "pgscan_file",
    1267             :         "pgsteal_anon",
    1268             :         "pgsteal_file",
    1269             : 
    1270             : #ifdef CONFIG_NUMA
    1271             :         "zone_reclaim_failed",
    1272             : #endif
    1273             :         "pginodesteal",
    1274             :         "slabs_scanned",
    1275             :         "kswapd_inodesteal",
    1276             :         "kswapd_low_wmark_hit_quickly",
    1277             :         "kswapd_high_wmark_hit_quickly",
    1278             :         "pageoutrun",
    1279             : 
    1280             :         "pgrotated",
    1281             : 
    1282             :         "drop_pagecache",
    1283             :         "drop_slab",
    1284             :         "oom_kill",
    1285             : 
    1286             : #ifdef CONFIG_NUMA_BALANCING
    1287             :         "numa_pte_updates",
    1288             :         "numa_huge_pte_updates",
    1289             :         "numa_hint_faults",
    1290             :         "numa_hint_faults_local",
    1291             :         "numa_pages_migrated",
    1292             : #endif
    1293             : #ifdef CONFIG_MIGRATION
    1294             :         "pgmigrate_success",
    1295             :         "pgmigrate_fail",
    1296             :         "thp_migration_success",
    1297             :         "thp_migration_fail",
    1298             :         "thp_migration_split",
    1299             : #endif
    1300             : #ifdef CONFIG_COMPACTION
    1301             :         "compact_migrate_scanned",
    1302             :         "compact_free_scanned",
    1303             :         "compact_isolated",
    1304             :         "compact_stall",
    1305             :         "compact_fail",
    1306             :         "compact_success",
    1307             :         "compact_daemon_wake",
    1308             :         "compact_daemon_migrate_scanned",
    1309             :         "compact_daemon_free_scanned",
    1310             : #endif
    1311             : 
    1312             : #ifdef CONFIG_HUGETLB_PAGE
    1313             :         "htlb_buddy_alloc_success",
    1314             :         "htlb_buddy_alloc_fail",
    1315             : #endif
    1316             :         "unevictable_pgs_culled",
    1317             :         "unevictable_pgs_scanned",
    1318             :         "unevictable_pgs_rescued",
    1319             :         "unevictable_pgs_mlocked",
    1320             :         "unevictable_pgs_munlocked",
    1321             :         "unevictable_pgs_cleared",
    1322             :         "unevictable_pgs_stranded",
    1323             : 
    1324             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1325             :         "thp_fault_alloc",
    1326             :         "thp_fault_fallback",
    1327             :         "thp_fault_fallback_charge",
    1328             :         "thp_collapse_alloc",
    1329             :         "thp_collapse_alloc_failed",
    1330             :         "thp_file_alloc",
    1331             :         "thp_file_fallback",
    1332             :         "thp_file_fallback_charge",
    1333             :         "thp_file_mapped",
    1334             :         "thp_split_page",
    1335             :         "thp_split_page_failed",
    1336             :         "thp_deferred_split_page",
    1337             :         "thp_split_pmd",
    1338             : #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
    1339             :         "thp_split_pud",
    1340             : #endif
    1341             :         "thp_zero_page_alloc",
    1342             :         "thp_zero_page_alloc_failed",
    1343             :         "thp_swpout",
    1344             :         "thp_swpout_fallback",
    1345             : #endif
    1346             : #ifdef CONFIG_MEMORY_BALLOON
    1347             :         "balloon_inflate",
    1348             :         "balloon_deflate",
    1349             : #ifdef CONFIG_BALLOON_COMPACTION
    1350             :         "balloon_migrate",
    1351             : #endif
    1352             : #endif /* CONFIG_MEMORY_BALLOON */
    1353             : #ifdef CONFIG_DEBUG_TLBFLUSH
    1354             :         "nr_tlb_remote_flush",
    1355             :         "nr_tlb_remote_flush_received",
    1356             :         "nr_tlb_local_flush_all",
    1357             :         "nr_tlb_local_flush_one",
    1358             : #endif /* CONFIG_DEBUG_TLBFLUSH */
    1359             : 
    1360             : #ifdef CONFIG_DEBUG_VM_VMACACHE
    1361             :         "vmacache_find_calls",
    1362             :         "vmacache_find_hits",
    1363             : #endif
    1364             : #ifdef CONFIG_SWAP
    1365             :         "swap_ra",
    1366             :         "swap_ra_hit",
    1367             : #endif
    1368             : #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
    1369             : };
    1370             : #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
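
/*
 * [Editor's note, illustrative] On a typical x86_64 configuration with
 * CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 enabled and no HIGHMEM,
 * TEXTS_FOR_ZONES("pgalloc") above expands to the four strings
 * "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal" and "pgalloc_movable",
 * which is why /proc/vmstat reports one pgalloc counter per zone type rather
 * than a single total.
 */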
    1371             : 
    1372             : #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
    1373             :      defined(CONFIG_PROC_FS)
    1374           0 : static void *frag_start(struct seq_file *m, loff_t *pos)
    1375             : {
    1376           0 :         pg_data_t *pgdat;
    1377           0 :         loff_t node = *pos;
    1378             : 
    1379           0 :         for (pgdat = first_online_pgdat();
    1380           0 :              pgdat && node;
    1381           0 :              pgdat = next_online_pgdat(pgdat))
    1382           0 :                 --node;
    1383             : 
    1384           0 :         return pgdat;
    1385             : }
    1386             : 
    1387           0 : static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
    1388             : {
    1389           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1390             : 
    1391           0 :         (*pos)++;
    1392           0 :         return next_online_pgdat(pgdat);
    1393             : }
    1394             : 
    1395           0 : static void frag_stop(struct seq_file *m, void *arg)
    1396             : {
    1397           0 : }
    1398             : 
    1399             : /*
    1400             :  * Walk zones in a node and print using a callback.
    1401             :  * If @assert_populated is true, only use callback for zones that are populated.
    1402             :  */
    1403           0 : static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
    1404             :                 bool assert_populated, bool nolock,
    1405             :                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
    1406             : {
    1407           0 :         struct zone *zone;
    1408           0 :         struct zone *node_zones = pgdat->node_zones;
    1409           0 :         unsigned long flags;
    1410             : 
    1411           0 :         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
    1412           0 :                 if (assert_populated && !populated_zone(zone))
    1413           0 :                         continue;
    1414             : 
    1415           0 :                 if (!nolock)
    1416           0 :                         spin_lock_irqsave(&zone->lock, flags);
    1417           0 :                 print(m, pgdat, zone);
    1418           0 :                 if (!nolock)
    1419           0 :                         spin_unlock_irqrestore(&zone->lock, flags);
    1420             :         }
    1421           0 : }
    1422             : #endif
    1423             : 
    1424             : #ifdef CONFIG_PROC_FS
    1425           0 : static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
    1426             :                                                 struct zone *zone)
    1427             : {
    1428           0 :         int order;
    1429             : 
    1430           0 :         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
    1431           0 :         for (order = 0; order < MAX_ORDER; ++order)
    1432           0 :                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
    1433           0 :         seq_putc(m, '\n');
    1434           0 : }
    1435             : 
    1436             : /*
    1437             :  * This walks the free areas for each zone.
    1438             :  */
    1439           0 : static int frag_show(struct seq_file *m, void *arg)
    1440             : {
    1441           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1442           0 :         walk_zones_in_node(m, pgdat, true, false, frag_show_print);
    1443           0 :         return 0;
    1444             : }
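
/*
 * [Illustrative userspace companion, not part of vmstat.c] frag_show() above
 * backs /proc/buddyinfo: one line per (node, zone) listing nr_free for each
 * order. A small reader that folds those counts back into a rough number of
 * free base pages per zone (format assumptions as produced above):
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char line[512], zone[32];
        int node;
        FILE *f = fopen("/proc/buddyinfo", "r");

        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f)) {
                int off = 0, order = 0;
                unsigned long pages = 0;

                if (sscanf(line, "Node %d, zone %31s%n", &node, zone, &off) < 2)
                        continue;
                for (char *p = line + off; ; order++) {
                        char *end;
                        unsigned long blocks = strtoul(p, &end, 10);

                        if (end == p)
                                break;
                        pages += blocks << order;
                        p = end;
                }
                printf("node %d zone %-8s ~%lu free pages\n", node, zone, pages);
        }
        fclose(f);
        return 0;
}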
    1445             : 
    1446           0 : static void pagetypeinfo_showfree_print(struct seq_file *m,
    1447             :                                         pg_data_t *pgdat, struct zone *zone)
    1448             : {
    1449           0 :         int order, mtype;
    1450             : 
    1451           0 :         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
    1452           0 :                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
    1453             :                                         pgdat->node_id,
    1454             :                                         zone->name,
    1455             :                                         migratetype_names[mtype]);
    1456           0 :                 for (order = 0; order < MAX_ORDER; ++order) {
    1457           0 :                         unsigned long freecount = 0;
    1458           0 :                         struct free_area *area;
    1459           0 :                         struct list_head *curr;
    1460           0 :                         bool overflow = false;
    1461             : 
    1462           0 :                         area = &(zone->free_area[order]);
    1463             : 
    1464           0 :                         list_for_each(curr, &area->free_list[mtype]) {
    1465             :                                 /*
    1466             :                                  * Cap the free_list iteration because it might
    1467             :                                  * be really large and we are under a spinlock
    1468             :                                  * so a long time spent here could trigger a
    1469             :                                  * hard lockup detector. Anyway this is a
    1470             :                                  * debugging tool so knowing there is a handful
    1471             :                                  * of pages of this order should be more than
    1472             :                                  * sufficient.
    1473             :                                  */
    1474           0 :                                 if (++freecount >= 100000) {
    1475             :                                         overflow = true;
    1476             :                                         break;
    1477             :                                 }
    1478             :                         }
    1479           0 :                         seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
    1480           0 :                         spin_unlock_irq(&zone->lock);
    1481           0 :                         cond_resched();
    1482           0 :                         spin_lock_irq(&zone->lock);
    1483             :                 }
    1484           0 :                 seq_putc(m, '\n');
    1485             :         }
    1486           0 : }
    1487             : 
     1488             : /* Print out the free pages at each order for each migratetype */
    1489           0 : static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
    1490             : {
    1491           0 :         int order;
    1492           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1493             : 
    1494             :         /* Print header */
    1495           0 :         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
    1496           0 :         for (order = 0; order < MAX_ORDER; ++order)
    1497           0 :                 seq_printf(m, "%6d ", order);
    1498           0 :         seq_putc(m, '\n');
    1499             : 
    1500           0 :         walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
    1501             : 
    1502           0 :         return 0;
    1503             : }
    1504             : 
    1505           0 : static void pagetypeinfo_showblockcount_print(struct seq_file *m,
    1506             :                                         pg_data_t *pgdat, struct zone *zone)
    1507             : {
    1508           0 :         int mtype;
    1509           0 :         unsigned long pfn;
    1510           0 :         unsigned long start_pfn = zone->zone_start_pfn;
    1511           0 :         unsigned long end_pfn = zone_end_pfn(zone);
    1512           0 :         unsigned long count[MIGRATE_TYPES] = { 0, };
    1513             : 
    1514           0 :         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
    1515           0 :                 struct page *page;
    1516             : 
    1517           0 :                 page = pfn_to_online_page(pfn);
    1518           0 :                 if (!page)
    1519           0 :                         continue;
    1520             : 
    1521           0 :                 if (page_zone(page) != zone)
    1522           0 :                         continue;
    1523             : 
    1524           0 :                 mtype = get_pageblock_migratetype(page);
    1525             : 
    1526           0 :                 if (mtype < MIGRATE_TYPES)
    1527           0 :                         count[mtype]++;
    1528             :         }
    1529             : 
    1530             :         /* Print counts */
    1531           0 :         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
    1532           0 :         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
    1533           0 :                 seq_printf(m, "%12lu ", count[mtype]);
    1534           0 :         seq_putc(m, '\n');
    1535           0 : }
    1536             : 
    1537             : /* Print out the number of pageblocks for each migratetype */
    1538           0 : static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
    1539             : {
    1540           0 :         int mtype;
    1541           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1542             : 
    1543           0 :         seq_printf(m, "\n%-23s", "Number of blocks type ");
    1544           0 :         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
    1545           0 :                 seq_printf(m, "%12s ", migratetype_names[mtype]);
    1546           0 :         seq_putc(m, '\n');
    1547           0 :         walk_zones_in_node(m, pgdat, true, false,
    1548             :                 pagetypeinfo_showblockcount_print);
    1549             : 
    1550           0 :         return 0;
    1551             : }
    1552             : 
    1553             : /*
    1554             :  * Print out the number of pageblocks for each migratetype that contain pages
    1555             :  * of other types. This gives an indication of how well fallbacks are being
    1556             :  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
     1557             :  * to determine what is going on.
    1558             :  */
    1559           0 : static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
    1560             : {
    1561             : #ifdef CONFIG_PAGE_OWNER
    1562             :         int mtype;
    1563             : 
    1564             :         if (!static_branch_unlikely(&page_owner_inited))
    1565             :                 return;
    1566             : 
    1567             :         drain_all_pages(NULL);
    1568             : 
    1569             :         seq_printf(m, "\n%-23s", "Number of mixed blocks ");
    1570             :         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
    1571             :                 seq_printf(m, "%12s ", migratetype_names[mtype]);
    1572             :         seq_putc(m, '\n');
    1573             : 
    1574             :         walk_zones_in_node(m, pgdat, true, true,
    1575             :                 pagetypeinfo_showmixedcount_print);
    1576             : #endif /* CONFIG_PAGE_OWNER */
    1577           0 : }
    1578             : 
    1579             : /*
    1580             :  * This prints out statistics in relation to grouping pages by mobility.
    1581             :  * It is expensive to collect so do not constantly read the file.
    1582             :  */
    1583           0 : static int pagetypeinfo_show(struct seq_file *m, void *arg)
    1584             : {
    1585           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1586             : 
    1587             :         /* check memoryless node */
    1588           0 :         if (!node_state(pgdat->node_id, N_MEMORY))
    1589             :                 return 0;
    1590             : 
    1591           0 :         seq_printf(m, "Page block order: %d\n", pageblock_order);
    1592           0 :         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
    1593           0 :         seq_putc(m, '\n');
    1594           0 :         pagetypeinfo_showfree(m, pgdat);
    1595           0 :         pagetypeinfo_showblockcount(m, pgdat);
    1596           0 :         pagetypeinfo_showmixedcount(m, pgdat);
    1597             : 
    1598           0 :         return 0;
    1599             : }
    1600             : 
    1601             : static const struct seq_operations fragmentation_op = {
    1602             :         .start  = frag_start,
    1603             :         .next   = frag_next,
    1604             :         .stop   = frag_stop,
    1605             :         .show   = frag_show,
    1606             : };
    1607             : 
    1608             : static const struct seq_operations pagetypeinfo_op = {
    1609             :         .start  = frag_start,
    1610             :         .next   = frag_next,
    1611             :         .stop   = frag_stop,
    1612             :         .show   = pagetypeinfo_show,
    1613             : };
    1614             : 
    1615           0 : static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
    1616             : {
    1617           0 :         int zid;
    1618             : 
    1619           0 :         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    1620           0 :                 struct zone *compare = &pgdat->node_zones[zid];
    1621             : 
    1622           0 :                 if (populated_zone(compare))
    1623           0 :                         return zone == compare;
    1624             :         }
    1625             : 
    1626             :         return false;
    1627             : }
    1628             : 
    1629           0 : static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
    1630             :                                                         struct zone *zone)
    1631             : {
    1632           0 :         int i;
    1633           0 :         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
    1634           0 :         if (is_zone_first_populated(pgdat, zone)) {
    1635           0 :                 seq_printf(m, "\n  per-node stats");
    1636           0 :                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
    1637           0 :                         unsigned long pages = node_page_state_pages(pgdat, i);
    1638             : 
    1639           0 :                         if (vmstat_item_print_in_thp(i))
    1640           0 :                                 pages /= HPAGE_PMD_NR;
    1641           0 :                         seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
    1642             :                                    pages);
    1643             :                 }
    1644             :         }
    1645           0 :         seq_printf(m,
    1646             :                    "\n  pages free     %lu"
    1647             :                    "\n        min      %lu"
    1648             :                    "\n        low      %lu"
    1649             :                    "\n        high     %lu"
    1650             :                    "\n        spanned  %lu"
    1651             :                    "\n        present  %lu"
    1652             :                    "\n        managed  %lu"
    1653             :                    "\n        cma      %lu",
    1654             :                    zone_page_state(zone, NR_FREE_PAGES),
    1655           0 :                    min_wmark_pages(zone),
    1656           0 :                    low_wmark_pages(zone),
    1657           0 :                    high_wmark_pages(zone),
    1658             :                    zone->spanned_pages,
    1659             :                    zone->present_pages,
    1660             :                    zone_managed_pages(zone),
    1661             :                    zone_cma_pages(zone));
    1662             : 
    1663           0 :         seq_printf(m,
    1664             :                    "\n        protection: (%ld",
    1665             :                    zone->lowmem_reserve[0]);
    1666           0 :         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
    1667           0 :                 seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
    1668           0 :         seq_putc(m, ')');
    1669             : 
    1670             :         /* If unpopulated, no other information is useful */
    1671           0 :         if (!populated_zone(zone)) {
    1672           0 :                 seq_putc(m, '\n');
    1673           0 :                 return;
    1674             :         }
    1675             : 
    1676           0 :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
    1677           0 :                 seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
    1678             :                            zone_page_state(zone, i));
    1679             : 
    1680             : #ifdef CONFIG_NUMA
    1681           0 :         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
    1682           0 :                 seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
    1683             :                            zone_numa_state_snapshot(zone, i));
    1684             : #endif
    1685             : 
    1686           0 :         seq_printf(m, "\n  pagesets");
    1687           0 :         for_each_online_cpu(i) {
    1688           0 :                 struct per_cpu_pageset *pageset;
    1689             : 
    1690           0 :                 pageset = per_cpu_ptr(zone->pageset, i);
    1691           0 :                 seq_printf(m,
    1692             :                            "\n    cpu: %i"
    1693             :                            "\n              count: %i"
    1694             :                            "\n              high:  %i"
    1695             :                            "\n              batch: %i",
    1696             :                            i,
    1697             :                            pageset->pcp.count,
    1698             :                            pageset->pcp.high,
    1699             :                            pageset->pcp.batch);
    1700             : #ifdef CONFIG_SMP
    1701           0 :                 seq_printf(m, "\n  vm stats threshold: %d",
    1702           0 :                                 pageset->stat_threshold);
    1703             : #endif
    1704             :         }
    1705           0 :         seq_printf(m,
    1706             :                    "\n  node_unreclaimable:  %u"
    1707             :                    "\n  start_pfn:           %lu",
    1708           0 :                    pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
    1709             :                    zone->zone_start_pfn);
    1710           0 :         seq_putc(m, '\n');
    1711             : }
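
/*
 * [Editor's note, illustrative] zoneinfo_show_print() above produces each
 * "Node N, zone XXX" stanza of /proc/zoneinfo; trimmed, with made-up values,
 * a stanza looks roughly like:
 *
 *   Node 0, zone   Normal
 *     pages free     12345
 *           min      1024
 *           low      1280
 *           high     1536
 *           ...
 *         protection: (0, 0, 0, 0)
 *     pagesets
 *       cpu: 0
 *                 count: 42
 *                 high:  186
 *                 batch: 31
 *     node_unreclaimable:  0
 *     start_pfn:           1048576
 */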
    1712             : 
    1713             : /*
    1714             :  * Output information about zones in @pgdat.  All zones are printed regardless
    1715             :  * of whether they are populated or not: lowmem_reserve_ratio operates on the
    1716             :  * set of all zones and userspace would not be aware of such zones if they are
    1717             :  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
    1718             :  */
    1719           0 : static int zoneinfo_show(struct seq_file *m, void *arg)
    1720             : {
    1721           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    1722           0 :         walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
    1723           0 :         return 0;
    1724             : }
    1725             : 
    1726             : static const struct seq_operations zoneinfo_op = {
    1727             :         .start  = frag_start, /* iterate over all zones. The same as in
    1728             :                                * fragmentation. */
    1729             :         .next   = frag_next,
    1730             :         .stop   = frag_stop,
    1731             :         .show   = zoneinfo_show,
    1732             : };
    1733             : 
    1734             : #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
    1735             :                          NR_VM_NUMA_STAT_ITEMS + \
    1736             :                          NR_VM_NODE_STAT_ITEMS + \
    1737             :                          NR_VM_WRITEBACK_STAT_ITEMS + \
    1738             :                          (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
    1739             :                           NR_VM_EVENT_ITEMS : 0))
    1740             : 
    1741           0 : static void *vmstat_start(struct seq_file *m, loff_t *pos)
    1742             : {
    1743           0 :         unsigned long *v;
    1744           0 :         int i;
    1745             : 
    1746           0 :         if (*pos >= NR_VMSTAT_ITEMS)
    1747             :                 return NULL;
    1748             : 
    1749           0 :         BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
    1750           0 :         v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
    1751           0 :         m->private = v;
    1752           0 :         if (!v)
    1753           0 :                 return ERR_PTR(-ENOMEM);
    1754           0 :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
    1755           0 :                 v[i] = global_zone_page_state(i);
    1756           0 :         v += NR_VM_ZONE_STAT_ITEMS;
    1757             : 
    1758             : #ifdef CONFIG_NUMA
    1759           0 :         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
    1760           0 :                 v[i] = global_numa_state(i);
    1761           0 :         v += NR_VM_NUMA_STAT_ITEMS;
    1762             : #endif
    1763             : 
    1764           0 :         for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
    1765           0 :                 v[i] = global_node_page_state_pages(i);
    1766           0 :                 if (vmstat_item_print_in_thp(i))
    1767           0 :                         v[i] /= HPAGE_PMD_NR;
    1768             :         }
    1769           0 :         v += NR_VM_NODE_STAT_ITEMS;
    1770             : 
    1771           0 :         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
    1772             :                             v + NR_DIRTY_THRESHOLD);
    1773           0 :         v += NR_VM_WRITEBACK_STAT_ITEMS;
    1774             : 
    1775             : #ifdef CONFIG_VM_EVENT_COUNTERS
    1776           0 :         all_vm_events(v);
    1777           0 :         v[PGPGIN] /= 2;         /* sectors -> kbytes */
    1778           0 :         v[PGPGOUT] /= 2;
    1779             : #endif
    1780           0 :         return (unsigned long *)m->private + *pos;
    1781             : }
    1782             : 
    1783           0 : static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
    1784             : {
    1785           0 :         (*pos)++;
    1786           0 :         if (*pos >= NR_VMSTAT_ITEMS)
    1787             :                 return NULL;
    1788           0 :         return (unsigned long *)m->private + *pos;
    1789             : }
    1790             : 
    1791           0 : static int vmstat_show(struct seq_file *m, void *arg)
    1792             : {
    1793           0 :         unsigned long *l = arg;
    1794           0 :         unsigned long off = l - (unsigned long *)m->private;
    1795             : 
    1796           0 :         seq_puts(m, vmstat_text[off]);
    1797           0 :         seq_put_decimal_ull(m, " ", *l);
    1798           0 :         seq_putc(m, '\n');
    1799             : 
    1800           0 :         if (off == NR_VMSTAT_ITEMS - 1) {
    1801             :                 /*
    1802             :                  * We've come to the end - add any deprecated counters to avoid
    1803             :                  * breaking userspace which might depend on them being present.
    1804             :                  */
    1805           0 :                 seq_puts(m, "nr_unstable 0\n");
    1806             :         }
    1807           0 :         return 0;
    1808             : }
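
/*
 * [Illustrative userspace companion, not part of vmstat.c] vmstat_show()
 * above emits one "name value" pair per line of /proc/vmstat. A minimal
 * reader that looks up a single counter by name (here "pgfault"):
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char name[128];
        unsigned long long val;
        FILE *f = fopen("/proc/vmstat", "r");

        if (!f)
                return 1;
        while (fscanf(f, "%127s %llu", name, &val) == 2) {
                if (!strcmp(name, "pgfault")) {
                        printf("pgfault = %llu\n", val);
                        break;
                }
        }
        fclose(f);
        return 0;
}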
    1809             : 
    1810           0 : static void vmstat_stop(struct seq_file *m, void *arg)
    1811             : {
    1812           0 :         kfree(m->private);
    1813           0 :         m->private = NULL;
    1814           0 : }
    1815             : 
    1816             : static const struct seq_operations vmstat_op = {
    1817             :         .start  = vmstat_start,
    1818             :         .next   = vmstat_next,
    1819             :         .stop   = vmstat_stop,
    1820             :         .show   = vmstat_show,
    1821             : };
    1822             : #endif /* CONFIG_PROC_FS */
    1823             : 
    1824             : #ifdef CONFIG_SMP
    1825             : static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
    1826             : int sysctl_stat_interval __read_mostly = HZ;
    1827             : 
    1828             : #ifdef CONFIG_PROC_FS
    1829           0 : static void refresh_vm_stats(struct work_struct *work)
    1830             : {
    1831           0 :         refresh_cpu_vm_stats(true);
    1832           0 : }
    1833             : 
    1834           0 : int vmstat_refresh(struct ctl_table *table, int write,
    1835             :                    void *buffer, size_t *lenp, loff_t *ppos)
    1836             : {
    1837           0 :         long val;
    1838           0 :         int err;
    1839           0 :         int i;
    1840             : 
    1841             :         /*
    1842             :          * The regular update, every sysctl_stat_interval, may come later
    1843             :          * than expected: leaving a significant amount in per_cpu buckets.
    1844             :          * This is particularly misleading when checking a quantity of HUGE
    1845             :          * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
    1846             :          * which can equally be echo'ed to or cat'ted from (by root),
    1847             :          * can be used to update the stats just before reading them.
    1848             :          *
    1849             :          * Oh, and since global_zone_page_state() etc. are so careful to hide
    1850             :          * transiently negative values, report an error here if any of
    1851             :          * the stats is negative, so we know to go looking for imbalance.
    1852             :          */
    1853           0 :         err = schedule_on_each_cpu(refresh_vm_stats);
    1854           0 :         if (err)
    1855             :                 return err;
    1856           0 :         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
    1857           0 :                 val = atomic_long_read(&vm_zone_stat[i]);
    1858           0 :                 if (val < 0) {
    1859           0 :                         pr_warn("%s: %s %ld\n",
    1860             :                                 __func__, zone_stat_name(i), val);
    1861           0 :                         err = -EINVAL;
    1862             :                 }
    1863             :         }
    1864             : #ifdef CONFIG_NUMA
    1865           0 :         for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
    1866           0 :                 val = atomic_long_read(&vm_numa_stat[i]);
    1867           0 :                 if (val < 0) {
    1868           0 :                         pr_warn("%s: %s %ld\n",
    1869             :                                 __func__, numa_stat_name(i), val);
    1870           0 :                         err = -EINVAL;
    1871             :                 }
    1872             :         }
    1873             : #endif
    1874           0 :         if (err)
    1875             :                 return err;
    1876           0 :         if (write)
    1877           0 :                 *ppos += *lenp;
    1878             :         else
    1879           0 :                 *lenp = 0;
    1880             :         return 0;
    1881             : }
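
/*
 * [Illustrative usage, not part of vmstat.c] As the comment in vmstat_refresh()
 * notes, the handler sits behind /proc/sys/vm/stat_refresh: writing to (or
 * reading) that file folds the per-cpu diffs before the stats are sampled.
 * A minimal userspace sequence (the write needs root):
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/stat_refresh", "w");

        if (!f) {
                perror("stat_refresh");
                return 1;
        }
        fputs("1\n", f);        /* the written value is ignored; any write refreshes */
        fclose(f);
        /* ... now read /proc/vmstat or /proc/zoneinfo for up-to-date totals ... */
        return 0;
}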
    1882             : #endif /* CONFIG_PROC_FS */
    1883             : 
    1884         157 : static void vmstat_update(struct work_struct *w)
    1885             : {
    1886         157 :         if (refresh_cpu_vm_stats(true)) {
    1887             :                 /*
    1888             :                  * Counters were updated so we expect more updates
    1889             :                  * to occur in the future. Keep on running the
    1890             :                  * update worker thread.
    1891             :                  */
    1892         145 :                 queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
    1893         145 :                                 this_cpu_ptr(&vmstat_work),
    1894             :                                 round_jiffies_relative(sysctl_stat_interval));
    1895             :         }
    1896         157 : }
    1897             : 
    1903             : /*
    1904             :  * Check if the diffs for a certain cpu indicate that
    1905             :  * an update is needed.
    1906             :  */
    1907         986 : static bool need_update(int cpu)
    1908             : {
    1909         986 :         pg_data_t *last_pgdat = NULL;
    1910         986 :         struct zone *zone;
    1911             : 
    1912        3131 :         for_each_populated_zone(zone) {
    1913         986 :                 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
    1914         986 :                 struct per_cpu_nodestat *n;
    1915             :                 /*
    1916             :                  * The fast way of checking if there are any vmstat diffs.
    1917             :                  */
    1918         986 :                 if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
    1919             :                                sizeof(p->vm_stat_diff[0])))
    1920             :                         return true;
    1921             : #ifdef CONFIG_NUMA
    1922         725 :                 if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
    1923             :                                sizeof(p->vm_numa_stat_diff[0])))
    1924             :                         return true;
    1925             : #endif
    1926         715 :                 if (last_pgdat == zone->zone_pgdat)
    1927           0 :                         continue;
    1928         715 :                 last_pgdat = zone->zone_pgdat;
    1929         715 :                 n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
    1930         715 :                 if (memchr_inv(n->vm_node_stat_diff, 0, NR_VM_NODE_STAT_ITEMS *
    1931             :                                sizeof(n->vm_node_stat_diff[0])))
     1932             :                         return true;
    1933             :         }
    1934             :         return false;
    1935             : }
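
/*
 * [Illustrative sketch, not part of vmstat.c] memchr_inv() is a kernel helper
 * returning the first byte that differs from a given value, so the calls
 * above are cheap "is this diff array all zero?" tests. Userspace has no
 * direct equivalent; a simple stand-in with the same intent:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* true if any byte of buf[0..len) is non-zero (what need_update() looks for) */
static bool any_nonzero(const void *buf, size_t len)
{
        const unsigned char *p = buf;

        for (size_t i = 0; i < len; i++)
                if (p[i])
                        return true;
        return false;
}

int main(void)
{
        short diff[8] = { 0 };

        printf("%d\n", any_nonzero(diff, sizeof(diff)));   /* 0: no update needed */
        diff[3] = -2;
        printf("%d\n", any_nonzero(diff, sizeof(diff)));   /* 1: update needed    */
        return 0;
}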
    1936             : 
    1937             : /*
    1938             :  * Switch off vmstat processing and then fold all the remaining differentials
    1939             :  * until the diffs stay at zero. The function is used by NOHZ and can only be
    1940             :  * invoked when tick processing is not active.
    1941             :  */
    1942        1160 : void quiet_vmstat(void)
    1943             : {
    1944        1160 :         if (system_state != SYSTEM_RUNNING)
    1945             :                 return;
    1946             : 
    1947        1124 :         if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
    1948             :                 return;
    1949             : 
    1950         963 :         if (!need_update(smp_processor_id()))
    1951             :                 return;
    1952             : 
    1953             :         /*
    1954             :          * Just refresh counters and do not care about the pending delayed
    1955             :          * vmstat_update. It doesn't fire that often to matter and canceling
     1956             :          * vmstat_update. It doesn't fire often enough to matter and canceling
     1957             :          * it would be too expensive from this path.
     1958             :          * vmstat_shepherd will take care of that for us.
    1959         255 :         refresh_cpu_vm_stats(false);
    1960             : }
    1961             : 
    1962             : /*
    1963             :  * Shepherd worker thread that checks the
    1964             :  * differentials of processors that have their worker
    1965             :  * threads for vm statistics updates disabled because of
    1966             :  * inactivity.
    1967             :  */
    1968             : static void vmstat_shepherd(struct work_struct *w);
    1969             : 
    1970             : static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
    1971             : 
    1972          39 : static void vmstat_shepherd(struct work_struct *w)
    1973             : {
    1974          39 :         int cpu;
    1975             : 
    1976          39 :         get_online_cpus();
    1977             :         /* Check processors whose vmstat worker threads have been disabled */
    1978         234 :         for_each_online_cpu(cpu) {
    1979         156 :                 struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
    1980             : 
    1981         156 :                 if (!delayed_work_pending(dw) && need_update(cpu))
    1982          16 :                         queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
    1983             : 
    1984         156 :                 cond_resched();
    1985             :         }
    1986          39 :         put_online_cpus();
    1987             : 
    1988          39 :         schedule_delayed_work(&shepherd,
    1989             :                 round_jiffies_relative(sysctl_stat_interval));
    1990          39 : }
    1991             : 
    1992           1 : static void __init start_shepherd_timer(void)
    1993             : {
    1994           1 :         int cpu;
    1995             : 
    1996           6 :         for_each_possible_cpu(cpu)
    1997           5 :                 INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
    1998             :                         vmstat_update);
    1999             : 
    2000           1 :         schedule_delayed_work(&shepherd,
    2001             :                 round_jiffies_relative(sysctl_stat_interval));
    2002           1 : }
    2003             : 
    2004           1 : static void __init init_cpu_node_state(void)
    2005             : {
    2006           1 :         int node;
    2007             : 
    2008           2 :         for_each_online_node(node) {
    2009           1 :                 if (cpumask_weight(cpumask_of_node(node)) > 0)
    2010           2 :                         node_set_state(node, N_CPU);
    2011             :         }
    2012           1 : }
    2013             : 
    2014           3 : static int vmstat_cpu_online(unsigned int cpu)
    2015             : {
    2016           3 :         refresh_zone_stat_thresholds();
    2017           3 :         node_set_state(cpu_to_node(cpu), N_CPU);
    2018           3 :         return 0;
    2019             : }
    2020             : 
    2021           0 : static int vmstat_cpu_down_prep(unsigned int cpu)
    2022             : {
    2023           0 :         cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
    2024           0 :         return 0;
    2025             : }
    2026             : 
    2027           0 : static int vmstat_cpu_dead(unsigned int cpu)
    2028             : {
    2029           0 :         const struct cpumask *node_cpus;
    2030           0 :         int node;
    2031             : 
    2032           0 :         node = cpu_to_node(cpu);
    2033             : 
    2034           0 :         refresh_zone_stat_thresholds();
    2035           0 :         node_cpus = cpumask_of_node(node);
    2036           0 :         if (cpumask_weight(node_cpus) > 0)
    2037             :                 return 0;
    2038             : 
    2039           0 :         node_clear_state(node, N_CPU);
    2040           0 :         return 0;
    2041             : }
    2042             : 
    2043             : #endif
    2044             : 
    2045             : struct workqueue_struct *mm_percpu_wq;
    2046             : 
    2047           1 : void __init init_mm_internals(void)
    2048             : {
    2049           1 :         int ret __maybe_unused;
    2050             : 
    2051           1 :         mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
    2052             : 
    2053             : #ifdef CONFIG_SMP
    2054           1 :         ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
    2055             :                                         NULL, vmstat_cpu_dead);
    2056           1 :         if (ret < 0)
    2057           0 :                 pr_err("vmstat: failed to register 'dead' hotplug state\n");
    2058             : 
    2059           1 :         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
    2060             :                                         vmstat_cpu_online,
    2061             :                                         vmstat_cpu_down_prep);
    2062           1 :         if (ret < 0)
    2063           0 :                 pr_err("vmstat: failed to register 'online' hotplug state\n");
    2064             : 
    2065           1 :         get_online_cpus();
    2066           1 :         init_cpu_node_state();
    2067           1 :         put_online_cpus();
    2068             : 
    2069           1 :         start_shepherd_timer();
    2070             : #endif
    2071             : #ifdef CONFIG_PROC_FS
    2072           1 :         proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
    2073           1 :         proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
    2074           1 :         proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
    2075           1 :         proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
    2076             : #endif
    2077           1 : }
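
Under CONFIG_PROC_FS, init_mm_internals() above exposes the VM statistics through /proc/buddyinfo, /proc/pagetypeinfo, /proc/vmstat and /proc/zoneinfo. As a rough userspace sketch (not part of vmstat.c), the name/value pairs in /proc/vmstat can be read like this:

        /* Illustrative userspace sketch: dump the "<counter name> <value>"
         * pairs exported through the /proc/vmstat seq_file registered by
         * init_mm_internals() above. Assumes CONFIG_PROC_FS=y. */
        #include <stdio.h>

        int main(void)
        {
                char name[64];
                unsigned long long value;
                FILE *f = fopen("/proc/vmstat", "r");

                if (!f) {
                        perror("/proc/vmstat");
                        return 1;
                }
                while (fscanf(f, "%63s %llu", name, &value) == 2)
                        printf("%-30s %llu\n", name, value);
                fclose(f);
                return 0;
        }
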
    2078             : 
    2079             : #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
    2080             : 
    2081             : /*
    2082             :  * Return an index indicating how much of the available free memory is
    2083             :  * unusable for an allocation of the requested size.
    2084             :  */
    2085           0 : static int unusable_free_index(unsigned int order,
    2086             :                                 struct contig_page_info *info)
    2087             : {
    2088             :         /* No free memory is interpreted as all free memory being unusable */
    2089           0 :         if (info->free_pages == 0)
    2090             :                 return 1000;
    2091             : 
    2092             :         /*
    2093             :          * Index should be a value between 0 and 1. Return a value to 3
    2094             :          * decimal places.
    2095             :          *
    2096             :          * 0 => no fragmentation
    2097             :          * 1 => high fragmentation
    2098             :          */
    2099           0 :         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
    2100             : 
    2101             : }
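
A minimal standalone sketch of the arithmetic above, assuming a zone with 1000 free pages of which 100 order-3 blocks (800 pages) are large enough for the request; example_info and example_unusable_free_index() are stand-ins for the kernel types and are not part of vmstat.c:

        #include <stdio.h>

        struct example_info {
                unsigned long free_pages;            /* total free pages in the zone */
                unsigned long free_blocks_suitable;  /* free blocks of at least the requested order */
        };

        static int example_unusable_free_index(unsigned int order,
                                               const struct example_info *info)
        {
                if (info->free_pages == 0)
                        return 1000;
                /* Same expression as unusable_free_index() above, without div_u64() */
                return (info->free_pages -
                        (info->free_blocks_suitable << order)) * 1000 /
                       info->free_pages;
        }

        int main(void)
        {
                struct example_info info = { .free_pages = 1000,
                                             .free_blocks_suitable = 100 };
                int index = example_unusable_free_index(3, &info);

                /* (1000 - 800) * 1000 / 1000 = 200, printed as "0.200",
                 * i.e. 20% of the free memory is unusable for an order-3 request. */
                printf("%d.%03d\n", index / 1000, index % 1000);
                return 0;
        }
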
    2102             : 
    2103           0 : static void unusable_show_print(struct seq_file *m,
    2104             :                                         pg_data_t *pgdat, struct zone *zone)
    2105             : {
    2106           0 :         unsigned int order;
    2107           0 :         int index;
    2108           0 :         struct contig_page_info info;
    2109             : 
    2110           0 :         seq_printf(m, "Node %d, zone %8s ",
    2111             :                                 pgdat->node_id,
    2112             :                                 zone->name);
    2113           0 :         for (order = 0; order < MAX_ORDER; ++order) {
    2114           0 :                 fill_contig_page_info(zone, order, &info);
    2115           0 :                 index = unusable_free_index(order, &info);
    2116           0 :                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
    2117             :         }
    2118             : 
    2119           0 :         seq_putc(m, '\n');
    2120           0 : }
    2121             : 
    2122             : /*
    2123             :  * Display unusable free space index
    2124             :  *
    2125             :  * The unusable free space index measures how much of the available free
    2126             :  * memory cannot be used to satisfy an allocation of a given size and is a
    2127             :  * value between 0 and 1. The higher the value, the more of the free memory is
    2128             :  * unusable and, by implication, the worse the external fragmentation is. This
    2129             :  * can be expressed as a percentage by multiplying by 100.
    2130             :  */
    2131           0 : static int unusable_show(struct seq_file *m, void *arg)
    2132             : {
    2133           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    2134             : 
    2135             :         /* check memoryless node */
    2136           0 :         if (!node_state(pgdat->node_id, N_MEMORY))
    2137             :                 return 0;
    2138             : 
    2139           0 :         walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
    2140             : 
    2141           0 :         return 0;
    2142             : }
    2143             : 
    2144             : static const struct seq_operations unusable_sops = {
    2145             :         .start  = frag_start,
    2146             :         .next   = frag_next,
    2147             :         .stop   = frag_stop,
    2148             :         .show   = unusable_show,
    2149             : };
    2150             : 
    2151           0 : DEFINE_SEQ_ATTRIBUTE(unusable);
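
Through the seq_operations and DEFINE_SEQ_ATTRIBUTE() above, unusable_show_print() backs the debugfs file created later as extfrag/unusable_index. With hypothetical values, one line of that file might look like the following (one column per order, 0 through MAX_ORDER - 1, typically 0 to 10):

        Node 0, zone   Normal 0.000 0.000 0.004 0.014 0.045 0.102 0.230 0.472 0.621 0.719 0.853

As the comment above notes, each index reads as a percentage when multiplied by 100: in this made-up line, 0.230 at order 6 would mean roughly 23% of the zone's free memory cannot satisfy an order-6 (256 KiB with 4 KiB pages) allocation.
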
    2152             : 
    2153           0 : static void extfrag_show_print(struct seq_file *m,
    2154             :                                         pg_data_t *pgdat, struct zone *zone)
    2155             : {
    2156           0 :         unsigned int order;
    2157           0 :         int index;
    2158             : 
    2159             :         /* Alloc on stack as interrupts are disabled for zone walk */
    2160           0 :         struct contig_page_info info;
    2161             : 
    2162           0 :         seq_printf(m, "Node %d, zone %8s ",
    2163             :                                 pgdat->node_id,
    2164             :                                 zone->name);
    2165           0 :         for (order = 0; order < MAX_ORDER; ++order) {
    2166           0 :                 fill_contig_page_info(zone, order, &info);
    2167           0 :                 index = __fragmentation_index(order, &info);
    2168           0 :                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
    2169             :         }
    2170             : 
    2171           0 :         seq_putc(m, '\n');
    2172           0 : }
    2173             : 
    2174             : /*
    2175             :  * Display the fragmentation index for orders at which allocations would fail
    2176             :  */
    2177           0 : static int extfrag_show(struct seq_file *m, void *arg)
    2178             : {
    2179           0 :         pg_data_t *pgdat = (pg_data_t *)arg;
    2180             : 
    2181           0 :         walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
    2182             : 
    2183           0 :         return 0;
    2184             : }
    2185             : 
    2186             : static const struct seq_operations extfrag_sops = {
    2187             :         .start  = frag_start,
    2188             :         .next   = frag_next,
    2189             :         .stop   = frag_stop,
    2190             :         .show   = extfrag_show,
    2191             : };
    2192             : 
    2193           0 : DEFINE_SEQ_ATTRIBUTE(extfrag);
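
extfrag_show_print() emits the same per-node, per-zone layout for the fragmentation index computed by __fragmentation_index(), defined earlier in this file. With hypothetical values, a line might read:

        Node 0, zone   Normal -1.000 -1.000 -1.000 0.931 0.945 0.957 0.967 0.975 0.981 0.986 0.990

where, by the index's convention, -1.000 means an allocation of that order could still be satisfied, values toward 0.000 mean a failure would be due to a lack of free memory, and values toward 1.000 mean it would be due to external fragmentation.
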
    2194             : 
    2195           1 : static int __init extfrag_debug_init(void)
    2196             : {
    2197           1 :         struct dentry *extfrag_debug_root;
    2198             : 
    2199           1 :         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
    2200             : 
    2201           1 :         debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
    2202             :                             &unusable_fops);
    2203             : 
    2204           1 :         debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
    2205             :                             &extfrag_fops);
    2206             : 
    2207           1 :         return 0;
    2208             : }
    2209             : 
    2210             : module_init(extfrag_debug_init);
    2211             : #endif
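
extfrag_debug_init() above creates the "extfrag" debugfs directory with the unusable_index and extfrag_index files. Assuming debugfs is mounted at its conventional location, /sys/kernel/debug, and the kernel was built with CONFIG_DEBUG_FS and CONFIG_COMPACTION, the fragmentation index can be dumped with a sketch like this (not part of vmstat.c; reading typically requires root):

        #include <stdio.h>

        int main(void)
        {
                char line[512];
                FILE *f = fopen("/sys/kernel/debug/extfrag/extfrag_index", "r");

                if (!f) {
                        perror("extfrag_index");
                        return 1;
                }
                /* One "Node N, zone NAME idx idx ..." line per populated zone */
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);
                fclose(f);
                return 0;
        }
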

Generated by: LCOV version 1.14