LCOV - code coverage report
Current view: top level - mm - percpu-internal.h (source / functions) Hit Total Coverage
Test: landlock.info Lines: 21 23 91.3 %
Date: 2021-04-22 12:43:58 Functions: 0 0 -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _MM_PERCPU_INTERNAL_H
       3             : #define _MM_PERCPU_INTERNAL_H
       4             : 
       5             : #include <linux/types.h>
       6             : #include <linux/percpu.h>
       7             : 
       8             : /*
       9             :  * There are two chunk types: root and memcg-aware.
      10             :  * Chunks of each type have separate slots list.
      11             :  *
      12             :  * Memcg-aware chunks have an attached vector of obj_cgroup pointers, which is
      13             :  * used to store memcg membership data of a percpu object.  Obj_cgroups are
      14             :  * ref-counted pointers to a memory cgroup with an ability to switch dynamically
      15             :  * to the parent memory cgroup.  This allows to reclaim a deleted memory cgroup
      16             :  * without reclaiming of all outstanding objects, which hold a reference at it.
      17             :  */
      18             : enum pcpu_chunk_type {
      19             :         PCPU_CHUNK_ROOT,
      20             : #ifdef CONFIG_MEMCG_KMEM
      21             :         PCPU_CHUNK_MEMCG,
      22             : #endif
      23             :         PCPU_NR_CHUNK_TYPES,
      24             :         PCPU_FAIL_ALLOC = PCPU_NR_CHUNK_TYPES
      25             : };
      26             : 
      27             : /*
      28             :  * pcpu_block_md is the metadata block struct.
      29             :  * Each chunk's bitmap is split into a number of full blocks.
      30             :  * All units are in terms of bits.
      31             :  *
      32             :  * The scan hint is the largest known contiguous area before the contig hint.
      33             :  * It is not necessarily the actual largest contig hint though.  There is an
      34             :  * invariant that the scan_hint_start > contig_hint_start iff
      35             :  * scan_hint == contig_hint.  This is necessary because when scanning forward,
      36             :  * we don't know if a new contig hint would be better than the current one.
      37             :  */
      38             : struct pcpu_block_md {
      39             :         int                     scan_hint;      /* scan hint for block */
      40             :         int                     scan_hint_start; /* block relative starting
      41             :                                                     position of the scan hint */
      42             :         int                     contig_hint;    /* contig hint for block */
      43             :         int                     contig_hint_start; /* block relative starting
      44             :                                                       position of the contig hint */
      45             :         int                     left_free;      /* size of free space along
      46             :                                                    the left side of the block */
      47             :         int                     right_free;     /* size of free space along
      48             :                                                    the right side of the block */
      49             :         int                     first_free;     /* block position of first free */
      50             :         int                     nr_bits;        /* total bits responsible for */
      51             : };
      52             : 
      53             : struct pcpu_chunk {
      54             : #ifdef CONFIG_PERCPU_STATS
      55             :         int                     nr_alloc;       /* # of allocations */
      56             :         size_t                  max_alloc_size; /* largest allocation size */
      57             : #endif
      58             : 
      59             :         struct list_head        list;           /* linked to pcpu_slot lists */
      60             :         int                     free_bytes;     /* free bytes in the chunk */
      61             :         struct pcpu_block_md    chunk_md;
      62             :         void                    *base_addr;     /* base address of this chunk */
      63             : 
      64             :         unsigned long           *alloc_map;     /* allocation map */
      65             :         unsigned long           *bound_map;     /* boundary map */
      66             :         struct pcpu_block_md    *md_blocks;     /* metadata blocks */
      67             : 
      68             :         void                    *data;          /* chunk data */
      69             :         bool                    immutable;      /* no [de]population allowed */
      70             :         int                     start_offset;   /* the overlap with the previous
      71             :                                                    region to have a page aligned
      72             :                                                    base_addr */
      73             :         int                     end_offset;     /* additional area required to
      74             :                                                    have the region end page
      75             :                                                    aligned */
      76             : #ifdef CONFIG_MEMCG_KMEM
      77             :         struct obj_cgroup       **obj_cgroups;  /* vector of object cgroups */
      78             : #endif
      79             : 
      80             :         int                     nr_pages;       /* # of pages served by this chunk */
      81             :         int                     nr_populated;   /* # of populated pages */
      82             :         int                     nr_empty_pop_pages; /* # of empty populated pages */
      83             :         unsigned long           populated[];    /* populated bitmap */
      84             : };
      85             : 
      86             : extern spinlock_t pcpu_lock;
      87             : 
      88             : extern struct list_head *pcpu_chunk_lists;
      89             : extern int pcpu_nr_slots;
      90             : extern int pcpu_nr_empty_pop_pages;
      91             : 
      92             : extern struct pcpu_chunk *pcpu_first_chunk;
      93             : extern struct pcpu_chunk *pcpu_reserved_chunk;
      94             : 
      95             : /**
      96             :  * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
      97             :  * @chunk: chunk of interest
      98             :  *
      99             :  * This conversion is from the number of physical pages that the chunk
     100             :  * serves to the number of bitmap blocks used.
     101             :  */
     102       13016 : static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
     103             : {
     104       13016 :         return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
     105             : }
     106             : 
     107             : /**
     108             :  * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
     109             :  * @pages: number of physical pages
     110             :  *
     111             :  * This conversion is from physical pages to the number of bits
     112             :  * required in the bitmap.
     113             :  */
     114       10049 : static inline int pcpu_nr_pages_to_map_bits(int pages)
     115             : {
     116       10049 :         return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
     117             : }
     118             : 
     119             : /**
     120             :  * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
     121             :  * @chunk: chunk of interest
     122             :  *
     123             :  * This conversion is from the number of physical pages that the chunk
     124             :  * serves to the number of bits in the bitmap.
     125             :  */
     126       10049 : static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
     127             : {
     128       10049 :         return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
     129             : }
     130             : 
     131             : #ifdef CONFIG_MEMCG_KMEM
     132             : static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
     133             : {
     134             :         if (chunk->obj_cgroups)
     135             :                 return PCPU_CHUNK_MEMCG;
     136             :         return PCPU_CHUNK_ROOT;
     137             : }
     138             : 
     139             : static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
     140             : {
     141             :         return chunk_type == PCPU_CHUNK_MEMCG;
     142             : }
     143             : 
     144             : #else
     145        1573 : static inline enum pcpu_chunk_type pcpu_chunk_type(struct pcpu_chunk *chunk)
     146             : {
     147        1573 :         return PCPU_CHUNK_ROOT;
     148             : }
     149             : 
     150        3765 : static inline bool pcpu_is_memcg_chunk(enum pcpu_chunk_type chunk_type)
     151             : {
     152        3765 :         return false;
     153             : }
     154             : #endif
     155             : 
     156        3765 : static inline struct list_head *pcpu_chunk_list(enum pcpu_chunk_type chunk_type)
     157             : {
     158        3765 :         return &pcpu_chunk_lists[pcpu_nr_slots *
     159        3765 :                                  pcpu_is_memcg_chunk(chunk_type)];
     160             : }
     161             : 
     162             : #ifdef CONFIG_PERCPU_STATS
     163             : 
     164             : #include <linux/spinlock.h>
     165             : 
     166             : struct percpu_stats {
     167             :         u64 nr_alloc;           /* lifetime # of allocations */
     168             :         u64 nr_dealloc;         /* lifetime # of deallocations */
     169             :         u64 nr_cur_alloc;       /* current # of allocations */
     170             :         u64 nr_max_alloc;       /* max # of live allocations */
     171             :         u32 nr_chunks;          /* current # of live chunks */
     172             :         u32 nr_max_chunks;      /* max # of live chunks */
     173             :         size_t min_alloc_size;  /* min allocaiton size */
     174             :         size_t max_alloc_size;  /* max allocation size */
     175             : };
     176             : 
     177             : extern struct percpu_stats pcpu_stats;
     178             : extern struct pcpu_alloc_info pcpu_stats_ai;
     179             : 
     180             : /*
     181             :  * For debug purposes. We don't care about the flexible array.
     182             :  */
     183             : static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
     184             : {
     185             :         memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
     186             : 
     187             :         /* initialize min_alloc_size to unit_size */
     188             :         pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
     189             : }
     190             : 
     191             : /*
     192             :  * pcpu_stats_area_alloc - increment area allocation stats
     193             :  * @chunk: the location of the area being allocated
     194             :  * @size: size of area to allocate in bytes
     195             :  *
     196             :  * CONTEXT:
     197             :  * pcpu_lock.
     198             :  */
     199             : static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
     200             : {
     201             :         lockdep_assert_held(&pcpu_lock);
     202             : 
     203             :         pcpu_stats.nr_alloc++;
     204             :         pcpu_stats.nr_cur_alloc++;
     205             :         pcpu_stats.nr_max_alloc =
     206             :                 max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
     207             :         pcpu_stats.min_alloc_size =
     208             :                 min(pcpu_stats.min_alloc_size, size);
     209             :         pcpu_stats.max_alloc_size =
     210             :                 max(pcpu_stats.max_alloc_size, size);
     211             : 
     212             :         chunk->nr_alloc++;
     213             :         chunk->max_alloc_size = max(chunk->max_alloc_size, size);
     214             : }
     215             : 
     216             : /*
     217             :  * pcpu_stats_area_dealloc - decrement allocation stats
     218             :  * @chunk: the location of the area being deallocated
     219             :  *
     220             :  * CONTEXT:
     221             :  * pcpu_lock.
     222             :  */
     223             : static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
     224             : {
     225             :         lockdep_assert_held(&pcpu_lock);
     226             : 
     227             :         pcpu_stats.nr_dealloc++;
     228             :         pcpu_stats.nr_cur_alloc--;
     229             : 
     230             :         chunk->nr_alloc--;
     231             : }
     232             : 
     233             : /*
     234             :  * pcpu_stats_chunk_alloc - increment chunk stats
     235             :  */
     236             : static inline void pcpu_stats_chunk_alloc(void)
     237             : {
     238             :         unsigned long flags;
     239             :         spin_lock_irqsave(&pcpu_lock, flags);
     240             : 
     241             :         pcpu_stats.nr_chunks++;
     242             :         pcpu_stats.nr_max_chunks =
     243             :                 max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
     244             : 
     245             :         spin_unlock_irqrestore(&pcpu_lock, flags);
     246             : }
     247             : 
     248             : /*
     249             :  * pcpu_stats_chunk_dealloc - decrement chunk stats
     250             :  */
     251             : static inline void pcpu_stats_chunk_dealloc(void)
     252             : {
     253             :         unsigned long flags;
     254             :         spin_lock_irqsave(&pcpu_lock, flags);
     255             : 
     256             :         pcpu_stats.nr_chunks--;
     257             : 
     258             :         spin_unlock_irqrestore(&pcpu_lock, flags);
     259             : }
     260             : 
     261             : #else
     262             : 
     263           1 : static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
     264             : {
     265           1 : }
     266             : 
     267        2172 : static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
     268             : {
     269        2172 : }
     270             : 
     271        1558 : static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
     272             : {
     273        1558 : }
     274             : 
     275           2 : static inline void pcpu_stats_chunk_alloc(void)
     276             : {
     277           2 : }
     278             : 
     279           0 : static inline void pcpu_stats_chunk_dealloc(void)
     280             : {
     281           0 : }
     282             : 
     283             : #endif /* !CONFIG_PERCPU_STATS */
     284             : 
     285             : #endif

Generated by: LCOV version 1.14