LCOV - code coverage report
Current view: top level - include/linux - mempolicy.h (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58

                 Hit    Total    Coverage
Lines:            16       24      66.7 %
Functions:         1        2      50.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * NUMA memory policies for Linux.
       4             :  * Copyright 2003,2004 Andi Kleen SuSE Labs
       5             :  */
       6             : #ifndef _LINUX_MEMPOLICY_H
       7             : #define _LINUX_MEMPOLICY_H 1
       8             : 
       9             : #include <linux/sched.h>
      10             : #include <linux/mmzone.h>
      11             : #include <linux/dax.h>
      12             : #include <linux/slab.h>
      13             : #include <linux/rbtree.h>
      14             : #include <linux/spinlock.h>
      15             : #include <linux/nodemask.h>
      16             : #include <linux/pagemap.h>
      17             : #include <uapi/linux/mempolicy.h>
      18             : 
      19             : struct mm_struct;
      20             : 
      21             : #ifdef CONFIG_NUMA
      22             : 
      23             : /*
      24             :  * Describe a memory policy.
      25             :  *
      26             :  * A mempolicy can be either associated with a process or with a VMA.
      27             :  * For VMA related allocations the VMA policy is preferred, otherwise
      28             :  * the process policy is used. Interrupts ignore the memory policy
      29             :  * of the current process.
      30             :  *
      31             :  * Locking policy for interleave:
      32             :  * In process context there is no locking because only the process accesses
      33             :  * its own state. All vma manipulation is somewhat protected by a down_read on
      34             :  * mmap_lock.
      35             :  *
      36             :  * Freeing policy:
      37             :  * Mempolicy objects are reference counted.  A mempolicy will be freed when
      38             :  * mpol_put() decrements the reference count to zero.
      39             :  *
      40             :  * Duplicating policy objects:
      41             :  * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
      42             :  * to the new storage.  The reference count of the new object is initialized
      43             :  * to 1, representing the caller of mpol_dup().
      44             :  */
      45             : struct mempolicy {
      46             :         atomic_t refcnt;
      47             :         unsigned short mode;    /* See MPOL_* above */
      48             :         unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
      49             :         union {
      50             :                 short            preferred_node; /* preferred */
      51             :                 nodemask_t       nodes;         /* interleave/bind */
      52             :                 /* undefined for default */
      53             :         } v;
      54             :         union {
      55             :                 nodemask_t cpuset_mems_allowed; /* relative to these nodes */
      56             :                 nodemask_t user_nodemask;       /* nodemask passed by user */
      57             :         } w;
      58             : };
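/*
 * Illustrative sketch, not part of the kernel header: which member of the
 * 'v' union is meaningful depends on 'mode'.  MPOL_PREFERRED uses
 * preferred_node, MPOL_BIND and MPOL_INTERLEAVE use the 'nodes' mask, and
 * MPOL_DEFAULT leaves the union undefined, as the field comments above say.
 * example_first_node() is a hypothetical helper; it assumes NUMA_NO_NODE
 * (<linux/numa.h>) and first_node() (<linux/nodemask.h>) are visible here.
 */
static inline int example_first_node(struct mempolicy *pol)
{
	switch (pol->mode) {
	case MPOL_PREFERRED:
		return pol->v.preferred_node;		/* single preferred node */
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		return first_node(pol->v.nodes);	/* lowest node in the mask */
	default:
		return NUMA_NO_NODE;			/* MPOL_DEFAULT etc.: no single node */
	}
}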
      59             : 
      60             : /*
      61             :  * Support for managing mempolicy data objects (clone, copy, destroy)
      62             :  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
      63             :  */
      64             : 
      65             : extern void __mpol_put(struct mempolicy *pol);
      66      115735 : static inline void mpol_put(struct mempolicy *pol)
      67             : {
      68      115735 :         if (pol)
      69           2 :                 __mpol_put(pol);
      70           0 : }
      71             : 
      72             : /*
      73             :  * Does mempolicy pol need explicit unref after use?
      74             :  * Currently only needed for shared policies.
      75             :  */
      76       74037 : static inline int mpol_needs_cond_ref(struct mempolicy *pol)
      77             : {
      78       72052 :         return (pol && (pol->flags & MPOL_F_SHARED));
      79             : }
      80             : 
      81       74037 : static inline void mpol_cond_put(struct mempolicy *pol)
      82             : {
      83       74037 :         if (mpol_needs_cond_ref(pol))
      84           0 :                 __mpol_put(pol);
      85       74037 : }
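/*
 * Illustrative sketch, not part of the kernel header: the "conditional put"
 * pattern described above.  A policy obtained from a lookup may or may not
 * carry an extra reference (currently only shared policies do), so callers
 * pair the lookup with mpol_cond_put() rather than an unconditional
 * mpol_put().  The surrounding function is hypothetical.
 */
static inline void example_use_policy(struct mempolicy *pol)
{
	/* ... read pol->mode, pol->flags, etc. ... */
	mpol_cond_put(pol);	/* drops the ref only if the lookup took one */
}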
      86             : 
      87             : extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
      88       87702 : static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
      89             : {
      90       87702 :         if (pol)
      91           4 :                 pol = __mpol_dup(pol);
      92       87702 :         return pol;
      93             : }
      94             : 
      95             : #define vma_policy(vma) ((vma)->vm_policy)
      96             : 
      97           0 : static inline void mpol_get(struct mempolicy *pol)
      98             : {
      99           0 :         if (pol)
     100           0 :                 atomic_inc(&pol->refcnt);
     101           0 : }
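/*
 * Illustrative sketch, not part of the kernel header: the duplicate-then-own
 * lifecycle around mpol_dup()/mpol_put().  The helper name is hypothetical,
 * and the use of IS_ERR()/PTR_ERR() (<linux/err.h>) assumes __mpol_dup()
 * reports allocation failure via ERR_PTR(); treat the error handling as a
 * sketch rather than a guaranteed contract.
 */
static inline int example_copy_policy(struct mempolicy *src,
				      struct mempolicy **dst)
{
	struct mempolicy *copy = mpol_dup(src);	/* new object starts with refcnt 1 */

	if (IS_ERR(copy))
		return PTR_ERR(copy);
	*dst = copy;				/* caller now owns one reference ... */
	return 0;				/* ... and must mpol_put() it later */
}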
     102             : 
     103             : extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
     104       61577 : static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
     105             : {
     106       61577 :         if (a == b)
     107             :                 return true;
     108           0 :         return __mpol_equal(a, b);
     109             : }
     110             : 
     111             : /*
     112             :  * Tree of shared policies for a shared memory region.
     113             :  * Maintain the policies in a pseudo mm that contains vmas. The vmas
     114             :  * carry the policy. As a special twist the pseudo mm is indexed in pages, not
     115             :  * bytes, so that we can work with shared memory segments bigger than
     116             :  * unsigned long.
     117             :  */
     118             : 
     119             : struct sp_node {
     120             :         struct rb_node nd;
     121             :         unsigned long start, end;
     122             :         struct mempolicy *policy;
     123             : };
     124             : 
     125             : struct shared_policy {
     126             :         struct rb_root root;
     127             :         rwlock_t lock;
     128             : };
     129             : 
     130             : int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
     131             : void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
     132             : int mpol_set_shared_policy(struct shared_policy *info,
     133             :                                 struct vm_area_struct *vma,
     134             :                                 struct mempolicy *new);
     135             : void mpol_free_shared_policy(struct shared_policy *p);
     136             : struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
     137             :                                             unsigned long idx);
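/*
 * Illustrative sketch, not part of the kernel header: looking up the policy
 * that applies to one page of a shared segment.  Because the tree above is
 * indexed in pages, a byte offset is converted to a page index first.  The
 * helper name is hypothetical; it assumes loff_t, pgoff_t and PAGE_SHIFT are
 * visible via the includes here, and it reads a NULL result as "no per-range
 * policy set", which is an assumption of this sketch.
 */
static inline struct mempolicy *
example_policy_at_offset(struct shared_policy *sp, loff_t offset)
{
	pgoff_t idx = offset >> PAGE_SHIFT;	/* byte offset -> page index */

	/* NULL here is taken to mean: fall back to the task/default policy. */
	return mpol_shared_policy_lookup(sp, idx);
}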
     138             : 
     139             : struct mempolicy *get_task_policy(struct task_struct *p);
     140             : struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
     141             :                 unsigned long addr);
     142             : bool vma_policy_mof(struct vm_area_struct *vma);
     143             : 
     144             : extern void numa_default_policy(void);
     145             : extern void numa_policy_init(void);
     146             : extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
     147             : extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
     148             : 
     149             : extern int huge_node(struct vm_area_struct *vma,
     150             :                                 unsigned long addr, gfp_t gfp_flags,
     151             :                                 struct mempolicy **mpol, nodemask_t **nodemask);
     152             : extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
     153             : extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
     154             :                                 const nodemask_t *mask);
     155             : extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
     156             : 
     157             : static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
     158             : {
     159             :         struct mempolicy *mpol = get_task_policy(current);
     160             : 
     161             :         return policy_nodemask(gfp, mpol);
     162             : }
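/*
 * Illustrative sketch, not part of the kernel header: one plausible consumer
 * of policy_nodemask_current().  example_node_allowed() is hypothetical, and
 * reading a NULL mask as "the policy imposes no nodemask restriction" is an
 * assumption of this sketch.
 */
static inline bool example_node_allowed(int nid, gfp_t gfp)
{
	nodemask_t *mask = policy_nodemask_current(gfp);

	return !mask || node_isset(nid, *mask);	/* NULL mask: any node is fine */
}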
     163             : 
     164             : extern unsigned int mempolicy_slab_node(void);
     165             : 
     166             : extern enum zone_type policy_zone;
     167             : 
     168           2 : static inline void check_highest_zone(enum zone_type k)
     169             : {
     170           2 :         if (k > policy_zone && k != ZONE_MOVABLE)
     171           0 :                 policy_zone = k;
     172             : }
     173             : 
     174             : int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
     175             :                      const nodemask_t *to, int flags);
     176             : 
     177             : 
     178             : #ifdef CONFIG_TMPFS
     179             : extern int mpol_parse_str(char *str, struct mempolicy **mpol);
     180             : #endif
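#ifdef CONFIG_TMPFS
/*
 * Illustrative sketch, not part of the kernel header: wrapping mpol_parse_str()
 * so a parse failure yields NULL.  "interleave:0-3" is shown only as a
 * plausible tmpfs-style mpol= string; treat the exact grammar as defined by
 * mm/mempolicy.c rather than by this sketch.  The string may be modified
 * during parsing, so a writable buffer is assumed.
 */
static inline struct mempolicy *example_parse_policy(char *str)
{
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(str, &mpol))		/* non-zero return means parse error */
		return NULL;
	return mpol;
}
#endif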
     181             : 
     182             : extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
     183             : 
     184             : /* Check if a vma is migratable */
     185             : extern bool vma_migratable(struct vm_area_struct *vma);
     186             : 
     187             : extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
     188             : extern void mpol_put_task_policy(struct task_struct *);
     189             : 
     190             : #else
     191             : 
     192             : struct mempolicy {};
     193             : 
     194             : static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
     195             : {
     196             :         return true;
     197             : }
     198             : 
     199             : static inline void mpol_put(struct mempolicy *p)
     200             : {
     201             : }
     202             : 
     203             : static inline void mpol_cond_put(struct mempolicy *pol)
     204             : {
     205             : }
     206             : 
     207             : static inline void mpol_get(struct mempolicy *pol)
     208             : {
     209             : }
     210             : 
     211             : struct shared_policy {};
     212             : 
     213             : static inline void mpol_shared_policy_init(struct shared_policy *sp,
     214             :                                                 struct mempolicy *mpol)
     215             : {
     216             : }
     217             : 
     218             : static inline void mpol_free_shared_policy(struct shared_policy *p)
     219             : {
     220             : }
     221             : 
     222             : static inline struct mempolicy *
     223             : mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
     224             : {
     225             :         return NULL;
     226             : }
     227             : 
     228             : #define vma_policy(vma) NULL
     229             : 
     230             : static inline int
     231             : vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
     232             : {
     233             :         return 0;
     234             : }
     235             : 
     236             : static inline void numa_policy_init(void)
     237             : {
     238             : }
     239             : 
     240             : static inline void numa_default_policy(void)
     241             : {
     242             : }
     243             : 
     244             : static inline void mpol_rebind_task(struct task_struct *tsk,
     245             :                                 const nodemask_t *new)
     246             : {
     247             : }
     248             : 
     249             : static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
     250             : {
     251             : }
     252             : 
     253             : static inline int huge_node(struct vm_area_struct *vma,
     254             :                                 unsigned long addr, gfp_t gfp_flags,
     255             :                                 struct mempolicy **mpol, nodemask_t **nodemask)
     256             : {
     257             :         *mpol = NULL;
     258             :         *nodemask = NULL;
     259             :         return 0;
     260             : }
     261             : 
     262             : static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
     263             : {
     264             :         return false;
     265             : }
     266             : 
     267             : static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
     268             :                                    const nodemask_t *to, int flags)
     269             : {
     270             :         return 0;
     271             : }
     272             : 
     273             : static inline void check_highest_zone(int k)
     274             : {
     275             : }
     276             : 
     277             : #ifdef CONFIG_TMPFS
     278             : static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
     279             : {
     280             :         return 1;       /* error */
     281             : }
     282             : #endif
     283             : 
     284             : static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
     285             :                                  unsigned long address)
     286             : {
     287             :         return -1; /* no node preference */
     288             : }
     289             : 
     290             : static inline void mpol_put_task_policy(struct task_struct *task)
     291             : {
     292             : }
     293             : 
     294             : static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
     295             : {
     296             :         return NULL;
     297             : }
     298             : #endif /* CONFIG_NUMA */
     299             : #endif

Generated by: LCOV version 1.14