/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
        /*
         * The implicit full barrier implied by atomic_dec_and_test() is
         * required by the membarrier system call before returning to
         * user-space, after storing to rq->curr.
         */
        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
                __mmdrop(mm);
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
        return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as mmput() but performs the slow path from an async context. Can
 * also be called from atomic context.
 */
void mmput_async(struct mm_struct *);
#endif
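/*
 * Illustrative sketch (not part of this header): a typical pattern for a
 * long-lived reference to an mm. The control flow and variable names below
 * are hypothetical; only mmgrab(), mmget_not_zero(), mmput() and mmdrop()
 * come from this header.
 *
 *	// Keep the mm_struct itself alive indefinitely (mm_count).
 *	mmgrab(mm);
 *	...
 *	// Before touching the address space, take an mm_users reference;
 *	// this fails once the last user of the address space has gone away.
 *	if (mmget_not_zero(mm)) {
 *		// walk or modify the address space here
 *		mmput(mm);
 *	}
 *	...
 *	// Finally drop the long-term pin taken with mmgrab().
 *	mmdrop(mm);
 */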
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
                       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                               unsigned long len, unsigned long pgoff,
                               unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
        bool ret;

        /*
         * Need RCU to access ->real_parent if CLONE_VM was used along with
         * CLONE_PARENT.
         *
         * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
         * imply CLONE_VM.
         *
         * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
         * ->real_parent is not necessarily the task doing vfork(), so in
         * theory we can't rely on task_lock() if we want to dereference it.
         *
         * And in this case we can't trust the real_parent->mm == tsk->mm
         * check, it can be a false negative. But we do not care, if init or
         * another oom-unkillable task does this it should blame itself.
         */
        rcu_read_lock();
        ret = tsk->vfork_done &&
                        rcu_dereference(tsk->real_parent)->mm == tsk->mm;
        rcu_read_unlock();

        return ret;
}
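/*
 * Illustrative sketch (not part of this header): how a caller might safely
 * look at another task's address space. The surrounding code is hypothetical;
 * mm_access() and mmput() come from this header and the mode constant from
 * <linux/ptrace.h>. mm_access() typically returns NULL when the task has no
 * mm, an ERR_PTR() on failure, and otherwise an mm carrying an mm_users
 * reference that must be dropped with mmput().
 *
 *	struct mm_struct *mm;
 *
 *	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
 *	if (IS_ERR_OR_NULL(mm))
 *		return mm ? PTR_ERR(mm) : -ESRCH;
 *	// inspect the address space here
 *	mmput(mm);
 */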
/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
        unsigned int pflags = READ_ONCE(current->flags);

        if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
                /*
                 * NOIO implies both NOIO and NOFS and it is a weaker context
                 * so always make sure it takes precedence.
                 */
                if (pflags & PF_MEMALLOC_NOIO)
                        flags &= ~(__GFP_IO | __GFP_FS);
                else if (pflags & PF_MEMALLOC_NOFS)
                        flags &= ~__GFP_FS;
        }
        return flags;
}

#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
        fs_reclaim_acquire(gfp_mask);
        fs_reclaim_release(gfp_mask);

        might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
        current->flags |= PF_MEMALLOC_NOIO;
        return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that the given flags value is the return value from
 * the pairing memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
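/*
 * Illustrative sketch (not part of this header): scoping allocations to
 * GFP_NOIO, e.g. on a code path that must not recurse into the block layer.
 * The surrounding code is hypothetical; memalloc_noio_save() and
 * memalloc_noio_restore() come from this header, and allocations inside the
 * scope have __GFP_IO stripped via current_gfp_context() in the allocator.
 * memalloc_nofs_save()/memalloc_nofs_restore() below follow the same pattern
 * for __GFP_FS.
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	// any allocation here behaves as if GFP_NOIO had been passed
 *	buf = kmalloc(size, GFP_KERNEL);
 *	...
 *	memalloc_noio_restore(noio_flags);
 */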
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
        current->flags |= PF_MEMALLOC_NOFS;
        return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that the given flags value is the return value from
 * the pairing memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC;
        current->flags |= PF_MEMALLOC;
        return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
        unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

        current->flags |= PF_MEMALLOC_NOCMA;
        return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
        current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
        return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        struct mem_cgroup *old;

        if (in_interrupt()) {
                old = this_cpu_read(int_active_memcg);
                this_cpu_write(int_active_memcg, memcg);
        } else {
                old = current->active_memcg;
                current->active_memcg = memcg;
        }

        return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
        return NULL;
}
#endif
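/*
 * Illustrative sketch (not part of this header): charging __GFP_ACCOUNT
 * allocations to a remote memcg. The surrounding code and the "memcg"
 * variable are hypothetical; set_active_memcg() comes from this header.
 * Because the scope can nest, the previous value must be saved and put back.
 *
 *	struct mem_cgroup *old_memcg;
 *
 *	old_memcg = set_active_memcg(memcg);
 *	// __GFP_ACCOUNT allocations here are charged to memcg
 *	obj = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	...
 *	set_active_memcg(old_memcg);
 */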
#ifdef CONFIG_MEMBARRIER
enum {
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2),
        MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6),
        MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7),
};

enum {
        MEMBARRIER_FLAG_SYNC_CORE = (1U << 0),
        MEMBARRIER_FLAG_RSEQ = (1U << 1),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
        if (current->mm != mm)
                return;
        if (likely(!(atomic_read(&mm->membarrier_state) &
                     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
                return;
        sync_core_before_usermode();
}

extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);

#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
                                             struct mm_struct *next,
                                             struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
static inline void membarrier_update_current_mm(struct mm_struct *next_mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */