Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_SCHED_SIGNAL_H
3 : #define _LINUX_SCHED_SIGNAL_H
4 :
5 : #include <linux/rculist.h>
6 : #include <linux/signal.h>
7 : #include <linux/sched.h>
8 : #include <linux/sched/jobctl.h>
9 : #include <linux/sched/task.h>
10 : #include <linux/cred.h>
11 : #include <linux/refcount.h>
12 : #include <linux/posix-timers.h>
13 : #include <linux/mm_types.h>
14 : #include <asm/ptrace.h>
15 :
16 : /*
17 : * Types defining task->signal and task->sighand and APIs using them:
18 : */
19 :
20 : struct sighand_struct {
21 : spinlock_t siglock;
22 : refcount_t count;
23 : wait_queue_head_t signalfd_wqh;
24 : struct k_sigaction action[_NSIG];
25 : };
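
The siglock above serializes every update to the thread group's signal state (pending sets, handlers, job control). A minimal sketch of reading an installed handler under that lock; the helper name is hypothetical, and indexing action[] by sig - 1 follows the kernel's usual convention:

    static __sighandler_t example_get_handler(struct task_struct *t, int sig)
    {
        __sighandler_t h;

        spin_lock_irq(&t->sighand->siglock);
        h = t->sighand->action[sig - 1].sa.sa_handler; /* action[] is 0-based */
        spin_unlock_irq(&t->sighand->siglock);
        return h;
    }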
26 :
27 : /*
28 : * Per-process accounting stats:
29 : */
30 : struct pacct_struct {
31 : int ac_flag;
32 : long ac_exitcode;
33 : unsigned long ac_mem;
34 : u64 ac_utime, ac_stime;
35 : unsigned long ac_minflt, ac_majflt;
36 : };
37 :
38 : struct cpu_itimer {
39 : u64 expires;
40 : u64 incr;
41 : };
42 :
43 : /*
44 : * This is the atomic variant of task_cputime, which can be used for
45 : * storing and updating task_cputime statistics without locking.
46 : */
47 : struct task_cputime_atomic {
48 : atomic64_t utime;
49 : atomic64_t stime;
50 : atomic64_t sum_exec_runtime;
51 : };
52 :
53 : #define INIT_CPUTIME_ATOMIC \
54 : (struct task_cputime_atomic) { \
55 : .utime = ATOMIC64_INIT(0), \
56 : .stime = ATOMIC64_INIT(0), \
57 : .sum_exec_runtime = ATOMIC64_INIT(0), \
58 : }
59 : /**
60 : * struct thread_group_cputimer - thread group interval timer counts
61 : * @cputime_atomic: atomic thread group interval timers.
62 : *
63 : * This structure contains the version of task_cputime, above, that is
64 : * used for thread group CPU timer calculations.
65 : */
66 : struct thread_group_cputimer {
67 : struct task_cputime_atomic cputime_atomic;
68 : };
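
A sketch of how these counters are used: initialize with INIT_CPUTIME_ATOMIC, then accumulate lock-free with atomic64_add(). Both helper names are hypothetical:

    static void example_cputimer_init(struct thread_group_cputimer *ct)
    {
        /* plain struct assignment; only safe before other CPUs can see ct */
        ct->cputime_atomic = INIT_CPUTIME_ATOMIC;
    }

    static void example_cputimer_add_utime(struct thread_group_cputimer *ct, u64 delta)
    {
        atomic64_add(delta, &ct->cputime_atomic.utime); /* no lock needed */
    }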
69 :
70 : struct multiprocess_signals {
71 : sigset_t signal;
72 : struct hlist_node node;
73 : };
74 :
75 : /*
76 : * NOTE! "signal_struct" does not have its own
77 : * locking, because a shared signal_struct always
78 : * implies a shared sighand_struct, so locking
79 : * sighand_struct is always a proper superset of
80 : * the locking of signal_struct.
81 : */
82 : struct signal_struct {
83 : refcount_t sigcnt;
84 : atomic_t live;
85 : int nr_threads;
86 : struct list_head thread_head;
87 :
88 : wait_queue_head_t wait_chldexit; /* for wait4() */
89 :
90 : /* current thread group signal load-balancing target: */
91 : struct task_struct *curr_target;
92 :
93 : /* shared signal handling: */
94 : struct sigpending shared_pending;
95 :
96 : /* For collecting multiprocess signals during fork */
97 : struct hlist_head multiprocess;
98 :
99 : /* thread group exit support */
100 : int group_exit_code;
101 : /* overloaded:
102 : * - notify group_exit_task when ->count is equal to notify_count
103 : * - everyone except group_exit_task is stopped during signal delivery
104 : * of fatal signals, group_exit_task processes the signal.
105 : */
106 : int notify_count;
107 : struct task_struct *group_exit_task;
108 :
109 : /* thread group stop support, overloads group_exit_code too */
110 : int group_stop_count;
111 : unsigned int flags; /* see SIGNAL_* flags below */
112 :
113 : /*
114 : * PR_SET_CHILD_SUBREAPER marks a process, like a service
115 : * manager, to re-parent orphan (double-forking) child processes
116 : * to this process instead of 'init'. The service manager can
117 : * receive SIGCHLD signals and investigate the process until it
118 : * calls wait(). All children of this
119 : * process will inherit a flag if they should look for a
120 : * child_subreaper process at exit.
121 : */
122 : unsigned int is_child_subreaper:1;
123 : unsigned int has_child_subreaper:1;
124 :
125 : #ifdef CONFIG_POSIX_TIMERS
126 :
127 : /* POSIX.1b Interval Timers */
128 : int posix_timer_id;
129 : struct list_head posix_timers;
130 :
131 : /* ITIMER_REAL timer for the process */
132 : struct hrtimer real_timer;
133 : ktime_t it_real_incr;
134 :
135 : /*
136 : * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
137 : * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
138 : * values are defined to 0 and 1 respectively
139 : */
140 : struct cpu_itimer it[2];
141 :
142 : /*
143 : * Thread group totals for process CPU timers.
144 : * See thread_group_cputimer(), et al, for details.
145 : */
146 : struct thread_group_cputimer cputimer;
147 :
148 : #endif
149 : /* Empty if CONFIG_POSIX_TIMERS=n */
150 : struct posix_cputimers posix_cputimers;
151 :
152 : /* PID/PID hash table linkage. */
153 : struct pid *pids[PIDTYPE_MAX];
154 :
155 : #ifdef CONFIG_NO_HZ_FULL
156 : atomic_t tick_dep_mask;
157 : #endif
158 :
159 : struct pid *tty_old_pgrp;
160 :
161 : /* boolean value for session group leader */
162 : int leader;
163 :
164 : struct tty_struct *tty; /* NULL if no tty */
165 :
166 : #ifdef CONFIG_SCHED_AUTOGROUP
167 : struct autogroup *autogroup;
168 : #endif
169 : /*
170 : * Cumulative resource counters for dead threads in the group,
171 : * and for reaped dead child processes forked by this group.
172 : * Live threads maintain their own counters and add to these
173 : * in __exit_signal, except for the group leader.
174 : */
175 : seqlock_t stats_lock;
176 : u64 utime, stime, cutime, cstime;
177 : u64 gtime;
178 : u64 cgtime;
179 : struct prev_cputime prev_cputime;
180 : unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
181 : unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
182 : unsigned long inblock, oublock, cinblock, coublock;
183 : unsigned long maxrss, cmaxrss;
184 : struct task_io_accounting ioac;
185 :
186 : /*
187 : * Cumulative ns of scheduled CPU time for dead threads in the
188 : * group, not including a zombie group leader. (This only differs
189 : * from jiffies_to_ns(utime + stime) if sched_clock uses something
190 : * other than jiffies.)
191 : */
192 : unsigned long long sum_sched_runtime;
193 :
194 : /*
195 : * We don't bother to synchronize most readers of this at all,
196 : * because there is no reader checking a limit that actually needs
197 : * to get both rlim_cur and rlim_max atomically, and either one
198 : * alone is a single word that can safely be read normally.
199 : * getrlimit/setrlimit use task_lock(current->group_leader) to
200 : * protect this instead of the siglock, because they really
201 : * have no need to disable irqs.
202 : */
203 : struct rlimit rlim[RLIM_NLIMITS];
204 :
205 : #ifdef CONFIG_BSD_PROCESS_ACCT
206 : struct pacct_struct pacct; /* per-process accounting information */
207 : #endif
208 : #ifdef CONFIG_TASKSTATS
209 : struct taskstats *stats;
210 : #endif
211 : #ifdef CONFIG_AUDIT
212 : unsigned audit_tty;
213 : struct tty_audit_buf *tty_audit_buf;
214 : #endif
215 :
216 : /*
217 : * Thread is the potential origin of an oom condition; kill first on
218 : * oom
219 : */
220 : bool oom_flag_origin;
221 : short oom_score_adj; /* OOM kill score adjustment */
222 : short oom_score_adj_min; /* OOM kill score adjustment min value.
223 : * Only settable by CAP_SYS_RESOURCE. */
224 : struct mm_struct *oom_mm; /* recorded mm when the thread group got
225 : * killed by the oom killer */
226 :
227 : struct mutex cred_guard_mutex; /* guard against foreign influences on
228 : * credential calculations
229 : * (notably ptrace).
230 : * Deprecated; do not use in new code.
231 : * Use exec_update_lock instead.
232 : */
233 : struct rw_semaphore exec_update_lock; /* Held while task_struct is
234 : * being updated during exec,
235 : * and may have inconsistent
236 : * permissions.
237 : */
238 : } __randomize_layout;
239 :
240 : /*
241 : * Bits in flags field of signal_struct.
242 : */
243 : #define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
244 : #define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
245 : #define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
246 : #define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
247 : /*
248 : * Pending notifications to parent.
249 : */
250 : #define SIGNAL_CLD_STOPPED 0x00000010
251 : #define SIGNAL_CLD_CONTINUED 0x00000020
252 : #define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
253 :
254 : #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
255 :
256 : #define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
257 : SIGNAL_STOP_CONTINUED)
258 :
259 0 : static inline void signal_set_stop_flags(struct signal_struct *sig,
260 : unsigned int flags)
261 : {
262 0 : WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
263 0 : sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
264 0 : }
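
For example, job-control code that has just completed a group stop records the fact roughly like this (a sketch, not a quote of kernel/signal.c; the caller must hold sighand->siglock and signr is a hypothetical variable holding the stopping signal):

    task->signal->group_exit_code = signr; /* overloaded field, see above */
    signal_set_stop_flags(task->signal, SIGNAL_STOP_STOPPED);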
265 :
266 : /* If true, all threads except ->group_exit_task have pending SIGKILL */
267 482 : static inline int signal_group_exit(const struct signal_struct *sig)
268 : {
269 482 : return (sig->flags & SIGNAL_GROUP_EXIT) ||
270 482 : (sig->group_exit_task != NULL);
271 : }
272 :
273 : extern void flush_signals(struct task_struct *);
274 : extern void ignore_signals(struct task_struct *);
275 : extern void flush_signal_handlers(struct task_struct *, int force_default);
276 : extern int dequeue_signal(struct task_struct *task,
277 : sigset_t *mask, kernel_siginfo_t *info);
278 :
279 : static inline int kernel_dequeue_signal(void)
280 : {
281 : struct task_struct *task = current;
282 : kernel_siginfo_t __info;
283 : int ret;
284 :
285 : spin_lock_irq(&task->sighand->siglock);
286 : ret = dequeue_signal(task, &task->blocked, &__info);
287 : spin_unlock_irq(&task->sighand->siglock);
288 :
289 : return ret;
290 : }
291 :
292 : static inline void kernel_signal_stop(void)
293 : {
294 : spin_lock_irq(&current->sighand->siglock);
295 : if (current->jobctl & JOBCTL_STOP_DEQUEUED)
296 : set_special_state(TASK_STOPPED);
297 : spin_unlock_irq(&current->sighand->siglock);
298 :
299 : schedule();
300 : }
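
These two helpers let a kernel thread that works on behalf of a user process take part in job control. A sketch of the worker-loop pattern (do_one_work_item() is hypothetical):

    for (;;) {
        if (signal_pending(current)) {
            int sig = kernel_dequeue_signal();

            if (sig == SIGSTOP)
                kernel_signal_stop(); /* sleeps until SIGCONT */
        }
        if (!do_one_work_item()) /* hypothetical */
            break;
    }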
301 : #ifdef __ARCH_SI_TRAPNO
302 : # define ___ARCH_SI_TRAPNO(_a1) , _a1
303 : #else
304 : # define ___ARCH_SI_TRAPNO(_a1)
305 : #endif
306 : #ifdef __ia64__
307 : # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
308 : #else
309 : # define ___ARCH_SI_IA64(_a1, _a2, _a3)
310 : #endif
311 :
312 : int force_sig_fault_to_task(int sig, int code, void __user *addr
313 : ___ARCH_SI_TRAPNO(int trapno)
314 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
315 : , struct task_struct *t);
316 : int force_sig_fault(int sig, int code, void __user *addr
317 : ___ARCH_SI_TRAPNO(int trapno)
318 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
319 : int send_sig_fault(int sig, int code, void __user *addr
320 : ___ARCH_SI_TRAPNO(int trapno)
321 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
322 : , struct task_struct *t);
323 :
324 : int force_sig_mceerr(int code, void __user *, short);
325 : int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
326 :
327 : int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
328 : int force_sig_pkuerr(void __user *addr, u32 pkey);
329 :
330 : int force_sig_ptrace_errno_trap(int errno, void __user *addr);
331 :
332 : extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
333 : extern void force_sigsegv(int sig);
334 : extern int force_sig_info(struct kernel_siginfo *);
335 : extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
336 : extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
337 : extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
338 : const struct cred *);
339 : extern int kill_pgrp(struct pid *pid, int sig, int priv);
340 : extern int kill_pid(struct pid *pid, int sig, int priv);
341 : extern __must_check bool do_notify_parent(struct task_struct *, int);
342 : extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
343 : extern void force_sig(int);
344 : extern int send_sig(int, struct task_struct *, int);
345 : extern int zap_other_threads(struct task_struct *p);
346 : extern struct sigqueue *sigqueue_alloc(void);
347 : extern void sigqueue_free(struct sigqueue *);
348 : extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
349 : extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
350 :
351 0 : static inline int restart_syscall(void)
352 : {
353 0 : set_tsk_thread_flag(current, TIF_SIGPENDING);
354 0 : return -ERESTARTNOINTR;
355 : }
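
Typical use: when a syscall detects it raced with concurrent setup and must be retried from scratch, it can return restart_syscall() so the retry is invisible to user space (sketch; the condition is hypothetical):

    if (setup_in_progress)        /* hypothetical race check */
        return restart_syscall(); /* transparently re-enters the syscall */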
356 :
357 207203 : static inline int task_sigpending(struct task_struct *p)
358 : {
359 411589 : return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
360 : }
361 :
362 27575 : static inline int signal_pending(struct task_struct *p)
363 : {
364 : /*
365 : * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
366 : * behavior in terms of ensuring that we break out of wait loops
367 : * so that notify signal callbacks can be processed.
368 : */
369 27575 : if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
370 : return 1;
371 27575 : return task_sigpending(p);
372 : }
373 :
374 68 : static inline int __fatal_signal_pending(struct task_struct *p)
375 : {
376 68 : return unlikely(sigismember(&p->pending.signal, SIGKILL));
377 : }
378 :
379 176822 : static inline int fatal_signal_pending(struct task_struct *p)
380 : {
381 176822 : return task_sigpending(p) && __fatal_signal_pending(p);
382 : }
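
The common pattern in long-running kernel loops: poll fatal_signal_pending() so a SIGKILLed task can die promptly instead of finishing the whole job (sketch; the loop condition is hypothetical):

    while (bytes_left) { /* hypothetical */
        if (fatal_signal_pending(current))
            return -EINTR;
        cond_resched();
        /* process one bounded chunk of work */
    }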
383 :
384 25742 : static inline int signal_pending_state(long state, struct task_struct *p)
385 : {
386 25742 : if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
387 : return 0;
388 8200 : if (!signal_pending(p))
389 : return 0;
390 :
391 35 : return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
392 : }
393 :
394 : /*
395 : * This should only be used in fault handlers to decide whether we
396 : * should stop the current fault routine to handle the signals
397 : * instead, especially with the case where we've got interrupted with
398 : * a VM_FAULT_RETRY.
399 : */
400 296337 : static inline bool fault_signal_pending(vm_fault_t fault_flags,
401 : struct pt_regs *regs)
402 : {
403 296337 : return unlikely((fault_flags & VM_FAULT_RETRY) &&
404 : (fatal_signal_pending(current) ||
405 : (user_mode(regs) && signal_pending(current))));
406 : }
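
A sketch of the intended call site in an architecture's page-fault handler (handle_mm_fault() has taken a struct pt_regs * argument since v5.9; surrounding details elided):

    fault = handle_mm_fault(vma, address, flags, regs);
    if (fault_signal_pending(fault, regs))
        return; /* the signal is handled on the way back to user mode */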
407 :
408 : /*
409 : * Reevaluate whether the task has signals pending delivery.
410 : * Wake the task if so.
411 : * This is required every time the blocked sigset_t changes.
412 : * Callers must hold sighand->siglock.
413 : */
414 : extern void recalc_sigpending_and_wake(struct task_struct *t);
415 : extern void recalc_sigpending(void);
416 : extern void calculate_sigpending(void);
417 :
418 : extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
419 :
420 465 : static inline void signal_wake_up(struct task_struct *t, bool resume)
421 : {
422 930 : signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
423 465 : }
424 12 : static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
425 : {
426 12 : signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
427 12 : }
428 :
429 : void task_join_group_stop(struct task_struct *task);
430 :
431 : #ifdef TIF_RESTORE_SIGMASK
432 : /*
433 : * Legacy restore_sigmask accessors. These are inefficient on
434 : * SMP architectures because they require atomic operations.
435 : */
436 :
437 : /**
438 : * set_restore_sigmask() - make sure saved_sigmask processing gets done
439 : *
440 : * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
441 : * will run before returning to user mode, to process the flag. For
442 : * all callers, TIF_SIGPENDING is already set or it's no harm to set
443 : * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
444 : * arch code will notice on return to user mode, in case those bits
445 : * are scarce. We set TIF_SIGPENDING here to ensure that the arch
446 : * signal code always gets run when TIF_RESTORE_SIGMASK is set.
447 : */
448 : static inline void set_restore_sigmask(void)
449 : {
450 : set_thread_flag(TIF_RESTORE_SIGMASK);
451 : }
452 :
453 : static inline void clear_tsk_restore_sigmask(struct task_struct *task)
454 : {
455 : clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
456 : }
457 :
458 : static inline void clear_restore_sigmask(void)
459 : {
460 : clear_thread_flag(TIF_RESTORE_SIGMASK);
461 : }
462 : static inline bool test_tsk_restore_sigmask(struct task_struct *task)
463 : {
464 : return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
465 : }
466 : static inline bool test_restore_sigmask(void)
467 : {
468 : return test_thread_flag(TIF_RESTORE_SIGMASK);
469 : }
470 : static inline bool test_and_clear_restore_sigmask(void)
471 : {
472 : return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
473 : }
474 :
475 : #else /* TIF_RESTORE_SIGMASK */
476 :
477 : /* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
478 3 : static inline void set_restore_sigmask(void)
479 : {
480 0 : current->restore_sigmask = true;
481 : }
482 0 : static inline void clear_tsk_restore_sigmask(struct task_struct *task)
483 : {
484 0 : task->restore_sigmask = false;
485 : }
486 461 : static inline void clear_restore_sigmask(void)
487 : {
488 461 : current->restore_sigmask = false;
489 : }
490 1265 : static inline bool test_restore_sigmask(void)
491 : {
492 1265 : return current->restore_sigmask;
493 : }
494 0 : static inline bool test_tsk_restore_sigmask(struct task_struct *task)
495 : {
496 0 : return task->restore_sigmask;
497 : }
498 791 : static inline bool test_and_clear_restore_sigmask(void)
499 : {
500 791 : if (!current->restore_sigmask)
501 : return false;
502 0 : current->restore_sigmask = false;
503 0 : return true;
504 : }
505 : #endif
506 :
507 791 : static inline void restore_saved_sigmask(void)
508 : {
509 791 : if (test_and_clear_restore_sigmask())
510 0 : __set_current_blocked(&current->saved_sigmask);
511 791 : }
512 :
513 : extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);
514 :
515 777 : static inline void restore_saved_sigmask_unless(bool interrupted)
516 : {
517 777 : if (interrupted)
518 5 : WARN_ON(!signal_pending(current));
519 : else
520 772 : restore_saved_sigmask();
521 777 : }
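
Together with set_user_sigmask() above, this implements the temporary-sigmask protocol of ppoll()/pselect(): the user's mask is installed for the wait, then restored unless a signal must first be delivered with it still in place. A sketch of the syscall-side pattern (do_poll_work() is hypothetical):

    if (sigmask) {
        ret = set_user_sigmask(sigmask, sigsetsize);
        if (ret)
            return ret;
    }

    ret = do_poll_work(); /* hypothetical */
    restore_saved_sigmask_unless(ret == -EINTR);
    return ret;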
522 :
523 1265 : static inline sigset_t *sigmask_to_save(void)
524 : {
525 1265 : sigset_t *res = &current->blocked;
526 1265 : if (unlikely(test_restore_sigmask()))
527 3 : res = &current->saved_sigmask;
528 1265 : return res;
529 : }
530 :
531 0 : static inline int kill_cad_pid(int sig, int priv)
532 : {
533 0 : return kill_pid(cad_pid, sig, priv);
534 : }
535 :
536 : /* These can be the second arg to send_sig_info/send_group_sig_info. */
537 : #define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
538 : #define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
539 :
540 : /*
541 : * True if we are on the alternate signal stack.
542 : */
543 1743 : static inline int on_sig_stack(unsigned long sp)
544 : {
545 : /*
546 : * If the signal stack is SS_AUTODISARM then, by construction, we
547 : * can't be on the signal stack unless user code deliberately set
548 : * SS_AUTODISARM when we were already on it.
549 : *
550 : * This improves reliability: if user state gets corrupted such that
551 : * the stack pointer points very close to the end of the signal stack,
552 : * then this check will enable the signal to be handled anyway.
553 : */
554 1743 : if (current->sas_ss_flags & SS_AUTODISARM)
555 : return 0;
556 :
557 : #ifdef CONFIG_STACK_GROWSUP
558 : return sp >= current->sas_ss_sp &&
559 : sp - current->sas_ss_sp < current->sas_ss_size;
560 : #else
561 1743 : return sp > current->sas_ss_sp &&
562 1743 : sp - current->sas_ss_sp <= current->sas_ss_size;
563 : #endif
564 : }
565 :
566 0 : static inline int sas_ss_flags(unsigned long sp)
567 : {
568 0 : if (!current->sas_ss_size)
569 : return SS_DISABLE;
570 :
571 0 : return on_sig_stack(sp) ? SS_ONSTACK : 0;
572 : }
573 :
574 59 : static inline void sas_ss_reset(struct task_struct *p)
575 : {
576 59 : p->sas_ss_sp = 0;
577 59 : p->sas_ss_size = 0;
578 59 : p->sas_ss_flags = SS_DISABLE;
579 59 : }
580 :
581 0 : static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
582 : {
583 0 : if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
584 : #ifdef CONFIG_STACK_GROWSUP
585 : return current->sas_ss_sp;
586 : #else
587 0 : return current->sas_ss_sp + current->sas_ss_size;
588 : #endif
589 : return sp;
590 : }
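
Arch signal-delivery code calls sigsp() when choosing where to build the user signal frame. A rough, x86-flavoured illustration (struct rt_sigframe and the 16-byte alignment are per-arch assumptions, shown only for illustration):

    unsigned long sp = sigsp(regs->sp, ksig);

    sp = round_down(sp - sizeof(struct rt_sigframe), 16);
    frame = (struct rt_sigframe __user *)sp;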
591 :
592 : extern void __cleanup_sighand(struct sighand_struct *);
593 : extern void flush_itimer_signals(void);
594 :
595 : #define tasklist_empty() \
596 : list_empty(&init_task.tasks)
597 :
598 : #define next_task(p) \
599 : list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
600 :
601 : #define for_each_process(p) \
602 : for (p = &init_task ; (p = next_task(p)) != &init_task ; )
603 :
604 : extern bool current_is_single_threaded(void);
605 :
606 : /*
607 : * Careful: do_each_thread/while_each_thread is a double loop so
608 : * 'break' will not work as expected - use goto instead.
609 : */
610 : #define do_each_thread(g, t) \
611 : for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
612 :
613 : #define while_each_thread(g, t) \
614 : while ((t = next_thread(t)) != g)
615 :
616 : #define __for_each_thread(signal, t) \
617 : list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
618 :
619 : #define for_each_thread(p, t) \
620 : __for_each_thread((p)->signal, t)
621 :
622 : /* Careful: this is a double loop, 'break' won't work as expected. */
623 : #define for_each_process_thread(p, t) \
624 : for_each_process(p) for_each_thread(p, t)
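
These iterators walk RCU-protected lists, so readers must hold rcu_read_lock() (or tasklist_lock). For example, counting every thread in the system:

    struct task_struct *p, *t;
    int nr = 0;

    rcu_read_lock();
    for_each_process_thread(p, t)
        nr++;
    rcu_read_unlock();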
625 :
626 : typedef int (*proc_visitor)(struct task_struct *p, void *data);
627 : void walk_process_tree(struct task_struct *top, proc_visitor, void *);
628 :
629 : static inline
630 0 : struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
631 : {
632 0 : struct pid *pid;
633 0 : if (type == PIDTYPE_PID)
634 0 : pid = task_pid(task);
635 : else
636 0 : pid = task->signal->pids[type];
637 0 : return pid;
638 : }
639 :
640 1174 : static inline struct pid *task_tgid(struct task_struct *task)
641 : {
642 1174 : return task->signal->pids[PIDTYPE_TGID];
643 : }
644 :
645 : /*
646 : * Without tasklist or RCU lock it is not safe to dereference
647 : * the result of task_pgrp/task_session even if task == current,
648 : * we can race with another thread doing sys_setsid/sys_setpgid.
649 : */
650 973 : static inline struct pid *task_pgrp(struct task_struct *task)
651 : {
652 973 : return task->signal->pids[PIDTYPE_PGID];
653 : }
654 :
655 974 : static inline struct pid *task_session(struct task_struct *task)
656 : {
657 974 : return task->signal->pids[PIDTYPE_SID];
658 : }
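
A sketch of safe use per the comment above: pin the struct pid with a reference before leaving the RCU read-side critical section:

    struct pid *pgrp;

    rcu_read_lock();
    pgrp = get_pid(task_pgrp(task)); /* take a ref while the pointer is stable */
    rcu_read_unlock();
    /* ... use pgrp ... */
    put_pid(pgrp);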
659 :
660 197 : static inline int get_nr_threads(struct task_struct *task)
661 : {
662 197 : return task->signal->nr_threads;
663 : }
664 :
665 2020 : static inline bool thread_group_leader(struct task_struct *p)
666 : {
667 2020 : return p->exit_signal >= 0;
668 : }
669 :
670 : static inline
671 1497 : bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
672 : {
673 1497 : return p1->signal == p2->signal;
674 : }
675 :
676 3422 : static inline struct task_struct *next_thread(const struct task_struct *p)
677 : {
678 3422 : return list_entry_rcu(p->thread_group.next,
679 : struct task_struct, thread_group);
680 : }
681 :
682 3916 : static inline int thread_group_empty(struct task_struct *p)
683 : {
684 3916 : return list_empty(&p->thread_group);
685 : }
686 :
687 : #define delay_group_leader(p) \
688 : (thread_group_leader(p) && !thread_group_empty(p))
689 :
690 : extern bool thread_group_exited(struct pid *pid);
691 :
692 : extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
693 : unsigned long *flags);
694 :
695 232 : static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
696 : unsigned long *flags)
697 : {
698 232 : struct sighand_struct *ret;
699 :
700 232 : ret = __lock_task_sighand(task, flags);
701 232 : (void)__cond_lock(&task->sighand->siglock, ret);
702 232 : return ret;
703 : }
704 :
705 232 : static inline void unlock_task_sighand(struct task_struct *task,
706 : unsigned long *flags)
707 : {
708 232 : spin_unlock_irqrestore(&task->sighand->siglock, *flags);
709 227 : }
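
__lock_task_sighand() returns NULL once the task is dead and its sighand has been detached, so callers must check the result. The typical pairing:

    unsigned long flags;

    if (lock_task_sighand(task, &flags)) {
        /* task->sighand is pinned; siglock held, irqs disabled */
        unlock_task_sighand(task, &flags);
    }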
710 :
711 111414 : static inline unsigned long task_rlimit(const struct task_struct *task,
712 : unsigned int limit)
713 : {
714 111414 : return READ_ONCE(task->signal->rlim[limit].rlim_cur);
715 : }
716 :
717 0 : static inline unsigned long task_rlimit_max(const struct task_struct *task,
718 : unsigned int limit)
719 : {
720 0 : return READ_ONCE(task->signal->rlim[limit].rlim_max);
721 : }
722 :
723 110379 : static inline unsigned long rlimit(unsigned int limit)
724 : {
725 110379 : return task_rlimit(current, limit);
726 : }
727 :
728 0 : static inline unsigned long rlimit_max(unsigned int limit)
729 : {
730 0 : return task_rlimit_max(current, limit);
731 : }
732 :
733 : #endif /* _LINUX_SCHED_SIGNAL_H */