Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * linux/kernel/signal.c
4 : *
5 : * Copyright (C) 1991, 1992 Linus Torvalds
6 : *
7 : * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 : *
9 : * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 : * Changes to use preallocated sigqueue structures
11 : * to allow signals to be sent reliably.
12 : */
13 :
14 : #include <linux/slab.h>
15 : #include <linux/export.h>
16 : #include <linux/init.h>
17 : #include <linux/sched/mm.h>
18 : #include <linux/sched/user.h>
19 : #include <linux/sched/debug.h>
20 : #include <linux/sched/task.h>
21 : #include <linux/sched/task_stack.h>
22 : #include <linux/sched/cputime.h>
23 : #include <linux/file.h>
24 : #include <linux/fs.h>
25 : #include <linux/proc_fs.h>
26 : #include <linux/tty.h>
27 : #include <linux/binfmts.h>
28 : #include <linux/coredump.h>
29 : #include <linux/security.h>
30 : #include <linux/syscalls.h>
31 : #include <linux/ptrace.h>
32 : #include <linux/signal.h>
33 : #include <linux/signalfd.h>
34 : #include <linux/ratelimit.h>
35 : #include <linux/tracehook.h>
36 : #include <linux/capability.h>
37 : #include <linux/freezer.h>
38 : #include <linux/pid_namespace.h>
39 : #include <linux/nsproxy.h>
40 : #include <linux/user_namespace.h>
41 : #include <linux/uprobes.h>
42 : #include <linux/compat.h>
43 : #include <linux/cn_proc.h>
44 : #include <linux/compiler.h>
45 : #include <linux/posix-timers.h>
46 : #include <linux/livepatch.h>
47 : #include <linux/cgroup.h>
48 : #include <linux/audit.h>
49 :
50 : #define CREATE_TRACE_POINTS
51 : #include <trace/events/signal.h>
52 :
53 : #include <asm/param.h>
54 : #include <linux/uaccess.h>
55 : #include <asm/unistd.h>
56 : #include <asm/siginfo.h>
57 : #include <asm/cacheflush.h>
58 :
59 : /*
60 : * SLAB caches for signal bits.
61 : */
62 :
63 : static struct kmem_cache *sigqueue_cachep;
64 :
65 : int print_fatal_signals __read_mostly;
66 :
67 3696 : static void __user *sig_handler(struct task_struct *t, int sig)
68 : {
69 3696 : return t->sighand->action[sig - 1].sa.sa_handler;
70 : }
71 :
72 3696 : static inline bool sig_handler_ignored(void __user *handler, int sig)
73 : {
74 : /* Is it explicitly or implicitly ignored? */
75 3696 : return handler == SIG_IGN ||
76 2023 : (handler == SIG_DFL && sig_kernel_ignore(sig));
77 : }
78 :
79 793 : static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
80 : {
81 793 : void __user *handler;
82 :
83 793 : handler = sig_handler(t, sig);
84 :
85 : /* SIGKILL and SIGSTOP may not be sent to the global init */
86 793 : if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
87 : return true;
88 :
89 793 : if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 0 : handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 : return true;
92 :
93 : /* Only allow kernel generated signals to this kthread */
94 793 : if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
95 : (handler == SIG_KTHREAD_KERNEL) && !force))
96 : return true;
97 :
98 793 : return sig_handler_ignored(handler, sig);
99 : }
100 :
101 896 : static bool sig_ignored(struct task_struct *t, int sig, bool force)
102 : {
103 : /*
104 : * Blocked signals are never ignored, since the
105 : * signal handler may change by the time it is
106 : * unblocked.
107 : */
108 896 : if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
109 : return false;
110 :
111 : /*
112 : * Tracers may want to know about even ignored signals, unless it
113 : * is SIGKILL, which can't be reported anyway but can be ignored
114 : * by a SIGNAL_UNKILLABLE task.
115 : */
116 805 : if (t->ptrace && sig != SIGKILL)
117 : return false;
118 :
119 793 : return sig_task_ignored(t, sig, force);
120 : }
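
A minimal userspace sketch (editor's addition, not part of signal.c) of what sig_ignored() means in practice: SIGWINCH is one of the default-ignored signals matched by sig_kernel_ignore(), so with SIG_DFL installed and the signal unblocked, generation is dropped at the source and nothing ever becomes pending.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t pend;

	signal(SIGWINCH, SIG_DFL);	/* default action for SIGWINCH is "ignore" */
	raise(SIGWINCH);		/* dropped at generation time, never queued */

	sigpending(&pend);
	printf("SIGWINCH pending: %d (expect 0)\n", sigismember(&pend, SIGWINCH));
	return 0;
}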
121 :
122 : /*
123 : * Re-calculate pending state from the set of locally pending
124 : * signals, globally pending signals, and blocked signals.
125 : */
126 9652 : static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
127 : {
128 9652 : unsigned long ready;
129 9652 : long i;
130 :
131 9652 : switch (_NSIG_WORDS) {
132 : default:
133 : for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 : ready |= signal->sig[i] &~ blocked->sig[i];
135 : break;
136 :
137 : case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 : ready |= signal->sig[2] &~ blocked->sig[2];
139 : ready |= signal->sig[1] &~ blocked->sig[1];
140 : ready |= signal->sig[0] &~ blocked->sig[0];
141 : break;
142 :
143 : case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 : ready |= signal->sig[0] &~ blocked->sig[0];
145 : break;
146 :
147 9652 : case 1: ready = signal->sig[0] &~ blocked->sig[0];
148 : }
149 9652 : return ready != 0;
150 : }
151 :
152 : #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
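A userspace analogue of the check above (editor's sketch; EX_NSIG_WORDS and ex_has_pending() are made-up names, and plain unsigned longs stand in for sigset_t): the word-wise "pending AND NOT blocked" test that has_pending_signals() performs.

#include <stdbool.h>

#define EX_NSIG_WORDS 2	/* hypothetical; the kernel uses _NSIG_WORDS */

static bool ex_has_pending(const unsigned long *signal, const unsigned long *blocked)
{
	unsigned long ready = 0;
	int i;

	for (i = 0; i < EX_NSIG_WORDS; i++)
		ready |= signal[i] & ~blocked[i];

	return ready != 0;
}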
153 :
154 4826 : static bool recalc_sigpending_tsk(struct task_struct *t)
155 : {
156 4826 : if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 4826 : PENDING(&t->pending, &t->blocked) ||
158 4826 : PENDING(&t->signal->shared_pending, &t->blocked) ||
159 4811 : cgroup_task_frozen(t)) {
160 15 : set_tsk_thread_flag(t, TIF_SIGPENDING);
161 15 : return true;
162 : }
163 :
164 : /*
165 : * We must never clear the flag in another thread, or in current
166 : * when it's possible the current syscall is returning -ERESTART*.
167 : * So we don't clear it here; only callers who know they should clear it do so.
168 : */
169 : return false;
170 : }
171 :
172 : /*
173 : * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174 : * This is superfluous when called on current; the wakeup is a harmless no-op.
175 : */
176 0 : void recalc_sigpending_and_wake(struct task_struct *t)
177 : {
178 0 : if (recalc_sigpending_tsk(t))
179 0 : signal_wake_up(t, 0);
180 0 : }
181 :
182 4814 : void recalc_sigpending(void)
183 : {
184 4814 : if (!recalc_sigpending_tsk(current) && !freezing(current) &&
185 4799 : !klp_patch_pending(current))
186 4799 : clear_thread_flag(TIF_SIGPENDING);
187 :
188 4814 : }
189 : EXPORT_SYMBOL(recalc_sigpending);
190 :
191 935 : void calculate_sigpending(void)
192 : {
193 : /* Have any signals or users of TIF_SIGPENDING been delayed
194 : * until after fork?
195 : */
196 935 : spin_lock_irq(&current->sighand->siglock);
197 935 : set_tsk_thread_flag(current, TIF_SIGPENDING);
198 935 : recalc_sigpending();
199 935 : spin_unlock_irq(&current->sighand->siglock);
200 935 : }
201 :
202 : /* Given the mask, find the first available signal that should be serviced. */
203 :
204 : #define SYNCHRONOUS_MASK \
205 : (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 : sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
207 :
208 1406 : int next_signal(struct sigpending *pending, sigset_t *mask)
209 : {
210 1406 : unsigned long i, *s, *m, x;
211 1406 : int sig = 0;
212 :
213 1406 : s = pending->signal.sig;
214 1406 : m = mask->sig;
215 :
216 : /*
217 : * Handle the first word specially: it contains the
218 : * synchronous signals that need to be dequeued first.
219 : */
220 1406 : x = *s &~ *m;
221 1406 : if (x) {
222 614 : if (x & SYNCHRONOUS_MASK)
223 0 : x &= SYNCHRONOUS_MASK;
224 614 : sig = ffz(~x) + 1;
225 614 : return sig;
226 : }
227 :
228 : switch (_NSIG_WORDS) {
229 : default:
230 : for (i = 1; i < _NSIG_WORDS; ++i) {
231 : x = *++s &~ *++m;
232 : if (!x)
233 : continue;
234 : sig = ffz(~x) + i*_NSIG_BPW + 1;
235 : break;
236 : }
237 : break;
238 :
239 : case 2:
240 : x = s[1] &~ m[1];
241 : if (!x)
242 : break;
243 : sig = ffz(~x) + _NSIG_BPW + 1;
244 : break;
245 :
246 : case 1:
247 : /* Nothing to do */
248 : break;
249 : }
250 :
251 : return sig;
252 : }
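
How the first-word scan maps a set bit to a signal number: ffz(~x) is the index of the lowest set bit of x, so sig = ffz(~x) + 1 turns bit 0 into signal 1, bit 10 into signal 11, and so on, with SYNCHRONOUS_MASK signals taking priority when present in the first word. An editor's userspace sketch, using __builtin_ctzl() in place of ffz(~x) (ex_lowest_signal() is a made-up helper):

#include <stdio.h>

static int ex_lowest_signal(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	return x ? __builtin_ctzl(x) + 1 : 0;	/* ffz(~x) + 1 in the kernel */
}

int main(void)
{
	printf("%d\n", ex_lowest_signal(1UL << 10, 0));	/* prints 11 (SIGSEGV on x86) */
	return 0;
}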
253 :
254 0 : static inline void print_dropped_signal(int sig)
255 : {
256 0 : static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
257 :
258 0 : if (!print_fatal_signals)
259 : return;
260 :
261 0 : if (!__ratelimit(&ratelimit_state))
262 : return;
263 :
264 0 : pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 : current->comm, current->pid, sig);
266 : }
267 :
268 : /**
269 : * task_set_jobctl_pending - set jobctl pending bits
270 : * @task: target task
271 : * @mask: pending bits to set
272 : *
273 : * Set @mask in @task->jobctl. @mask must be a subset of
274 : * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275 : * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
276 : * cleared. If @task is already being killed or exiting, this function
277 : * becomes noop.
278 : *
279 : * CONTEXT:
280 : * Must be called with @task->sighand->siglock held.
281 : *
282 : * RETURNS:
283 : * %true if @mask is set, %false if made noop because @task was dying.
284 : */
285 0 : bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
286 : {
287 0 : BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 : JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 0 : BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
290 :
291 0 : if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
292 : return false;
293 :
294 0 : if (mask & JOBCTL_STOP_SIGMASK)
295 0 : task->jobctl &= ~JOBCTL_STOP_SIGMASK;
296 :
297 0 : task->jobctl |= mask;
298 0 : return true;
299 : }
300 :
301 : /**
302 : * task_clear_jobctl_trapping - clear jobctl trapping bit
303 : * @task: target task
304 : *
305 : * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306 : * Clear it and wake up the ptracer. Note that we don't need any further
307 : * locking. @task->siglock guarantees that @task->parent points to the
308 : * ptracer.
309 : *
310 : * CONTEXT:
311 : * Must be called with @task->sighand->siglock held.
312 : */
313 48 : void task_clear_jobctl_trapping(struct task_struct *task)
314 : {
315 48 : if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 0 : task->jobctl &= ~JOBCTL_TRAPPING;
317 0 : smp_mb(); /* advised by wake_up_bit() */
318 0 : wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
319 : }
320 48 : }
321 :
322 : /**
323 : * task_clear_jobctl_pending - clear jobctl pending bits
324 : * @task: target task
325 : * @mask: pending bits to clear
326 : *
327 : * Clear @mask from @task->jobctl. @mask must be subset of
328 : * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
329 : * STOP bits are cleared together.
330 : *
331 : * If clearing of @mask leaves no stop or trap pending, this function calls
332 : * task_clear_jobctl_trapping().
333 : *
334 : * CONTEXT:
335 : * Must be called with @task->sighand->siglock held.
336 : */
337 24 : void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
338 : {
339 24 : BUG_ON(mask & ~JOBCTL_PENDING_MASK);
340 :
341 24 : if (mask & JOBCTL_STOP_PENDING)
342 0 : mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
343 :
344 24 : task->jobctl &= ~mask;
345 :
346 24 : if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 24 : task_clear_jobctl_trapping(task);
348 24 : }
349 :
350 : /**
351 : * task_participate_group_stop - participate in a group stop
352 : * @task: task participating in a group stop
353 : *
354 : * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355 : * Group stop states are cleared and the group stop count is consumed if
356 : * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
357 : * stop, the appropriate %SIGNAL_* flags are set.
358 : *
359 : * CONTEXT:
360 : * Must be called with @task->sighand->siglock held.
361 : *
362 : * RETURNS:
363 : * %true if group stop completion should be notified to the parent, %false
364 : * otherwise.
365 : */
366 0 : static bool task_participate_group_stop(struct task_struct *task)
367 : {
368 0 : struct signal_struct *sig = task->signal;
369 0 : bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
370 :
371 0 : WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
372 :
373 0 : task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
374 :
375 0 : if (!consume)
376 : return false;
377 :
378 0 : if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 0 : sig->group_stop_count--;
380 :
381 : /*
382 : * Tell the caller to notify completion iff we are entering into a
383 : * fresh group stop. Read comment in do_signal_stop() for details.
384 : */
385 0 : if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 0 : signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
387 0 : return true;
388 : }
389 : return false;
390 : }
391 :
392 6 : void task_join_group_stop(struct task_struct *task)
393 : {
394 6 : unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
395 6 : struct signal_struct *sig = current->signal;
396 :
397 6 : if (sig->group_stop_count) {
398 0 : sig->group_stop_count++;
399 0 : mask |= JOBCTL_STOP_CONSUME;
400 6 : } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
401 : return;
402 :
403 : /* Have the new thread join an on-going signal group stop */
404 0 : task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
405 : }
406 :
407 : /*
408 : * allocate a new signal queue record
409 : * - this may be called without locks if and only if t == current, otherwise an
410 : * appropriate lock must be held to stop the target task from exiting
411 : */
412 : static struct sigqueue *
413 545 : __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
414 : {
415 545 : struct sigqueue *q = NULL;
416 545 : struct user_struct *user;
417 545 : int sigpending;
418 :
419 : /*
420 : * Protect access to @t credentials. This can go away when all
421 : * callers hold rcu read lock.
422 : *
423 : * NOTE! A pending signal will hold on to the user refcount,
424 : * and we get/put the refcount only when the sigpending count
425 : * changes from/to zero.
426 : */
427 545 : rcu_read_lock();
428 545 : user = __task_cred(t)->user;
429 545 : sigpending = atomic_inc_return(&user->sigpending);
430 545 : if (sigpending == 1)
431 487 : get_uid(user);
432 545 : rcu_read_unlock();
433 :
434 545 : if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435 545 : q = kmem_cache_alloc(sigqueue_cachep, flags);
436 : } else {
437 0 : print_dropped_signal(sig);
438 : }
439 :
440 545 : if (unlikely(q == NULL)) {
441 0 : if (atomic_dec_and_test(&user->sigpending))
442 0 : free_uid(user);
443 : } else {
444 545 : INIT_LIST_HEAD(&q->list);
445 545 : q->flags = 0;
446 545 : q->user = user;
447 : }
448 :
449 545 : return q;
450 : }
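
The RLIMIT_SIGPENDING accounting enforced here is visible from userspace: once the per-user pending count reaches the rlimit, queueing a real-time signal fails with EAGAIN, while kill() of a classic signal still gets through because __send_signal() sets override_rlimit for it. An editor's sketch; the exact point of failure depends on what is already pending for the user.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { 2, 2 };		/* allow only two queued signals */
	union sigval val = { .sival_int = 0 };
	sigset_t set;
	int i;

	setrlimit(RLIMIT_SIGPENDING, &rl);
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep the queued signals pending */

	for (i = 0; i < 4; i++)
		if (sigqueue(getpid(), SIGRTMIN, val) == -1)
			printf("sigqueue %d: %s\n", i, strerror(errno));
	return 0;
}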
451 :
452 545 : static void __sigqueue_free(struct sigqueue *q)
453 : {
454 545 : if (q->flags & SIGQUEUE_PREALLOC)
455 : return;
456 1090 : if (atomic_dec_and_test(&q->user->sigpending))
457 487 : free_uid(q->user);
458 545 : kmem_cache_free(sigqueue_cachep, q);
459 : }
460 :
461 1716 : void flush_sigqueue(struct sigpending *queue)
462 : {
463 1716 : struct sigqueue *q;
464 :
465 1716 : sigemptyset(&queue->signal);
466 1726 : while (!list_empty(&queue->list)) {
467 10 : q = list_entry(queue->list.next, struct sigqueue , list);
468 10 : list_del_init(&q->list);
469 10 : __sigqueue_free(q);
470 : }
471 1716 : }
472 :
473 : /*
474 : * Flush all pending signals for this kthread.
475 : */
476 1 : void flush_signals(struct task_struct *t)
477 : {
478 1 : unsigned long flags;
479 :
480 1 : spin_lock_irqsave(&t->sighand->siglock, flags);
481 1 : clear_tsk_thread_flag(t, TIF_SIGPENDING);
482 1 : flush_sigqueue(&t->pending);
483 1 : flush_sigqueue(&t->signal->shared_pending);
484 1 : spin_unlock_irqrestore(&t->sighand->siglock, flags);
485 1 : }
486 : EXPORT_SYMBOL(flush_signals);
487 :
488 : #ifdef CONFIG_POSIX_TIMERS
489 1146 : static void __flush_itimer_signals(struct sigpending *pending)
490 : {
491 1146 : sigset_t signal, retain;
492 1146 : struct sigqueue *q, *n;
493 :
494 1146 : signal = pending->signal;
495 1146 : sigemptyset(&retain);
496 :
497 1146 : list_for_each_entry_safe(q, n, &pending->list, list) {
498 0 : int sig = q->info.si_signo;
499 :
500 0 : if (likely(q->info.si_code != SI_TIMER)) {
501 0 : sigaddset(&retain, sig);
502 : } else {
503 0 : sigdelset(&signal, sig);
504 0 : list_del_init(&q->list);
505 0 : __sigqueue_free(q);
506 : }
507 : }
508 :
509 1146 : sigorsets(&pending->signal, &signal, &retain);
510 1146 : }
511 :
512 573 : void flush_itimer_signals(void)
513 : {
514 573 : struct task_struct *tsk = current;
515 573 : unsigned long flags;
516 :
517 573 : spin_lock_irqsave(&tsk->sighand->siglock, flags);
518 573 : __flush_itimer_signals(&tsk->pending);
519 573 : __flush_itimer_signals(&tsk->signal->shared_pending);
520 573 : spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
521 573 : }
522 : #endif
523 :
524 1 : void ignore_signals(struct task_struct *t)
525 : {
526 1 : int i;
527 :
528 65 : for (i = 0; i < _NSIG; ++i)
529 64 : t->sighand->action[i].sa.sa_handler = SIG_IGN;
530 :
531 1 : flush_signals(t);
532 1 : }
533 :
534 : /*
535 : * Flush all handlers for a task.
536 : */
537 :
538 : void
539 573 : flush_signal_handlers(struct task_struct *t, int force_default)
540 : {
541 573 : int i;
542 573 : struct k_sigaction *ka = &t->sighand->action[0];
543 37245 : for (i = _NSIG ; i != 0 ; i--) {
544 36672 : if (force_default || ka->sa.sa_handler != SIG_IGN)
545 36456 : ka->sa.sa_handler = SIG_DFL;
546 36672 : ka->sa.sa_flags = 0;
547 : #ifdef __ARCH_HAS_SA_RESTORER
548 36672 : ka->sa.sa_restorer = NULL;
549 : #endif
550 36672 : sigemptyset(&ka->sa.sa_mask);
551 36672 : ka++;
552 : }
553 573 : }
554 :
555 0 : bool unhandled_signal(struct task_struct *tsk, int sig)
556 : {
557 0 : void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
558 0 : if (is_global_init(tsk))
559 : return true;
560 :
561 0 : if (handler != SIG_IGN && handler != SIG_DFL)
562 : return false;
563 :
564 : /* if ptraced, let the tracer determine */
565 0 : return !tsk->ptrace;
566 : }
567 :
568 530 : static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
569 : bool *resched_timer)
570 : {
571 530 : struct sigqueue *q, *first = NULL;
572 :
573 : /*
574 : * Collect the siginfo appropriate to this signal. Check if
575 : * there is another siginfo for the same signal.
576 : */
577 1060 : list_for_each_entry(q, &list->list, list) {
578 530 : if (q->info.si_signo == sig) {
579 530 : if (first)
580 0 : goto still_pending;
581 : first = q;
582 : }
583 : }
584 :
585 530 : sigdelset(&list->signal, sig);
586 :
587 530 : if (first) {
588 530 : still_pending:
589 530 : list_del_init(&first->list);
590 530 : copy_siginfo(info, &first->info);
591 :
592 1060 : *resched_timer =
593 530 : (first->flags & SIGQUEUE_PREALLOC) &&
594 530 : (info->si_code == SI_TIMER) &&
595 0 : (info->si_sys_private);
596 :
597 530 : __sigqueue_free(first);
598 : } else {
599 : /*
600 : * Ok, it wasn't in the queue. This must be
601 : * a fast-pathed signal or we must have been
602 : * out of queue space. So zero out the info.
603 : */
604 0 : clear_siginfo(info);
605 0 : info->si_signo = sig;
606 0 : info->si_errno = 0;
607 0 : info->si_code = SI_USER;
608 0 : info->si_pid = 0;
609 0 : info->si_uid = 0;
610 : }
611 530 : }
612 :
613 1096 : static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
614 : kernel_siginfo_t *info, bool *resched_timer)
615 : {
616 1096 : int sig = next_signal(pending, mask);
617 :
618 1096 : if (sig)
619 530 : collect_signal(sig, pending, info, resched_timer);
620 1096 : return sig;
621 : }
622 :
623 : /*
624 : * Dequeue a signal and return the element to the caller, which is
625 : * expected to free it.
626 : *
627 : * All callers have to hold the siglock.
628 : */
629 554 : int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
630 : {
631 554 : bool resched_timer = false;
632 554 : int signr;
633 :
634 : /* We only dequeue private signals from ourselves, we don't let
635 : * signalfd steal them
636 : */
637 554 : signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
638 554 : if (!signr) {
639 542 : signr = __dequeue_signal(&tsk->signal->shared_pending,
640 : mask, info, &resched_timer);
641 : #ifdef CONFIG_POSIX_TIMERS
642 : /*
643 : * itimer signal ?
644 : *
645 : * itimers are process shared and we restart periodic
646 : * itimers in the signal delivery path to prevent DoS
647 : * attacks in the high resolution timer case. This is
648 : * compliant with the old way of self-restarting
649 : * itimers, as the SIGALRM is a legacy signal and only
650 : * queued once. Changing the restart behaviour to
651 : * restart the timer in the signal dequeue path is
652 : * reducing the timer noise on heavily loaded !highres
653 : * systems too.
654 : */
655 542 : if (unlikely(signr == SIGALRM)) {
656 0 : struct hrtimer *tmr = &tsk->signal->real_timer;
657 :
658 0 : if (!hrtimer_is_queued(tmr) &&
659 0 : tsk->signal->it_real_incr != 0) {
660 0 : hrtimer_forward(tmr, tmr->base->get_time(),
661 : tsk->signal->it_real_incr);
662 0 : hrtimer_restart(tmr);
663 : }
664 : }
665 : #endif
666 : }
667 :
668 554 : recalc_sigpending();
669 554 : if (!signr)
670 : return 0;
671 :
672 530 : if (unlikely(sig_kernel_stop(signr))) {
673 : /*
674 : * Set a marker that we have dequeued a stop signal. Our
675 : * caller might release the siglock and then the pending
676 : * stop signal it is about to process is no longer in the
677 : * pending bitmasks, but must still be cleared by a SIGCONT
678 : * (and overruled by a SIGKILL). So those cases clear this
679 : * shared flag after we've set it. Note that this flag may
680 : * remain set after the signal we return is ignored or
681 : * handled. That doesn't matter because its only purpose
682 : * is to alert stop-signal processing code when another
683 : * processor has come along and cleared the flag.
684 : */
685 15 : current->jobctl |= JOBCTL_STOP_DEQUEUED;
686 : }
687 : #ifdef CONFIG_POSIX_TIMERS
688 530 : if (resched_timer) {
689 : /*
690 : * Release the siglock to ensure proper locking order
691 : * of timer locks outside of siglocks. Note, we leave
692 : * irqs disabled here, since the posix-timers code is
693 : * about to disable them again anyway.
694 : */
695 0 : spin_unlock(&tsk->sighand->siglock);
696 0 : posixtimer_rearm(info);
697 0 : spin_lock(&tsk->sighand->siglock);
698 :
699 : /* Don't expose the si_sys_private value to userspace */
700 0 : info->si_sys_private = 0;
701 : }
702 : #endif
703 : return signr;
704 : }
705 : EXPORT_SYMBOL_GPL(dequeue_signal);
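
The userspace-visible counterpart of dequeue_signal(): blocking a signal and calling sigwaitinfo() makes the kernel dequeue the pending entry into a siginfo_t instead of delivering it to a handler. A small self-contained example (editor's addition):

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* keep SIGUSR1 pending, not delivered */

	raise(SIGUSR1);				/* queue one SIGUSR1 to ourselves */

	if (sigwaitinfo(&set, &info) == SIGUSR1)
		printf("dequeued SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}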
706 :
707 499 : static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708 : {
709 499 : struct task_struct *tsk = current;
710 499 : struct sigpending *pending = &tsk->pending;
711 499 : struct sigqueue *q, *sync = NULL;
712 :
713 : /*
714 : * Might a synchronous signal be in the queue?
715 : */
716 499 : if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
717 : return 0;
718 :
719 : /*
720 : * Return the first synchronous signal in the queue.
721 : */
722 0 : list_for_each_entry(q, &pending->list, list) {
723 : /* Synchronous signals have a positive si_code */
724 0 : if ((q->info.si_code > SI_USER) &&
725 0 : (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
726 0 : sync = q;
727 0 : goto next;
728 : }
729 : }
730 : return 0;
731 0 : next:
732 : /*
733 : * Check if there is another siginfo for the same signal.
734 : */
735 0 : list_for_each_entry_continue(q, &pending->list, list) {
736 0 : if (q->info.si_signo == sync->info.si_signo)
737 0 : goto still_pending;
738 : }
739 :
740 0 : sigdelset(&pending->signal, sync->info.si_signo);
741 0 : recalc_sigpending();
742 0 : still_pending:
743 0 : list_del_init(&sync->list);
744 0 : copy_siginfo(info, &sync->info);
745 0 : __sigqueue_free(sync);
746 0 : return info->si_signo;
747 : }
748 :
749 : /*
750 : * Tell a process that it has a new active signal.
751 : *
752 : * NOTE! we rely on the previous spin_lock to
753 : * lock interrupts for us! We can only be called with
754 : * "siglock" held, and the local interrupt must
755 : * have been disabled when that got acquired!
756 : *
757 : * No need to set need_resched since signal event passing
758 : * goes through ->blocked
759 : */
760 477 : void signal_wake_up_state(struct task_struct *t, unsigned int state)
761 : {
762 477 : set_tsk_thread_flag(t, TIF_SIGPENDING);
763 : /*
764 : * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
765 : * case. We don't check t->state here because there is a race with it
766 : * executing on another processor and just now entering stopped state.
767 : * By using wake_up_state, we ensure the process will wake up and
768 : * handle its death signal.
769 : */
770 477 : if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
771 71 : kick_process(t);
772 477 : }
773 :
774 : /*
775 : * Remove signals in mask from the pending set and queue.
777 : *
778 : * All callers must be holding the siglock.
779 : */
780 550 : static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
781 : {
782 550 : struct sigqueue *q, *n;
783 550 : sigset_t m;
784 :
785 550 : sigandsets(&m, mask, &s->signal);
786 550 : if (sigisemptyset(&m))
787 550 : return;
788 :
789 0 : sigandnsets(&s->signal, &s->signal, mask);
790 0 : list_for_each_entry_safe(q, n, &s->list, list) {
791 0 : if (sigismember(mask, q->info.si_signo)) {
792 0 : list_del_init(&q->list);
793 0 : __sigqueue_free(q);
794 : }
795 : }
796 : }
797 :
798 577 : static inline int is_si_special(const struct kernel_siginfo *info)
799 : {
800 577 : return info <= SEND_SIG_PRIV;
801 : }
802 :
803 37 : static inline bool si_fromuser(const struct kernel_siginfo *info)
804 : {
805 111 : return info == SEND_SIG_NOINFO ||
806 37 : (!is_si_special(info) && SI_FROMUSER(info));
807 : }
808 :
809 : /*
810 : * called with RCU read lock from check_kill_permission()
811 : */
812 30 : static bool kill_ok_by_cred(struct task_struct *t)
813 : {
814 30 : const struct cred *cred = current_cred();
815 30 : const struct cred *tcred = __task_cred(t);
816 :
817 30 : return uid_eq(cred->euid, tcred->suid) ||
818 4 : uid_eq(cred->euid, tcred->uid) ||
819 4 : uid_eq(cred->uid, tcred->suid) ||
820 38 : uid_eq(cred->uid, tcred->uid) ||
821 4 : ns_capable(tcred->user_ns, CAP_KILL);
822 : }
823 :
824 : /*
825 : * Bad permissions for sending the signal
826 : * - the caller must hold the RCU read lock
827 : */
828 37 : static int check_kill_permission(int sig, struct kernel_siginfo *info,
829 : struct task_struct *t)
830 : {
831 37 : struct pid *sid;
832 37 : int error;
833 :
834 37 : if (!valid_signal(sig))
835 : return -EINVAL;
836 :
837 74 : if (!si_fromuser(info))
838 : return 0;
839 :
840 37 : error = audit_signal_info(sig, t); /* Let audit system see the signal */
841 37 : if (error)
842 : return error;
843 :
844 67 : if (!same_thread_group(current, t) &&
845 30 : !kill_ok_by_cred(t)) {
846 4 : switch (sig) {
847 : case SIGCONT:
848 0 : sid = task_session(t);
849 : /*
850 : * We don't return the error if sid == NULL. The
851 : * task was unhashed, the caller must notice this.
852 : */
853 0 : if (!sid || sid == task_session(current))
854 : break;
855 : fallthrough;
856 : default:
857 : return -EPERM;
858 : }
859 33 : }
860 :
861 33 : return security_task_kill(t, info, sig, NULL);
862 : }
863 :
864 : /**
865 : * ptrace_trap_notify - schedule trap to notify ptracer
866 : * @t: tracee wanting to notify tracer
867 : *
868 : * This function schedules sticky ptrace trap which is cleared on the next
869 : * TRAP_STOP to notify ptracer of an event. @t must have been seized by
870 : * ptracer.
871 : *
872 : * If @t is running, STOP trap will be taken. If trapped for STOP and
873 : * ptracer is listening for events, tracee is woken up so that it can
874 : * re-trap for the new event. If trapped otherwise, STOP trap will be
875 : * eventually taken without returning to userland after the existing traps
876 : * are finished by PTRACE_CONT.
877 : *
878 : * CONTEXT:
879 : * Must be called with @task->sighand->siglock held.
880 : */
881 0 : static void ptrace_trap_notify(struct task_struct *t)
882 : {
883 0 : WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
884 0 : assert_spin_locked(&t->sighand->siglock);
885 :
886 0 : task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
887 0 : ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
888 0 : }
889 :
890 : /*
891 : * Handle magic process-wide effects of stop/continue signals. Unlike
892 : * the signal actions, these happen immediately at signal-generation
893 : * time regardless of blocking, ignoring, or handling. This does the
894 : * actual continuing for SIGCONT, but not the actual stopping for stop
895 : * signals. The process stop is done as a signal action for SIG_DFL.
896 : *
897 : * Returns true if the signal should be actually delivered, otherwise
898 : * it should be dropped.
899 : */
900 896 : static bool prepare_signal(int sig, struct task_struct *p, bool force)
901 : {
902 896 : struct signal_struct *signal = p->signal;
903 896 : struct task_struct *t;
904 896 : sigset_t flush;
905 :
906 896 : if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
907 0 : if (!(signal->flags & SIGNAL_GROUP_EXIT))
908 0 : return sig == SIGKILL;
909 : /*
910 : * The process is in the middle of dying, nothing to do.
911 : */
912 896 : } else if (sig_kernel_stop(sig)) {
913 : /*
914 : * This is a stop signal. Remove SIGCONT from all queues.
915 : */
916 15 : siginitset(&flush, sigmask(SIGCONT));
917 15 : flush_sigqueue_mask(&flush, &signal->shared_pending);
918 38 : for_each_thread(p, t)
919 23 : flush_sigqueue_mask(&flush, &t->pending);
920 881 : } else if (sig == SIGCONT) {
921 0 : unsigned int why;
922 : /*
923 : * Remove all stop signals from all queues, wake all threads.
924 : */
925 0 : siginitset(&flush, SIG_KERNEL_STOP_MASK);
926 0 : flush_sigqueue_mask(&flush, &signal->shared_pending);
927 0 : for_each_thread(p, t) {
928 0 : flush_sigqueue_mask(&flush, &t->pending);
929 0 : task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
930 0 : if (likely(!(t->ptrace & PT_SEIZED)))
931 0 : wake_up_state(t, __TASK_STOPPED);
932 : else
933 0 : ptrace_trap_notify(t);
934 : }
935 :
936 : /*
937 : * Notify the parent with CLD_CONTINUED if we were stopped.
938 : *
939 : * If we were in the middle of a group stop, we pretend it
940 : * was already finished, and then continued. Since SIGCHLD
941 : * doesn't queue we report only CLD_STOPPED, as if the next
942 : * CLD_CONTINUED was dropped.
943 : */
944 0 : why = 0;
945 0 : if (signal->flags & SIGNAL_STOP_STOPPED)
946 : why |= SIGNAL_CLD_CONTINUED;
947 0 : else if (signal->group_stop_count)
948 : why |= SIGNAL_CLD_STOPPED;
949 :
950 : if (why) {
951 : /*
952 : * The first thread which returns from do_signal_stop()
953 : * will take ->siglock, notice SIGNAL_CLD_MASK, and
954 : * notify its parent. See get_signal().
955 : */
956 0 : signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
957 0 : signal->group_stop_count = 0;
958 0 : signal->group_exit_code = 0;
959 : }
960 : }
961 :
962 896 : return !sig_ignored(p, sig, force);
963 : }
964 :
965 : /*
966 : * Test if P wants to take SIG. After we've checked all threads with this,
967 : * it's equivalent to finding no threads not blocking SIG. Any threads not
968 : * blocking SIG were ruled out because they are not running and already
969 : * have pending signals. Such threads will dequeue from the shared queue
970 : * as soon as they're available, so putting the signal on the shared queue
971 : * will be equivalent to sending it to one such thread.
972 : */
973 540 : static inline bool wants_signal(int sig, struct task_struct *p)
974 : {
975 540 : if (sigismember(&p->blocked, sig))
976 : return false;
977 :
978 465 : if (p->flags & PF_EXITING)
979 : return false;
980 :
981 465 : if (sig == SIGKILL)
982 : return true;
983 :
984 465 : if (task_is_stopped_or_traced(p))
985 : return false;
986 :
987 1301 : return task_curr(p) || !task_sigpending(p);
988 : }
989 :
990 540 : static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
991 : {
992 540 : struct signal_struct *signal = p->signal;
993 540 : struct task_struct *t;
994 :
995 : /*
996 : * Now find a thread we can wake up to take the signal off the queue.
997 : *
998 : * If the main thread wants the signal, it gets first crack.
999 : * Probably the least surprising to the average bear.
1000 : */
1001 540 : if (wants_signal(sig, p))
1002 : t = p;
1003 75 : else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1004 : /*
1005 : * There is just one thread and it does not need to be woken.
1006 : * It will dequeue unblocked signals before it runs again.
1007 : */
1008 : return;
1009 : else {
1010 : /*
1011 : * Otherwise try to find a suitable thread.
1012 : */
1013 0 : t = signal->curr_target;
1014 0 : while (!wants_signal(sig, t)) {
1015 0 : t = next_thread(t);
1016 0 : if (t == signal->curr_target)
1017 : /*
1018 : * No thread needs to be woken.
1019 : * Any eligible threads will see
1020 : * the signal in the queue soon.
1021 : */
1022 : return;
1023 : }
1024 0 : signal->curr_target = t;
1025 : }
1026 :
1027 : /*
1028 : * Found a killable thread. If the signal will be fatal,
1029 : * then start taking the whole group down immediately.
1030 : */
1031 465 : if (sig_fatal(p, sig) &&
1032 0 : !(signal->flags & SIGNAL_GROUP_EXIT) &&
1033 0 : !sigismember(&t->real_blocked, sig) &&
1034 0 : (sig == SIGKILL || !p->ptrace)) {
1035 : /*
1036 : * This signal will be fatal to the whole group.
1037 : */
1038 0 : if (!sig_kernel_coredump(sig)) {
1039 : /*
1040 : * Start a group exit and wake everybody up.
1041 : * This way we don't have other threads
1042 : * running and doing things after a slower
1043 : * thread has the fatal signal pending.
1044 : */
1045 0 : signal->flags = SIGNAL_GROUP_EXIT;
1046 0 : signal->group_exit_code = sig;
1047 0 : signal->group_stop_count = 0;
1048 0 : t = p;
1049 0 : do {
1050 0 : task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1051 0 : sigaddset(&t->pending.signal, SIGKILL);
1052 0 : signal_wake_up(t, 1);
1053 0 : } while_each_thread(p, t);
1054 : return;
1055 : }
1056 : }
1057 :
1058 : /*
1059 : * The signal is already in the shared-pending queue.
1060 : * Tell the chosen thread to wake up and dequeue it.
1061 : */
1062 465 : signal_wake_up(t, sig == SIGKILL);
1063 : return;
1064 : }
1065 :
1066 556 : static inline bool legacy_queue(struct sigpending *signals, int sig)
1067 : {
1068 556 : return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1069 : }
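
The user-visible effect of legacy_queue(): a classic (non-real-time) signal collapses to a single pending instance while blocked, whereas real-time signals queue once per send. An editor's sketch demonstrating the classic case:

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec zero = { 0, 0 };
	sigset_t set;
	int n = 0;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);
	raise(SIGUSR1);		/* second instance is "already pending" and dropped */

	while (sigtimedwait(&set, NULL, &zero) == SIGUSR1)
		n++;
	printf("dequeued %d SIGUSR1 (expect 1)\n", n);
	return 0;
}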
1070 :
1071 896 : static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1072 : enum pid_type type, bool force)
1073 : {
1074 896 : struct sigpending *pending;
1075 896 : struct sigqueue *q;
1076 896 : int override_rlimit;
1077 896 : int ret = 0, result;
1078 :
1079 896 : assert_spin_locked(&t->sighand->siglock);
1080 :
1081 896 : result = TRACE_SIGNAL_IGNORED;
1082 896 : if (!prepare_signal(sig, t, force))
1083 340 : goto ret;
1084 :
1085 556 : pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1086 : /*
1087 : * Short-circuit ignored signals and support queuing
1088 : * exactly one non-rt signal, so that we can get more
1089 : * detailed information about the cause of the signal.
1090 : */
1091 556 : result = TRACE_SIGNAL_ALREADY_PENDING;
1092 1112 : if (legacy_queue(pending, sig))
1093 16 : goto ret;
1094 :
1095 540 : result = TRACE_SIGNAL_DELIVERED;
1096 : /*
1097 : * Skip useless siginfo allocation for SIGKILL and kernel threads.
1098 : */
1099 540 : if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
1100 0 : goto out_set;
1101 :
1102 : /*
1103 : * Real-time signals must be queued if sent by sigqueue, or
1104 : * some other real-time mechanism. It is implementation
1105 : * defined whether kill() does so. We attempt to do so, on
1106 : * the principle of least surprise, but since kill is not
1107 : * allowed to fail with EAGAIN when low on memory we just
1108 : * make sure at least one signal gets delivered and don't
1109 : * pass on the info struct.
1110 : */
1111 540 : if (sig < SIGRTMIN)
1112 540 : override_rlimit = (is_si_special(info) || info->si_code >= 0);
1113 : else
1114 : override_rlimit = 0;
1115 :
1116 540 : q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1117 540 : if (q) {
1118 540 : list_add_tail(&q->list, &pending->list);
1119 540 : switch ((unsigned long) info) {
1120 0 : case (unsigned long) SEND_SIG_NOINFO:
1121 0 : clear_siginfo(&q->info);
1122 0 : q->info.si_signo = sig;
1123 0 : q->info.si_errno = 0;
1124 0 : q->info.si_code = SI_USER;
1125 0 : q->info.si_pid = task_tgid_nr_ns(current,
1126 : task_active_pid_ns(t));
1127 0 : rcu_read_lock();
1128 0 : q->info.si_uid =
1129 0 : from_kuid_munged(task_cred_xxx(t, user_ns),
1130 0 : current_uid());
1131 0 : rcu_read_unlock();
1132 0 : break;
1133 8 : case (unsigned long) SEND_SIG_PRIV:
1134 8 : clear_siginfo(&q->info);
1135 8 : q->info.si_signo = sig;
1136 8 : q->info.si_errno = 0;
1137 8 : q->info.si_code = SI_KERNEL;
1138 8 : q->info.si_pid = 0;
1139 8 : q->info.si_uid = 0;
1140 8 : break;
1141 532 : default:
1142 532 : copy_siginfo(&q->info, info);
1143 : break;
1144 : }
1145 0 : } else if (!is_si_special(info) &&
1146 0 : sig >= SIGRTMIN && info->si_code != SI_USER) {
1147 : /*
1148 : * Queue overflow, abort. We may abort if the
1149 : * signal was rt and sent by user using something
1150 : * other than kill().
1151 : */
1152 0 : result = TRACE_SIGNAL_OVERFLOW_FAIL;
1153 0 : ret = -EAGAIN;
1154 0 : goto ret;
1155 : } else {
1156 : /*
1157 : * This is a silent loss of information. We still
1158 : * send the signal, but the *info bits are lost.
1159 : */
1160 : result = TRACE_SIGNAL_LOSE_INFO;
1161 : }
1162 :
1163 540 : out_set:
1164 540 : signalfd_notify(t, sig);
1165 540 : sigaddset(&pending->signal, sig);
1166 :
1167 : /* Let multiprocess signals appear after on-going forks */
1168 540 : if (type > PIDTYPE_TGID) {
1169 0 : struct multiprocess_signals *delayed;
1170 0 : hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1171 0 : sigset_t *signal = &delayed->signal;
1172 : /* Can't queue both a stop and a continue signal */
1173 0 : if (sig == SIGCONT)
1174 0 : sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1175 0 : else if (sig_kernel_stop(sig))
1176 0 : sigdelset(signal, SIGCONT);
1177 0 : sigaddset(signal, sig);
1178 : }
1179 : }
1180 :
1181 540 : complete_signal(sig, t, type);
1182 896 : ret:
1183 896 : trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1184 896 : return ret;
1185 : }
1186 :
1187 32 : static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1188 : {
1189 32 : bool ret = false;
1190 32 : switch (siginfo_layout(info->si_signo, info->si_code)) {
1191 32 : case SIL_KILL:
1192 : case SIL_CHLD:
1193 : case SIL_RT:
1194 32 : ret = true;
1195 32 : break;
1196 : case SIL_TIMER:
1197 : case SIL_POLL:
1198 : case SIL_FAULT:
1199 : case SIL_FAULT_MCEERR:
1200 : case SIL_FAULT_BNDERR:
1201 : case SIL_FAULT_PKUERR:
1202 : case SIL_SYS:
1203 : ret = false;
1204 : break;
1205 : }
1206 32 : return ret;
1207 : }
1208 :
1209 40 : static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1210 : enum pid_type type)
1211 : {
1212 : /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1213 40 : bool force = false;
1214 :
1215 40 : if (info == SEND_SIG_NOINFO) {
1216 : /* Force if sent from an ancestor pid namespace */
1217 0 : force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1218 40 : } else if (info == SEND_SIG_PRIV) {
1219 : /* Don't ignore kernel generated signals */
1220 : force = true;
1221 32 : } else if (has_si_pid_and_uid(info)) {
1222 : /* SIGKILL and SIGSTOP is special or has ids */
1223 32 : struct user_namespace *t_user_ns;
1224 :
1225 32 : rcu_read_lock();
1226 64 : t_user_ns = task_cred_xxx(t, user_ns);
1227 32 : if (current_user_ns() != t_user_ns) {
1228 0 : kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1229 0 : info->si_uid = from_kuid_munged(t_user_ns, uid);
1230 : }
1231 32 : rcu_read_unlock();
1232 :
1233 : /* A kernel generated signal? */
1234 32 : force = (info->si_code == SI_KERNEL);
1235 :
1236 : /* From an ancestor pid namespace? */
1237 32 : if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1238 0 : info->si_pid = 0;
1239 0 : force = true;
1240 : }
1241 : }
1242 40 : return __send_signal(sig, info, t, type, force);
1243 : }
1244 :
1245 0 : static void print_fatal_signal(int signr)
1246 : {
1247 0 : struct pt_regs *regs = signal_pt_regs();
1248 0 : pr_info("potentially unexpected fatal signal %d.\n", signr);
1249 :
1250 : #if defined(__i386__) && !defined(__arch_um__)
1251 : pr_info("code at %08lx: ", regs->ip);
1252 : {
1253 : int i;
1254 : for (i = 0; i < 16; i++) {
1255 : unsigned char insn;
1256 :
1257 : if (get_user(insn, (unsigned char *)(regs->ip + i)))
1258 : break;
1259 : pr_cont("%02x ", insn);
1260 : }
1261 : }
1262 : pr_cont("\n");
1263 : #endif
1264 0 : preempt_disable();
1265 0 : show_regs(regs);
1266 0 : preempt_enable();
1267 0 : }
1268 :
1269 0 : static int __init setup_print_fatal_signals(char *str)
1270 : {
1271 0 : get_option (&str, &print_fatal_signals);
1272 :
1273 0 : return 1;
1274 : }
1275 :
1276 : __setup("print-fatal-signals=", setup_print_fatal_signals);
1277 :
1278 : int
1279 12 : __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1280 : {
1281 0 : return send_signal(sig, info, p, PIDTYPE_TGID);
1282 : }
1283 :
1284 28 : int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1285 : enum pid_type type)
1286 : {
1287 28 : unsigned long flags;
1288 28 : int ret = -ESRCH;
1289 :
1290 28 : if (lock_task_sighand(p, &flags)) {
1291 28 : ret = send_signal(sig, info, p, type);
1292 28 : unlock_task_sighand(p, &flags);
1293 : }
1294 :
1295 28 : return ret;
1296 : }
1297 :
1298 : /*
1299 : * Force a signal that the process can't ignore: if necessary
1300 : * we unblock the signal and change any SIG_IGN to SIG_DFL.
1301 : *
1302 : * Note: If we unblock the signal, we always reset it to SIG_DFL,
1303 : * since we do not want to have a signal handler that was blocked
1304 : * be invoked when user space had explicitly blocked it.
1305 : *
1306 : * We don't want to have recursive SIGSEGV's etc, for example,
1307 : * that is why we also clear SIGNAL_UNKILLABLE.
1308 : */
1309 : static int
1310 0 : force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1311 : {
1312 0 : unsigned long int flags;
1313 0 : int ret, blocked, ignored;
1314 0 : struct k_sigaction *action;
1315 0 : int sig = info->si_signo;
1316 :
1317 0 : spin_lock_irqsave(&t->sighand->siglock, flags);
1318 0 : action = &t->sighand->action[sig-1];
1319 0 : ignored = action->sa.sa_handler == SIG_IGN;
1320 0 : blocked = sigismember(&t->blocked, sig);
1321 0 : if (blocked || ignored) {
1322 0 : action->sa.sa_handler = SIG_DFL;
1323 0 : if (blocked) {
1324 0 : sigdelset(&t->blocked, sig);
1325 0 : recalc_sigpending_and_wake(t);
1326 : }
1327 : }
1328 : /*
1329 : * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1330 : * debugging to leave init killable.
1331 : */
1332 0 : if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1333 0 : t->signal->flags &= ~SIGNAL_UNKILLABLE;
1334 0 : ret = send_signal(sig, info, t, PIDTYPE_PID);
1335 0 : spin_unlock_irqrestore(&t->sighand->siglock, flags);
1336 :
1337 0 : return ret;
1338 : }
1339 :
1340 0 : int force_sig_info(struct kernel_siginfo *info)
1341 : {
1342 0 : return force_sig_info_to_task(info, current);
1343 : }
1344 :
1345 : /*
1346 : * Nuke all other threads in the group.
1347 : */
1348 0 : int zap_other_threads(struct task_struct *p)
1349 : {
1350 0 : struct task_struct *t = p;
1351 0 : int count = 0;
1352 :
1353 0 : p->signal->group_stop_count = 0;
1354 :
1355 0 : while_each_thread(p, t) {
1356 0 : task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1357 0 : count++;
1358 :
1359 : /* Don't bother with already dead threads */
1360 0 : if (t->exit_state)
1361 0 : continue;
1362 0 : sigaddset(&t->pending.signal, SIGKILL);
1363 0 : signal_wake_up(t, 1);
1364 : }
1365 :
1366 0 : return count;
1367 : }
1368 :
1369 227 : struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1370 : unsigned long *flags)
1371 : {
1372 227 : struct sighand_struct *sighand;
1373 :
1374 227 : rcu_read_lock();
1375 227 : for (;;) {
1376 227 : sighand = rcu_dereference(tsk->sighand);
1377 227 : if (unlikely(sighand == NULL))
1378 : break;
1379 :
1380 : /*
1381 : * This sighand can be already freed and even reused, but
1382 : * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1383 : * initializes ->siglock: this slab can't go away, it has
1384 : * the same object type, ->siglock can't be reinitialized.
1385 : *
1386 : * We need to ensure that tsk->sighand is still the same
1387 : * after we take the lock, we can race with de_thread() or
1388 : * __exit_signal(). In the latter case the next iteration
1389 : * must see ->sighand == NULL.
1390 : */
1391 227 : spin_lock_irqsave(&sighand->siglock, *flags);
1392 227 : if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1393 : break;
1394 227 : spin_unlock_irqrestore(&sighand->siglock, *flags);
1395 : }
1396 227 : rcu_read_unlock();
1397 :
1398 227 : return sighand;
1399 : }
1400 :
1401 : /*
1402 : * send signal info to all the members of a group
1403 : */
1404 33 : int group_send_sig_info(int sig, struct kernel_siginfo *info,
1405 : struct task_struct *p, enum pid_type type)
1406 : {
1407 33 : int ret;
1408 :
1409 33 : rcu_read_lock();
1410 33 : ret = check_kill_permission(sig, info, p);
1411 33 : rcu_read_unlock();
1412 :
1413 33 : if (!ret && sig)
1414 16 : ret = do_send_sig_info(sig, info, p, type);
1415 :
1416 33 : return ret;
1417 : }
1418 :
1419 : /*
1420 : * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1421 : * control characters do (^C, ^Z etc)
1422 : * - the caller must hold at least a readlock on tasklist_lock
1423 : */
1424 0 : int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1425 : {
1426 0 : struct task_struct *p = NULL;
1427 0 : int retval, success;
1428 :
1429 0 : success = 0;
1430 0 : retval = -ESRCH;
1431 0 : do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1432 0 : int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1433 0 : success |= !err;
1434 0 : retval = err;
1435 0 : } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1436 0 : return success ? 0 : retval;
1437 : }
1438 :
1439 41 : int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1440 : {
1441 41 : int error = -ESRCH;
1442 41 : struct task_struct *p;
1443 :
1444 41 : for (;;) {
1445 41 : rcu_read_lock();
1446 41 : p = pid_task(pid, PIDTYPE_PID);
1447 41 : if (p)
1448 33 : error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1449 41 : rcu_read_unlock();
1450 41 : if (likely(!p || error != -ESRCH))
1451 41 : return error;
1452 :
1453 : /*
1454 : * The task was unhashed in between, try again. If it
1455 : * is dead, pid_task() will return NULL, if we race with
1456 : * de_thread() it will find the new leader.
1457 : */
1458 : }
1459 : }
1460 :
1461 41 : static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1462 : {
1463 41 : int error;
1464 41 : rcu_read_lock();
1465 41 : error = kill_pid_info(sig, info, find_vpid(pid));
1466 41 : rcu_read_unlock();
1467 41 : return error;
1468 : }
1469 :
1470 0 : static inline bool kill_as_cred_perm(const struct cred *cred,
1471 : struct task_struct *target)
1472 : {
1473 0 : const struct cred *pcred = __task_cred(target);
1474 :
1475 0 : return uid_eq(cred->euid, pcred->suid) ||
1476 0 : uid_eq(cred->euid, pcred->uid) ||
1477 0 : uid_eq(cred->uid, pcred->suid) ||
1478 0 : uid_eq(cred->uid, pcred->uid);
1479 : }
1480 :
1481 : /*
1482 : * The usb asyncio usage of siginfo is wrong. The glibc support
1483 : * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1484 : * AKA after the generic fields:
1485 : * kernel_pid_t si_pid;
1486 : * kernel_uid32_t si_uid;
1487 : * sigval_t si_value;
1488 : *
1489 : * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1490 : * after the generic fields is:
1491 : * void __user *si_addr;
1492 : *
1493 : * This is a practical problem when there is a 64bit big endian kernel
1494 : * and a 32bit userspace. The 32bit address will be encoded in the low
1495 : * 32bits of the pointer, and those low 32bits will be stored at a higher
1496 : * address than they would appear in a 32bit pointer. So userspace will
1497 : * not see the address it was expecting for its completions.
1498 : *
1499 : * There is nothing in the encoding that can allow
1500 : * copy_siginfo_to_user32 to detect this confusion of formats, so
1501 : * handle this by requiring the caller of kill_pid_usb_asyncio to
1502 : * notice when this situration takes place and to store the 32bit
1503 : * pointer in sival_int, instead of sival_addr of the sigval_t addr
1504 : * parameter.
1505 : */
1506 0 : int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1507 : struct pid *pid, const struct cred *cred)
1508 : {
1509 0 : struct kernel_siginfo info;
1510 0 : struct task_struct *p;
1511 0 : unsigned long flags;
1512 0 : int ret = -EINVAL;
1513 :
1514 0 : if (!valid_signal(sig))
1515 : return ret;
1516 :
1517 0 : clear_siginfo(&info);
1518 0 : info.si_signo = sig;
1519 0 : info.si_errno = errno;
1520 0 : info.si_code = SI_ASYNCIO;
1521 0 : *((sigval_t *)&info.si_pid) = addr;
1522 :
1523 0 : rcu_read_lock();
1524 0 : p = pid_task(pid, PIDTYPE_PID);
1525 0 : if (!p) {
1526 0 : ret = -ESRCH;
1527 0 : goto out_unlock;
1528 : }
1529 0 : if (!kill_as_cred_perm(cred, p)) {
1530 0 : ret = -EPERM;
1531 0 : goto out_unlock;
1532 : }
1533 0 : ret = security_task_kill(p, &info, sig, cred);
1534 0 : if (ret)
1535 0 : goto out_unlock;
1536 :
1537 0 : if (sig) {
1538 0 : if (lock_task_sighand(p, &flags)) {
1539 0 : ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1540 0 : unlock_task_sighand(p, &flags);
1541 : } else
1542 : ret = -ESRCH;
1543 : }
1544 0 : out_unlock:
1545 0 : rcu_read_unlock();
1546 0 : return ret;
1547 : }
1548 : EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1549 :
1550 : /*
1551 : * kill_something_info() interprets pid in interesting ways just like kill(2).
1552 : *
1553 : * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1554 : * is probably wrong. Should make it like BSD or SYSV.
1555 : */
1556 :
1557 41 : static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1558 : {
1559 41 : int ret;
1560 :
1561 41 : if (pid > 0)
1562 41 : return kill_proc_info(sig, info, pid);
1563 :
1564 : /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1565 0 : if (pid == INT_MIN)
1566 : return -ESRCH;
1567 :
1568 0 : read_lock(&tasklist_lock);
1569 0 : if (pid != -1) {
1570 0 : ret = __kill_pgrp_info(sig, info,
1571 0 : pid ? find_vpid(-pid) : task_pgrp(current));
1572 : } else {
1573 : int retval = 0, count = 0;
1574 : struct task_struct * p;
1575 :
1576 0 : for_each_process(p) {
1577 0 : if (task_pid_vnr(p) > 1 &&
1578 0 : !same_thread_group(p, current)) {
1579 0 : int err = group_send_sig_info(sig, info, p,
1580 : PIDTYPE_MAX);
1581 0 : ++count;
1582 0 : if (err != -EPERM)
1583 0 : retval = err;
1584 : }
1585 : }
1586 0 : ret = count ? retval : -ESRCH;
1587 : }
1588 0 : read_unlock(&tasklist_lock);
1589 :
1590 0 : return ret;
1591 : }
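
The pid encodings decoded above are the same ones kill(2) documents. An editor's sketch (ex_kill_forms() is a made-up helper; the pid == -1 form is left commented out because it really would signal everything the caller is permitted to signal):

#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

static void ex_kill_forms(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* pid > 0: exactly one process (kill_proc_info) */
	kill(0, SIGTERM);	/* pid == 0: the caller's own process group */
	kill(-pgrp, SIGTERM);	/* pid < -1: process group |pid| (__kill_pgrp_info) */
	/* kill(-1, SIGTERM):	   every process we may signal, except pid 1 and ourselves */
}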
1592 :
1593 : /*
1594 : * These are for backward compatibility with the rest of the kernel source.
1595 : */
1596 :
1597 8 : int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1598 : {
1599 : /*
1600 : * Make sure legacy kernel users don't send in bad values
1601 : * (normal paths check this in check_kill_permission).
1602 : */
1603 8 : if (!valid_signal(sig))
1604 : return -EINVAL;
1605 :
1606 8 : return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1607 : }
1608 : EXPORT_SYMBOL(send_sig_info);
1609 :
1610 : #define __si_special(priv) \
1611 : ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1612 :
1613 : int
1614 0 : send_sig(int sig, struct task_struct *p, int priv)
1615 : {
1616 0 : return send_sig_info(sig, __si_special(priv), p);
1617 : }
1618 : EXPORT_SYMBOL(send_sig);
1619 :
1620 0 : void force_sig(int sig)
1621 : {
1622 0 : struct kernel_siginfo info;
1623 :
1624 0 : clear_siginfo(&info);
1625 0 : info.si_signo = sig;
1626 0 : info.si_errno = 0;
1627 0 : info.si_code = SI_KERNEL;
1628 0 : info.si_pid = 0;
1629 0 : info.si_uid = 0;
1630 0 : force_sig_info(&info);
1631 0 : }
1632 : EXPORT_SYMBOL(force_sig);
1633 :
1634 : /*
1635 : * When things go south during signal handling, we
1636 : * will force a SIGSEGV. And if the signal that caused
1637 : * the problem was already a SIGSEGV, we'll want to
1638 : * make sure we don't even try to deliver the signal.
1639 : */
1640 0 : void force_sigsegv(int sig)
1641 : {
1642 0 : struct task_struct *p = current;
1643 :
1644 0 : if (sig == SIGSEGV) {
1645 0 : unsigned long flags;
1646 0 : spin_lock_irqsave(&p->sighand->siglock, flags);
1647 0 : p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1648 0 : spin_unlock_irqrestore(&p->sighand->siglock, flags);
1649 : }
1650 0 : force_sig(SIGSEGV);
1651 0 : }
1652 :
1653 0 : int force_sig_fault_to_task(int sig, int code, void __user *addr
1654 : ___ARCH_SI_TRAPNO(int trapno)
1655 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1656 : , struct task_struct *t)
1657 : {
1658 0 : struct kernel_siginfo info;
1659 :
1660 0 : clear_siginfo(&info);
1661 0 : info.si_signo = sig;
1662 0 : info.si_errno = 0;
1663 0 : info.si_code = code;
1664 0 : info.si_addr = addr;
1665 : #ifdef __ARCH_SI_TRAPNO
1666 : info.si_trapno = trapno;
1667 : #endif
1668 : #ifdef __ia64__
1669 : info.si_imm = imm;
1670 : info.si_flags = flags;
1671 : info.si_isr = isr;
1672 : #endif
1673 0 : return force_sig_info_to_task(&info, t);
1674 : }
1675 :
1676 0 : int force_sig_fault(int sig, int code, void __user *addr
1677 : ___ARCH_SI_TRAPNO(int trapno)
1678 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1679 : {
1680 0 : return force_sig_fault_to_task(sig, code, addr
1681 : ___ARCH_SI_TRAPNO(trapno)
1682 : ___ARCH_SI_IA64(imm, flags, isr), current);
1683 : }
1684 :
1685 0 : int send_sig_fault(int sig, int code, void __user *addr
1686 : ___ARCH_SI_TRAPNO(int trapno)
1687 : ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1688 : , struct task_struct *t)
1689 : {
1690 0 : struct kernel_siginfo info;
1691 :
1692 0 : clear_siginfo(&info);
1693 0 : info.si_signo = sig;
1694 0 : info.si_errno = 0;
1695 0 : info.si_code = code;
1696 0 : info.si_addr = addr;
1697 : #ifdef __ARCH_SI_TRAPNO
1698 : info.si_trapno = trapno;
1699 : #endif
1700 : #ifdef __ia64__
1701 : info.si_imm = imm;
1702 : info.si_flags = flags;
1703 : info.si_isr = isr;
1704 : #endif
1705 0 : return send_sig_info(info.si_signo, &info, t);
1706 : }
1707 :
1708 0 : int force_sig_mceerr(int code, void __user *addr, short lsb)
1709 : {
1710 0 : struct kernel_siginfo info;
1711 :
1712 0 : WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1713 0 : clear_siginfo(&info);
1714 0 : info.si_signo = SIGBUS;
1715 0 : info.si_errno = 0;
1716 0 : info.si_code = code;
1717 0 : info.si_addr = addr;
1718 0 : info.si_addr_lsb = lsb;
1719 0 : return force_sig_info(&info);
1720 : }
1721 :
1722 0 : int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1723 : {
1724 0 : struct kernel_siginfo info;
1725 :
1726 0 : WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1727 0 : clear_siginfo(&info);
1728 0 : info.si_signo = SIGBUS;
1729 0 : info.si_errno = 0;
1730 0 : info.si_code = code;
1731 0 : info.si_addr = addr;
1732 0 : info.si_addr_lsb = lsb;
1733 0 : return send_sig_info(info.si_signo, &info, t);
1734 : }
1735 : EXPORT_SYMBOL(send_sig_mceerr);
1736 :
1737 0 : int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1738 : {
1739 0 : struct kernel_siginfo info;
1740 :
1741 0 : clear_siginfo(&info);
1742 0 : info.si_signo = SIGSEGV;
1743 0 : info.si_errno = 0;
1744 0 : info.si_code = SEGV_BNDERR;
1745 0 : info.si_addr = addr;
1746 0 : info.si_lower = lower;
1747 0 : info.si_upper = upper;
1748 0 : return force_sig_info(&info);
1749 : }
1750 :
1751 : #ifdef SEGV_PKUERR
1752 0 : int force_sig_pkuerr(void __user *addr, u32 pkey)
1753 : {
1754 0 : struct kernel_siginfo info;
1755 :
1756 0 : clear_siginfo(&info);
1757 0 : info.si_signo = SIGSEGV;
1758 0 : info.si_errno = 0;
1759 0 : info.si_code = SEGV_PKUERR;
1760 0 : info.si_addr = addr;
1761 0 : info.si_pkey = pkey;
1762 0 : return force_sig_info(&info);
1763 : }
1764 : #endif
1765 :
1766 : /* For the crazy architectures that include trap information in
1767 : * the errno field, instead of an actual errno value.
1768 : */
1769 0 : int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1770 : {
1771 0 : struct kernel_siginfo info;
1772 :
1773 0 : clear_siginfo(&info);
1774 0 : info.si_signo = SIGTRAP;
1775 0 : info.si_errno = errno;
1776 0 : info.si_code = TRAP_HWBKPT;
1777 0 : info.si_addr = addr;
1778 0 : return force_sig_info(&info);
1779 : }
1780 :
1781 0 : int kill_pgrp(struct pid *pid, int sig, int priv)
1782 : {
1783 0 : int ret;
1784 :
1785 0 : read_lock(&tasklist_lock);
1786 0 : ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1787 0 : read_unlock(&tasklist_lock);
1788 :
1789 0 : return ret;
1790 : }
1791 : EXPORT_SYMBOL(kill_pgrp);
1792 :
1793 0 : int kill_pid(struct pid *pid, int sig, int priv)
1794 : {
1795 0 : return kill_pid_info(sig, __si_special(priv), pid);
1796 : }
1797 : EXPORT_SYMBOL(kill_pid);
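/*
 * Example (editor's sketch, not part of signal.c): a driver that holds a
 * reference on a struct pid can use kill_pid() to deliver a signal to it.
 * The function below is hypothetical; priv == 1 marks the signal as
 * kernel-generated (see __si_special() used by the helpers above).
 */
static void example_notify_owner(struct pid *owner)
{
	if (owner)
		kill_pid(owner, SIGHUP, 1);
}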
1798 :
1799 : /*
1800 : * These functions support sending signals using preallocated sigqueue
1801 : * structures. This is needed "because realtime applications cannot
1802 : * afford to lose notifications of asynchronous events, like timer
1803 : * expirations or I/O completions". In the case of POSIX Timers
1804 : * we allocate the sigqueue structure from the timer_create. If this
1805 : * allocation fails we are able to report the failure to the application
1806 : * with an EAGAIN error.
1807 : */
1808 5 : struct sigqueue *sigqueue_alloc(void)
1809 : {
1810 5 : struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1811 :
1812 5 : if (q)
1813 5 : q->flags |= SIGQUEUE_PREALLOC;
1814 :
1815 5 : return q;
1816 : }
1817 :
1818 5 : void sigqueue_free(struct sigqueue *q)
1819 : {
1820 5 : unsigned long flags;
1821 5 : spinlock_t *lock = &current->sighand->siglock;
1822 :
1823 5 : BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1824 : /*
1825 : * We must hold ->siglock while testing q->list
1826 : * to serialize with collect_signal() or with
1827 : * __exit_signal()->flush_sigqueue().
1828 : */
1829 5 : spin_lock_irqsave(lock, flags);
1830 5 : q->flags &= ~SIGQUEUE_PREALLOC;
1831 : /*
1832 : * If it is queued it will be freed when dequeued,
1833 : * like the "regular" sigqueue.
1834 : */
1835 5 : if (!list_empty(&q->list))
1836 0 : q = NULL;
1837 5 : spin_unlock_irqrestore(lock, flags);
1838 :
1839 5 : if (q)
1840 5 : __sigqueue_free(q);
1841 5 : }
1842 :
1843 0 : int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1844 : {
1845 0 : int sig = q->info.si_signo;
1846 0 : struct sigpending *pending;
1847 0 : struct task_struct *t;
1848 0 : unsigned long flags;
1849 0 : int ret, result;
1850 :
1851 0 : BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1852 :
1853 0 : ret = -1;
1854 0 : rcu_read_lock();
1855 0 : t = pid_task(pid, type);
1856 0 : if (!t || !likely(lock_task_sighand(t, &flags)))
1857 0 : goto ret;
1858 :
1859 0 : ret = 1; /* the signal is ignored */
1860 0 : result = TRACE_SIGNAL_IGNORED;
1861 0 : if (!prepare_signal(sig, t, false))
1862 0 : goto out;
1863 :
1864 0 : ret = 0;
1865 0 : if (unlikely(!list_empty(&q->list))) {
1866 : /*
1867 : * If an SI_TIMER entry is already queued, just increment
1868 : * the overrun count.
1869 : */
1870 0 : BUG_ON(q->info.si_code != SI_TIMER);
1871 0 : q->info.si_overrun++;
1872 0 : result = TRACE_SIGNAL_ALREADY_PENDING;
1873 0 : goto out;
1874 : }
1875 0 : q->info.si_overrun = 0;
1876 :
1877 0 : signalfd_notify(t, sig);
1878 0 : pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1879 0 : list_add_tail(&q->list, &pending->list);
1880 0 : sigaddset(&pending->signal, sig);
1881 0 : complete_signal(sig, t, type);
1882 0 : result = TRACE_SIGNAL_DELIVERED;
1883 0 : out:
1884 0 : trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1885 0 : unlock_task_sighand(t, &flags);
1886 0 : ret:
1887 0 : rcu_read_unlock();
1888 0 : return ret;
1889 : }
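/*
 * Example (editor's sketch, not part of signal.c): the preallocation life
 * cycle described in the comment above sigqueue_alloc(), roughly as the
 * POSIX timer code uses it.  The example_timer structure and function names
 * are illustrative; q->info (si_signo, si_code == SI_TIMER, ...) is assumed
 * to be filled in by the caller before the timer first fires.
 */
struct example_timer {
	struct sigqueue *sigq;
	struct pid *target;
};

static int example_timer_create(struct example_timer *t)
{
	t->sigq = sigqueue_alloc();		/* at timer_create() time */
	if (!t->sigq)
		return -EAGAIN;			/* failure reported up front */
	return 0;
}

static void example_timer_fire(struct example_timer *t)
{
	/* Cannot fail for lack of memory: the queue entry already exists. */
	send_sigqueue(t->sigq, t->target, PIDTYPE_PID);
}

static void example_timer_delete(struct example_timer *t)
{
	sigqueue_free(t->sigq);			/* at timer_delete() time */
}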
1890 :
1891 856 : static void do_notify_pidfd(struct task_struct *task)
1892 : {
1893 856 : struct pid *pid;
1894 :
1895 856 : WARN_ON(task->exit_state == 0);
1896 856 : pid = task_pid(task);
1897 856 : wake_up_all(&pid->wait_pidfd);
1898 856 : }
1899 :
1900 : /*
1901 : * Let a parent know about the death of a child.
1902 : * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1903 : *
1904 : * Returns true if our parent ignored us and so we've switched to
1905 : * self-reaping.
1906 : */
1907 856 : bool do_notify_parent(struct task_struct *tsk, int sig)
1908 : {
1909 856 : struct kernel_siginfo info;
1910 856 : unsigned long flags;
1911 856 : struct sighand_struct *psig;
1912 856 : bool autoreap = false;
1913 856 : u64 utime, stime;
1914 :
1915 856 : BUG_ON(sig == -1);
1916 :
1917 : /* do_notify_parent_cldstop should have been called instead. */
1918 856 : BUG_ON(task_is_stopped_or_traced(tsk));
1919 :
1920 856 : BUG_ON(!tsk->ptrace &&
1921 : (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1922 :
1923 : /* Wake up all pidfd waiters */
1924 856 : do_notify_pidfd(tsk);
1925 :
1926 856 : if (sig != SIGCHLD) {
1927 : /*
1928 : * This is only possible if parent == real_parent.
1929 : * Check if it has changed security domain.
1930 : */
1931 0 : if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1932 0 : sig = SIGCHLD;
1933 : }
1934 :
1935 856 : clear_siginfo(&info);
1936 856 : info.si_signo = sig;
1937 856 : info.si_errno = 0;
1938 : /*
1939 : * We are under tasklist_lock here so our parent is tied to
1940 : * us and cannot change.
1941 : *
1942 : * task_active_pid_ns will always return the same pid namespace
1943 : * until a task passes through release_task.
1944 : *
1945 : * write_lock() currently calls preempt_disable() which is the
1946 : * same as rcu_read_lock(), but according to Oleg, it is not
1947 : * correct to rely on this.
1948 : */
1949 856 : rcu_read_lock();
1950 856 : info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1951 856 : info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1952 1712 : task_uid(tsk));
1953 856 : rcu_read_unlock();
1954 :
1955 856 : task_cputime(tsk, &utime, &stime);
1956 856 : info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1957 856 : info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1958 :
1959 856 : info.si_status = tsk->exit_code & 0x7f;
1960 856 : if (tsk->exit_code & 0x80)
1961 0 : info.si_code = CLD_DUMPED;
1962 856 : else if (tsk->exit_code & 0x7f)
1963 0 : info.si_code = CLD_KILLED;
1964 : else {
1965 856 : info.si_code = CLD_EXITED;
1966 856 : info.si_status = tsk->exit_code >> 8;
1967 : }
1968 :
1969 856 : psig = tsk->parent->sighand;
1970 856 : spin_lock_irqsave(&psig->siglock, flags);
1971 856 : if (!tsk->ptrace && sig == SIGCHLD &&
1972 856 : (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1973 856 : (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1974 : /*
1975 : * We are exiting and our parent doesn't care. POSIX.1
1976 : * defines special semantics for setting SIGCHLD to SIG_IGN
1977 : * or setting the SA_NOCLDWAIT flag: we should be reaped
1978 : * automatically and not left for our parent's wait4 call.
1979 : * Rather than having the parent do it as a magic kind of
1980 : * signal handler, we just set this to tell do_exit that we
1981 : * can be cleaned up without becoming a zombie. Note that
1982 : * we still call __wake_up_parent in this case, because a
1983 : * blocked sys_wait4 might now return -ECHILD.
1984 : *
1985 : * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1986 : * is implementation-defined: we do (if you don't want
1987 : * it, just use SIG_IGN instead).
1988 : */
1989 0 : autoreap = true;
1990 0 : if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1991 0 : sig = 0;
1992 : }
1993 : /*
1994 : * Send with __send_signal as si_pid and si_uid are in the
1995 : * parent's namespaces.
1996 : */
1997 856 : if (valid_signal(sig) && sig)
1998 856 : __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1999 856 : __wake_up_parent(tsk, tsk->parent);
2000 856 : spin_unlock_irqrestore(&psig->siglock, flags);
2001 :
2002 856 : return autoreap;
2003 : }
2004 :
2005 : /**
2006 : * do_notify_parent_cldstop - notify parent of stopped/continued state change
2007 : * @tsk: task reporting the state change
2008 : * @for_ptracer: the notification is for ptracer
2009 : * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2010 : *
2011 : * Notify @tsk's parent that the stopped/continued state has changed. If
2012 : * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2013 : * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2014 : *
2015 : * CONTEXT:
2016 : * Must be called with tasklist_lock at least read locked.
2017 : */
2018 12 : static void do_notify_parent_cldstop(struct task_struct *tsk,
2019 : bool for_ptracer, int why)
2020 : {
2021 12 : struct kernel_siginfo info;
2022 12 : unsigned long flags;
2023 12 : struct task_struct *parent;
2024 12 : struct sighand_struct *sighand;
2025 12 : u64 utime, stime;
2026 :
2027 12 : if (for_ptracer) {
2028 12 : parent = tsk->parent;
2029 : } else {
2030 0 : tsk = tsk->group_leader;
2031 0 : parent = tsk->real_parent;
2032 : }
2033 :
2034 12 : clear_siginfo(&info);
2035 12 : info.si_signo = SIGCHLD;
2036 12 : info.si_errno = 0;
2037 : /*
2038 : * see comment in do_notify_parent() about the following 4 lines
2039 : */
2040 12 : rcu_read_lock();
2041 12 : info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2042 36 : info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2043 12 : rcu_read_unlock();
2044 :
2045 12 : task_cputime(tsk, &utime, &stime);
2046 12 : info.si_utime = nsec_to_clock_t(utime);
2047 12 : info.si_stime = nsec_to_clock_t(stime);
2048 :
2049 12 : info.si_code = why;
2050 12 : switch (why) {
2051 0 : case CLD_CONTINUED:
2052 0 : info.si_status = SIGCONT;
2053 0 : break;
2054 0 : case CLD_STOPPED:
2055 0 : info.si_status = tsk->signal->group_exit_code & 0x7f;
2056 0 : break;
2057 12 : case CLD_TRAPPED:
2058 12 : info.si_status = tsk->exit_code & 0x7f;
2059 12 : break;
2060 0 : default:
2061 0 : BUG();
2062 : }
2063 :
2064 12 : sighand = parent->sighand;
2065 12 : spin_lock_irqsave(&sighand->siglock, flags);
2066 12 : if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2067 12 : !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2068 12 : __group_send_sig_info(SIGCHLD, &info, parent);
2069 : /*
2070 : * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2071 : */
2072 12 : __wake_up_parent(tsk, parent);
2073 12 : spin_unlock_irqrestore(&sighand->siglock, flags);
2074 12 : }
2075 :
2076 12 : static inline bool may_ptrace_stop(void)
2077 : {
2078 12 : if (!likely(current->ptrace))
2079 : return false;
2080 : /*
2081 : * Are we in the middle of do_coredump?
2082 : * If so and our tracer is also part of the coredump, stopping
2083 : * is a deadlock situation and pointless because our tracer
2084 : * is dead, so don't allow us to stop.
2085 : * If SIGKILL was already sent before the caller unlocked
2086 : * ->siglock we must see ->core_state != NULL. Otherwise it
2087 : * is safe to enter schedule().
2088 : *
2089 : * This is almost outdated: a task with a pending SIGKILL can't
2090 : * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2091 : * after SIGKILL was already dequeued.
2092 : */
2093 12 : if (unlikely(current->mm->core_state) &&
2094 0 : unlikely(current->mm == current->parent->mm))
2095 0 : return false;
2096 :
2097 : return true;
2098 : }
2099 :
2100 : /*
2101 : * Return non-zero if there is a SIGKILL that should be waking us up.
2102 : * Called with the siglock held.
2103 : */
2104 : static bool sigkill_pending(struct task_struct *tsk)
2105 : {
2106 : return sigismember(&tsk->pending.signal, SIGKILL) ||
2107 : sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2108 : }
2109 :
2110 : /*
2111 : * This must be called with current->sighand->siglock held.
2112 : *
2113 : * This should be the path for all ptrace stops.
2114 : * We always set current->last_siginfo while stopped here.
2115 : * That makes it a way to test a stopped process for
2116 : * being ptrace-stopped vs being job-control-stopped.
2117 : *
2118 : * If we actually decide not to stop at all because the tracer
2119 : * is gone, we keep current->exit_code unless clear_code.
2120 : */
2121 12 : static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2122 : __releases(&current->sighand->siglock)
2123 : __acquires(&current->sighand->siglock)
2124 : {
2125 12 : bool gstop_done = false;
2126 :
2127 12 : if (arch_ptrace_stop_needed(exit_code, info)) {
2128 : /*
2129 : * The arch code has something special to do before a
2130 : * ptrace stop. This is allowed to block, e.g. for faults
2131 : * on user stack pages. We can't keep the siglock while
2132 : * calling arch_ptrace_stop, so we must release it now.
2133 : * To preserve proper semantics, we must do this before
2134 : * any signal bookkeeping like checking group_stop_count.
2135 : * Meanwhile, a SIGKILL could come in before we retake the
2136 : * siglock. That must prevent us from sleeping in TASK_TRACED.
2137 : * So after regaining the lock, we must check for SIGKILL.
2138 : */
2139 : spin_unlock_irq(&current->sighand->siglock);
2140 : arch_ptrace_stop(exit_code, info);
2141 : spin_lock_irq(&current->sighand->siglock);
2142 : if (sigkill_pending(current))
2143 : return;
2144 : }
2145 :
2146 12 : set_special_state(TASK_TRACED);
2147 :
2148 : /*
2149 : * We're committing to trapping. TRACED should be visible before
2150 : * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2151 : * Also, transition to TRACED and updates to ->jobctl should be
2152 : * atomic with respect to siglock and should be done after the arch
2153 : * hook as siglock is released and regrabbed across it.
2154 : *
2155 : * TRACER TRACEE
2156 : *
2157 : * ptrace_attach()
2158 : * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2159 : * do_wait()
2160 : * set_current_state() smp_wmb();
2161 : * ptrace_do_wait()
2162 : * wait_task_stopped()
2163 : * task_stopped_code()
2164 : * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2165 : */
2166 12 : smp_wmb();
2167 :
2168 12 : current->last_siginfo = info;
2169 12 : current->exit_code = exit_code;
2170 :
2171 : /*
2172 : * If @why is CLD_STOPPED, we're trapping to participate in a group
2173 : * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2174 : * across siglock relocks since INTERRUPT was scheduled, PENDING
2175 : * could be clear now. We act as if SIGCONT is received after
2176 : * TASK_TRACED is entered - ignore it.
2177 : */
2178 12 : if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2179 0 : gstop_done = task_participate_group_stop(current);
2180 :
2181 : /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2182 12 : task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2183 12 : if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2184 0 : task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2185 :
2186 : /* entering a trap, clear TRAPPING */
2187 12 : task_clear_jobctl_trapping(current);
2188 :
2189 12 : spin_unlock_irq(&current->sighand->siglock);
2190 12 : read_lock(&tasklist_lock);
2191 12 : if (may_ptrace_stop()) {
2192 : /*
2193 : * Notify parents of the stop.
2194 : *
2195 : * While ptraced, there are two parents - the ptracer and
2196 : * the real_parent of the group_leader. The ptracer should
2197 : * know about every stop while the real parent is only
2198 : * interested in the completion of group stop. The states
2199 : * for the two don't interact with each other. Notify
2200 : * separately unless they're gonna be duplicates.
2201 : */
2202 12 : do_notify_parent_cldstop(current, true, why);
2203 12 : if (gstop_done && ptrace_reparented(current))
2204 0 : do_notify_parent_cldstop(current, false, why);
2205 :
2206 : /*
2207 : * Don't want to allow preemption here, because
2208 : * sys_ptrace() needs this task to be inactive.
2209 : *
2210 : * XXX: implement read_unlock_no_resched().
2211 : */
2212 12 : preempt_disable();
2213 12 : read_unlock(&tasklist_lock);
2214 12 : cgroup_enter_frozen();
2215 12 : preempt_enable_no_resched();
2216 12 : freezable_schedule();
2217 12 : cgroup_leave_frozen(true);
2218 : } else {
2219 : /*
2220 : * By the time we got the lock, our tracer went away.
2221 : * Don't drop the lock yet, another tracer may come.
2222 : *
2223 : * If @gstop_done, the ptracer went away between group stop
2224 : * completion and here. During detach, it would have set
2225 : * JOBCTL_STOP_PENDING on us and we'll re-enter
2226 : * TASK_STOPPED in do_signal_stop() on return, so notifying
2227 : * the real parent of the group stop completion is enough.
2228 : */
2229 0 : if (gstop_done)
2230 0 : do_notify_parent_cldstop(current, false, why);
2231 :
2232 : /* tasklist protects us from ptrace_freeze_traced() */
2233 0 : __set_current_state(TASK_RUNNING);
2234 0 : if (clear_code)
2235 0 : current->exit_code = 0;
2236 0 : read_unlock(&tasklist_lock);
2237 : }
2238 :
2239 : /*
2240 : * We are back. Now reacquire the siglock before touching
2241 : * last_siginfo, so that we are sure to have synchronized with
2242 : * any signal-sending on another CPU that wants to examine it.
2243 : */
2244 12 : spin_lock_irq(&current->sighand->siglock);
2245 12 : current->last_siginfo = NULL;
2246 :
2247 : /* LISTENING can be set only during STOP traps, clear it */
2248 12 : current->jobctl &= ~JOBCTL_LISTENING;
2249 :
2250 : /*
2251 : * Queued signals ignored us while we were stopped for tracing.
2252 : * So check for any that we should take before resuming user mode.
2253 : * This sets TIF_SIGPENDING, but never clears it.
2254 : */
2255 12 : recalc_sigpending_tsk(current);
2256 : }
2257 :
2258 0 : static void ptrace_do_notify(int signr, int exit_code, int why)
2259 : {
2260 0 : kernel_siginfo_t info;
2261 :
2262 0 : clear_siginfo(&info);
2263 0 : info.si_signo = signr;
2264 0 : info.si_code = exit_code;
2265 0 : info.si_pid = task_pid_vnr(current);
2266 0 : info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2267 :
2268 : /* Let the debugger run. */
2269 0 : ptrace_stop(exit_code, why, 1, &info);
2270 0 : }
2271 :
2272 0 : void ptrace_notify(int exit_code)
2273 : {
2274 0 : BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2275 0 : if (unlikely(current->task_works))
2276 0 : task_work_run();
2277 :
2278 0 : spin_lock_irq(&current->sighand->siglock);
2279 0 : ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2280 0 : spin_unlock_irq(&current->sighand->siglock);
2281 0 : }
2282 :
2283 : /**
2284 : * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2285 : * @signr: signr causing group stop if initiating
2286 : *
2287 : * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2288 : * and participate in it. If already set, participate in the existing
2289 : * group stop. If participated in a group stop (and thus slept), %true is
2290 : * returned with siglock released.
2291 : *
2292 : * If ptraced, this function doesn't handle stop itself. Instead,
2293 : * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2294 : * untouched. The caller must ensure that INTERRUPT trap handling takes
2295 : * places afterwards.
2296 : *
2297 : * CONTEXT:
2298 : * Must be called with @current->sighand->siglock held, which is released
2299 : * on %true return.
2300 : *
2301 : * RETURNS:
2302 : * %false if group stop is already cancelled or ptrace trap is scheduled.
2303 : * %true if participated in group stop.
2304 : */
2305 0 : static bool do_signal_stop(int signr)
2306 : __releases(&current->sighand->siglock)
2307 : {
2308 0 : struct signal_struct *sig = current->signal;
2309 :
2310 0 : if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2311 0 : unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2312 0 : struct task_struct *t;
2313 :
2314 : /* signr will be recorded in task->jobctl for retries */
2315 0 : WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2316 :
2317 0 : if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2318 0 : unlikely(signal_group_exit(sig)))
2319 : return false;
2320 : /*
2321 : * There is no group stop already in progress. We must
2322 : * initiate one now.
2323 : *
2324 : * While ptraced, a task may be resumed while group stop is
2325 : * still in effect and then receive a stop signal and
2326 : * initiate another group stop. This deviates from the
2327 : * usual behavior as two consecutive stop signals can't
2328 : * cause two group stops when !ptraced. That is why we
2329 : * also check !task_is_stopped(t) below.
2330 : *
2331 : * The condition can be distinguished by testing whether
2332 : * SIGNAL_STOP_STOPPED is already set. Don't generate
2333 : * group_exit_code in such case.
2334 : *
2335 : * This is not necessary for SIGNAL_STOP_CONTINUED because
2336 : * an intervening stop signal is required to cause two
2337 : * continued events regardless of ptrace.
2338 : */
2339 0 : if (!(sig->flags & SIGNAL_STOP_STOPPED))
2340 0 : sig->group_exit_code = signr;
2341 :
2342 0 : sig->group_stop_count = 0;
2343 :
2344 0 : if (task_set_jobctl_pending(current, signr | gstop))
2345 0 : sig->group_stop_count++;
2346 :
2347 0 : t = current;
2348 0 : while_each_thread(current, t) {
2349 : /*
2350 : * Setting state to TASK_STOPPED for a group
2351 : * stop is always done with the siglock held,
2352 : * so this check has no races.
2353 : */
2354 0 : if (!task_is_stopped(t) &&
2355 0 : task_set_jobctl_pending(t, signr | gstop)) {
2356 0 : sig->group_stop_count++;
2357 0 : if (likely(!(t->ptrace & PT_SEIZED)))
2358 0 : signal_wake_up(t, 0);
2359 : else
2360 0 : ptrace_trap_notify(t);
2361 : }
2362 : }
2363 : }
2364 :
2365 0 : if (likely(!current->ptrace)) {
2366 0 : int notify = 0;
2367 :
2368 : /*
2369 : * If there are no other threads in the group, or if there
2370 : * is a group stop in progress and we are the last to stop,
2371 : * report to the parent.
2372 : */
2373 0 : if (task_participate_group_stop(current))
2374 0 : notify = CLD_STOPPED;
2375 :
2376 0 : set_special_state(TASK_STOPPED);
2377 0 : spin_unlock_irq(&current->sighand->siglock);
2378 :
2379 : /*
2380 : * Notify the parent of the group stop completion. Because
2381 : * we're not holding either the siglock or tasklist_lock
2382 : * here, ptracer may attach in between; however, this is for
2383 : * group stop and should always be delivered to the real
2384 : * parent of the group leader. The new ptracer will get
2385 : * its notification when this task transitions into
2386 : * TASK_TRACED.
2387 : */
2388 0 : if (notify) {
2389 0 : read_lock(&tasklist_lock);
2390 0 : do_notify_parent_cldstop(current, false, notify);
2391 0 : read_unlock(&tasklist_lock);
2392 : }
2393 :
2394 : /* Now we don't run again until woken by SIGCONT or SIGKILL */
2395 0 : cgroup_enter_frozen();
2396 0 : freezable_schedule();
2397 0 : return true;
2398 : } else {
2399 : /*
2400 : * While ptraced, group stop is handled by STOP trap.
2401 : * Schedule it and let the caller deal with it.
2402 : */
2403 0 : task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2404 0 : return false;
2405 : }
2406 : }
2407 :
2408 : /**
2409 : * do_jobctl_trap - take care of ptrace jobctl traps
2410 : *
2411 : * When PT_SEIZED, it's used for both group stop and explicit
2412 : * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2413 : * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2414 : * the stop signal; otherwise, %SIGTRAP.
2415 : *
2416 : * When !PT_SEIZED, it's used only for group stop trap with stop signal
2417 : * number as exit_code and no siginfo.
2418 : *
2419 : * CONTEXT:
2420 : * Must be called with @current->sighand->siglock held, which may be
2421 : * released and re-acquired before returning with intervening sleep.
2422 : */
2423 0 : static void do_jobctl_trap(void)
2424 : {
2425 0 : struct signal_struct *signal = current->signal;
2426 0 : int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2427 :
2428 0 : if (current->ptrace & PT_SEIZED) {
2429 0 : if (!signal->group_stop_count &&
2430 : !(signal->flags & SIGNAL_STOP_STOPPED))
2431 0 : signr = SIGTRAP;
2432 0 : WARN_ON_ONCE(!signr);
2433 0 : ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2434 : CLD_STOPPED);
2435 : } else {
2436 0 : WARN_ON_ONCE(!signr);
2437 0 : ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2438 0 : current->exit_code = 0;
2439 : }
2440 0 : }
2441 :
2442 : /**
2443 : * do_freezer_trap - handle the freezer jobctl trap
2444 : *
2445 : * Puts the task into a frozen state, unless the task is about to quit.
2446 : * In that case it drops JOBCTL_TRAP_FREEZE.
2447 : *
2448 : * CONTEXT:
2449 : * Must be called with @current->sighand->siglock held,
2450 : * which is always released before returning.
2451 : */
2452 0 : static void do_freezer_trap(void)
2453 : __releases(&current->sighand->siglock)
2454 : {
2455 : /*
2456 : * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2457 : * let's make another loop to give them a chance to be handled.
2458 : * In any case, we'll return.
2459 : */
2460 0 : if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2461 : JOBCTL_TRAP_FREEZE) {
2462 0 : spin_unlock_irq(&current->sighand->siglock);
2463 0 : return;
2464 : }
2465 :
2466 : /*
2467 : * Now we're sure that there is no pending fatal signal and no
2468 : * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2469 : * immediately (if there is a non-fatal signal pending), and
2470 : * put the task into sleep.
2471 : */
2472 0 : __set_current_state(TASK_INTERRUPTIBLE);
2473 0 : clear_thread_flag(TIF_SIGPENDING);
2474 0 : spin_unlock_irq(&current->sighand->siglock);
2475 0 : cgroup_enter_frozen();
2476 0 : freezable_schedule();
2477 : }
2478 :
2479 12 : static int ptrace_signal(int signr, kernel_siginfo_t *info)
2480 : {
2481 : /*
2482 : * We do not check sig_kernel_stop(signr) but set this marker
2483 : * unconditionally because we do not know whether debugger will
2484 : * change signr. This flag has no meaning unless we are going
2485 : * to stop after return from ptrace_stop(). In this case it will
2486 : * be checked in do_signal_stop(), we should only stop if it was
2487 : * not cleared by SIGCONT while we were sleeping. See also the
2488 : * comment in dequeue_signal().
2489 : */
2490 12 : current->jobctl |= JOBCTL_STOP_DEQUEUED;
2491 12 : ptrace_stop(signr, CLD_TRAPPED, 0, info);
2492 :
2493 : /* We're back. Did the debugger cancel the sig? */
2494 12 : signr = current->exit_code;
2495 12 : if (signr == 0)
2496 : return signr;
2497 :
2498 0 : current->exit_code = 0;
2499 :
2500 : /*
2501 : * Update the siginfo structure if the signal has
2502 : * changed. If the debugger wanted something
2503 : * specific in the siginfo structure then it should
2504 : * have updated *info via PTRACE_SETSIGINFO.
2505 : */
2506 0 : if (signr != info->si_signo) {
2507 0 : clear_siginfo(info);
2508 0 : info->si_signo = signr;
2509 0 : info->si_errno = 0;
2510 0 : info->si_code = SI_USER;
2511 0 : rcu_read_lock();
2512 0 : info->si_pid = task_pid_vnr(current->parent);
2513 0 : info->si_uid = from_kuid_munged(current_user_ns(),
2514 0 : task_uid(current->parent));
2515 0 : rcu_read_unlock();
2516 : }
2517 :
2518 : /* If the (new) signal is now blocked, requeue it. */
2519 0 : if (sigismember(&current->blocked, signr)) {
2520 0 : send_signal(signr, info, current, PIDTYPE_PID);
2521 0 : signr = 0;
2522 : }
2523 :
2524 : return signr;
2525 : }
2526 :
2527 : static void hide_si_addr_tag_bits(struct ksignal *ksig)
2528 : {
2529 : switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2530 : case SIL_FAULT:
2531 : case SIL_FAULT_MCEERR:
2532 : case SIL_FAULT_BNDERR:
2533 : case SIL_FAULT_PKUERR:
2534 : ksig->info.si_addr = arch_untagged_si_addr(
2535 : ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2536 : break;
2537 : case SIL_KILL:
2538 : case SIL_TIMER:
2539 : case SIL_POLL:
2540 : case SIL_CHLD:
2541 : case SIL_RT:
2542 : case SIL_SYS:
2543 : break;
2544 : }
2545 : }
2546 :
2547 480 : bool get_signal(struct ksignal *ksig)
2548 : {
2549 480 : struct sighand_struct *sighand = current->sighand;
2550 480 : struct signal_struct *signal = current->signal;
2551 480 : int signr;
2552 :
2553 480 : if (unlikely(current->task_works))
2554 3 : task_work_run();
2555 :
2556 : /*
2557 : * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2558 : * that the arch handlers don't all have to do it. If we get here
2559 : * without TIF_SIGPENDING, just exit after running signal work.
2560 : */
2561 : if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2562 : if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2563 : tracehook_notify_signal();
2564 : if (!task_sigpending(current))
2565 : return false;
2566 : }
2567 :
2568 : if (unlikely(uprobe_deny_signal()))
2569 : return false;
2570 :
2571 : /*
2572 : * Do this once, we can't return to user-mode if freezing() == T.
2573 : * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2574 : * thus do not need another check after return.
2575 : */
2576 : try_to_freeze();
2577 :
2578 : relock:
2579 480 : spin_lock_irq(&sighand->siglock);
2580 :
2581 : /*
2582 : * Every stopped thread goes here after wakeup. Check to see if
2583 : * we should notify the parent, prepare_signal(SIGCONT) encodes
2584 : * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2585 : */
2586 480 : if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2587 0 : int why;
2588 :
2589 0 : if (signal->flags & SIGNAL_CLD_CONTINUED)
2590 : why = CLD_CONTINUED;
2591 : else
2592 0 : why = CLD_STOPPED;
2593 :
2594 0 : signal->flags &= ~SIGNAL_CLD_MASK;
2595 :
2596 0 : spin_unlock_irq(&sighand->siglock);
2597 :
2598 : /*
2599 : * Notify the parent that we're continuing. This event is
2600 : * always per-process and doesn't make a whole lot of sense
2601 : * for ptracers, who shouldn't consume the state via
2602 : * wait(2) either, but, for backward compatibility, notify
2603 : * the ptracer of the group leader too unless it's gonna be
2604 : * a duplicate.
2605 : */
2606 0 : read_lock(&tasklist_lock);
2607 0 : do_notify_parent_cldstop(current, false, why);
2608 :
2609 0 : if (ptrace_reparented(current->group_leader))
2610 0 : do_notify_parent_cldstop(current->group_leader,
2611 : true, why);
2612 0 : read_unlock(&tasklist_lock);
2613 :
2614 0 : goto relock;
2615 : }
2616 :
2617 : /* Has this task already been marked for death? */
2618 480 : if (signal_group_exit(signal)) {
2619 0 : ksig->info.si_signo = signr = SIGKILL;
2620 0 : sigdelset(&current->pending.signal, SIGKILL);
2621 0 : trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2622 : &sighand->action[SIGKILL - 1]);
2623 0 : recalc_sigpending();
2624 0 : goto fatal;
2625 : }
2626 :
2627 499 : for (;;) {
2628 499 : struct k_sigaction *ka;
2629 :
2630 499 : if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2631 0 : do_signal_stop(0))
2632 0 : goto relock;
2633 :
2634 499 : if (unlikely(current->jobctl &
2635 : (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2636 0 : if (current->jobctl & JOBCTL_TRAP_MASK) {
2637 0 : do_jobctl_trap();
2638 0 : spin_unlock_irq(&sighand->siglock);
2639 0 : } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2640 0 : do_freezer_trap();
2641 :
2642 0 : goto relock;
2643 : }
2644 :
2645 : /*
2646 : * If the task is leaving the frozen state, let's update
2647 : * cgroup counters and reset the frozen bit.
2648 : */
2649 499 : if (unlikely(cgroup_task_frozen(current))) {
2650 0 : spin_unlock_irq(&sighand->siglock);
2651 0 : cgroup_leave_frozen(false);
2652 0 : goto relock;
2653 : }
2654 :
2655 : /*
2656 : * Signals generated by the execution of an instruction
2657 : * need to be delivered before any other pending signals
2658 : * so that the instruction pointer in the signal stack
2659 : * frame points to the faulting instruction.
2660 : */
2661 499 : signr = dequeue_synchronous_signal(&ksig->info);
2662 499 : if (!signr)
2663 499 : signr = dequeue_signal(current, &current->blocked, &ksig->info);
2664 :
2665 499 : if (!signr)
2666 : break; /* will return 0 */
2667 :
2668 480 : if (unlikely(current->ptrace) && signr != SIGKILL) {
2669 12 : signr = ptrace_signal(signr, &ksig->info);
2670 12 : if (!signr)
2671 12 : continue;
2672 : }
2673 :
2674 468 : ka = &sighand->action[signr-1];
2675 :
2676 : /* Trace actually delivered signals. */
2677 468 : trace_signal_deliver(signr, &ksig->info, ka);
2678 :
2679 468 : if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2680 0 : continue;
2681 468 : if (ka->sa.sa_handler != SIG_DFL) {
2682 : /* Run the handler. */
2683 461 : ksig->ka = *ka;
2684 :
2685 461 : if (ka->sa.sa_flags & SA_ONESHOT)
2686 0 : ka->sa.sa_handler = SIG_DFL;
2687 :
2688 : break; /* will return non-zero "signr" value */
2689 : }
2690 :
2691 : /*
2692 : * Now we are doing the default action for this signal.
2693 : */
2694 7 : if (sig_kernel_ignore(signr)) /* Default is nothing. */
2695 7 : continue;
2696 :
2697 : /*
2698 : * Global init gets no signals it doesn't want.
2699 : * Container-init gets no signals it doesn't want from the same
2700 : * container.
2701 : *
2702 : * Note that if global/container-init sees a sig_kernel_only()
2703 : * signal here, the signal must have been generated internally
2704 : * or must have come from an ancestor namespace. In either
2705 : * case, the signal cannot be dropped.
2706 : */
2707 0 : if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2708 0 : !sig_kernel_only(signr))
2709 0 : continue;
2710 :
2711 0 : if (sig_kernel_stop(signr)) {
2712 : /*
2713 : * The default action is to stop all threads in
2714 : * the thread group. The job control signals
2715 : * do nothing in an orphaned pgrp, but SIGSTOP
2716 : * always works. Note that siglock needs to be
2717 : * dropped during the call to is_orphaned_pgrp()
2718 : * because of lock ordering with tasklist_lock.
2719 : * This allows an intervening SIGCONT to be posted.
2720 : * We need to check for that and bail out if necessary.
2721 : */
2722 0 : if (signr != SIGSTOP) {
2723 0 : spin_unlock_irq(&sighand->siglock);
2724 :
2725 : /* signals can be posted during this window */
2726 :
2727 0 : if (is_current_pgrp_orphaned())
2728 0 : goto relock;
2729 :
2730 0 : spin_lock_irq(&sighand->siglock);
2731 : }
2732 :
2733 0 : if (likely(do_signal_stop(ksig->info.si_signo))) {
2734 : /* It released the siglock. */
2735 0 : goto relock;
2736 : }
2737 :
2738 : /*
2739 : * We didn't actually stop, due to a race
2740 : * with SIGCONT or something like that.
2741 : */
2742 0 : continue;
2743 : }
2744 :
2745 0 : fatal:
2746 0 : spin_unlock_irq(&sighand->siglock);
2747 0 : if (unlikely(cgroup_task_frozen(current)))
2748 0 : cgroup_leave_frozen(true);
2749 :
2750 : /*
2751 : * Anything else is fatal, maybe with a core dump.
2752 : */
2753 0 : current->flags |= PF_SIGNALED;
2754 :
2755 0 : if (sig_kernel_coredump(signr)) {
2756 0 : if (print_fatal_signals)
2757 0 : print_fatal_signal(ksig->info.si_signo);
2758 0 : proc_coredump_connector(current);
2759 : /*
2760 : * If it was able to dump core, this kills all
2761 : * other threads in the group and synchronizes with
2762 : * their demise. If we lost the race with another
2763 : * thread getting here, it set group_exit_code
2764 : * first and our do_group_exit call below will use
2765 : * that value and ignore the one we pass it.
2766 : */
2767 0 : do_coredump(&ksig->info);
2768 : }
2769 :
2770 : /*
2771 : * Death signals, no core dump.
2772 : */
2773 0 : do_group_exit(ksig->info.si_signo);
2774 : /* NOTREACHED */
2775 : }
2776 480 : spin_unlock_irq(&sighand->siglock);
2777 :
2778 480 : ksig->sig = signr;
2779 :
2780 480 : if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2781 480 : hide_si_addr_tag_bits(ksig);
2782 :
2783 480 : return ksig->sig > 0;
2784 : }
2785 :
2786 : /**
2787 : * signal_delivered -
2788 : * @ksig: kernel signal struct
2789 : * @stepping: nonzero if debugger single-step or block-step in use
2790 : *
2791 : * This function should be called when a signal has successfully been
2792 : * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2793 : * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2794 : * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2795 : */
2796 461 : static void signal_delivered(struct ksignal *ksig, int stepping)
2797 : {
2798 461 : sigset_t blocked;
2799 :
2800 : /* A signal was successfully delivered, and the
2801 : saved sigmask was stored on the signal frame,
2802 : and will be restored by sigreturn. So we can
2803 : simply clear the restore sigmask flag. */
2804 461 : clear_restore_sigmask();
2805 :
2806 461 : sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2807 461 : if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2808 461 : sigaddset(&blocked, ksig->sig);
2809 922 : set_current_blocked(&blocked);
2810 461 : tracehook_signal_handler(stepping);
2811 461 : }
2812 :
2813 461 : void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2814 : {
2815 461 : if (failed)
2816 0 : force_sigsegv(ksig->sig);
2817 : else
2818 461 : signal_delivered(ksig, stepping);
2819 461 : }
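/*
 * Example (editor's sketch, not part of signal.c): the loop an arch's
 * signal-delivery path typically builds around get_signal() and
 * signal_setup_done().  setup_example_frame() stands in for the
 * arch-specific code that writes the user-space signal frame.
 */
static int setup_example_frame(struct ksignal *ksig, struct pt_regs *regs)
{
	/* A real implementation would push a frame onto the user stack. */
	return 0;
}

static void example_arch_do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* A handler should run: build the frame, then finish up. */
		int failed = setup_example_frame(&ksig, regs);

		/* Updates ->blocked, or forces SIGSEGV if the frame failed. */
		signal_setup_done(failed, &ksig, 0);
		return;
	}

	/* No handler: syscall restart / saved sigmask restore goes here. */
}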
2820 :
2821 : /*
2822 : * It could be that complete_signal() picked us to notify about the
2823 : * group-wide signal. Other threads should be notified now to take
2824 : * the shared signals in @which since we will not.
2825 : */
2826 0 : static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2827 : {
2828 0 : sigset_t retarget;
2829 0 : struct task_struct *t;
2830 :
2831 0 : sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2832 0 : if (sigisemptyset(&retarget))
2833 0 : return;
2834 :
2835 : t = tsk;
2836 0 : while_each_thread(tsk, t) {
2837 0 : if (t->flags & PF_EXITING)
2838 0 : continue;
2839 :
2840 0 : if (!has_pending_signals(&retarget, &t->blocked))
2841 0 : continue;
2842 : /* Remove the signals this thread can handle. */
2843 0 : sigandsets(&retarget, &retarget, &t->blocked);
2844 :
2845 0 : if (!task_sigpending(t))
2846 0 : signal_wake_up(t, 0);
2847 :
2848 0 : if (sigisemptyset(&retarget))
2849 : break;
2850 : }
2851 : }
2852 :
2853 858 : void exit_signals(struct task_struct *tsk)
2854 : {
2855 858 : int group_stop = 0;
2856 858 : sigset_t unblocked;
2857 :
2858 : /*
2859 : * @tsk is about to have PF_EXITING set - lock out users which
2860 : * expect stable threadgroup.
2861 : */
2862 858 : cgroup_threadgroup_change_begin(tsk);
2863 :
2864 858 : if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2865 856 : tsk->flags |= PF_EXITING;
2866 856 : cgroup_threadgroup_change_end(tsk);
2867 856 : return;
2868 : }
2869 :
2870 2 : spin_lock_irq(&tsk->sighand->siglock);
2871 : /*
2872 : * From now this task is not visible for group-wide signals,
2873 : * see wants_signal(), do_signal_stop().
2874 : */
2875 2 : tsk->flags |= PF_EXITING;
2876 :
2877 2 : cgroup_threadgroup_change_end(tsk);
2878 :
2879 2 : if (!task_sigpending(tsk))
2880 2 : goto out;
2881 :
2882 0 : unblocked = tsk->blocked;
2883 0 : signotset(&unblocked);
2884 0 : retarget_shared_pending(tsk, &unblocked);
2885 :
2886 0 : if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2887 0 : task_participate_group_stop(tsk))
2888 0 : group_stop = CLD_STOPPED;
2889 0 : out:
2890 2 : spin_unlock_irq(&tsk->sighand->siglock);
2891 :
2892 : /*
2893 : * If group stop has completed, deliver the notification. This
2894 : * should always go to the real parent of the group leader.
2895 : */
2896 2 : if (unlikely(group_stop)) {
2897 0 : read_lock(&tasklist_lock);
2898 0 : do_notify_parent_cldstop(tsk, false, group_stop);
2899 0 : read_unlock(&tasklist_lock);
2900 : }
2901 : }
2902 :
2903 : /*
2904 : * System call entry points.
2905 : */
2906 :
2907 : /**
2908 : * sys_restart_syscall - restart a system call
2909 : */
2910 0 : SYSCALL_DEFINE0(restart_syscall)
2911 : {
2912 0 : struct restart_block *restart = &current->restart_block;
2913 0 : return restart->fn(restart);
2914 : }
2915 :
2916 0 : long do_no_restart_syscall(struct restart_block *param)
2917 : {
2918 0 : return -EINTR;
2919 : }
2920 :
2921 2386 : static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2922 : {
2923 2386 : if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2924 0 : sigset_t newblocked;
2925 : /* A set of now blocked but previously unblocked signals. */
2926 0 : sigandnsets(&newblocked, newset, &current->blocked);
2927 0 : retarget_shared_pending(tsk, &newblocked);
2928 : }
2929 2386 : tsk->blocked = *newset;
2930 2386 : recalc_sigpending();
2931 2386 : }
2932 :
2933 : /**
2934 : * set_current_blocked - change current->blocked mask
2935 : * @newset: new mask
2936 : *
2937 : * It is wrong to change ->blocked directly, this helper should be used
2938 : * to ensure the process can't miss a shared signal we are going to block.
2939 : */
2940 926 : void set_current_blocked(sigset_t *newset)
2941 : {
2942 922 : sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2943 926 : __set_current_blocked(newset);
2944 4 : }
2945 :
2946 2732 : void __set_current_blocked(const sigset_t *newset)
2947 : {
2948 2732 : struct task_struct *tsk = current;
2949 :
2950 : /*
2951 : * In case the signal mask hasn't changed, there is nothing we need
2952 : * to do. The current->blocked shouldn't be modified by another task.
2953 : */
2954 2732 : if (sigequalsets(&tsk->blocked, newset))
2955 : return;
2956 :
2957 2386 : spin_lock_irq(&tsk->sighand->siglock);
2958 2386 : __set_task_blocked(tsk, newset);
2959 2386 : spin_unlock_irq(&tsk->sighand->siglock);
2960 : }
2961 :
2962 : /*
2963 : * This is also useful for kernel threads that want to temporarily
2964 : * (or permanently) block certain signals.
2965 : *
2966 : * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2967 : * interface happily blocks "unblockable" signals like SIGKILL
2968 : * and friends.
2969 : */
2970 1806 : int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2971 : {
2972 1806 : struct task_struct *tsk = current;
2973 1806 : sigset_t newset;
2974 :
2975 : /* Lockless, only current can change ->blocked, never from irq */
2976 1806 : if (oldset)
2977 0 : *oldset = tsk->blocked;
2978 :
2979 1806 : switch (how) {
2980 669 : case SIG_BLOCK:
2981 669 : sigorsets(&newset, &tsk->blocked, set);
2982 : break;
2983 342 : case SIG_UNBLOCK:
2984 342 : sigandnsets(&newset, &tsk->blocked, set);
2985 : break;
2986 795 : case SIG_SETMASK:
2987 795 : newset = *set;
2988 795 : break;
2989 : default:
2990 : return -EINVAL;
2991 : }
2992 :
2993 1806 : __set_current_blocked(&newset);
2994 1806 : return 0;
2995 : }
2996 : EXPORT_SYMBOL(sigprocmask);
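/*
 * Example (editor's sketch, not part of signal.c): a kernel thread using
 * sigprocmask() as the comment above describes, blocking every signal
 * (including the normally unblockable ones) for its own lifetime.  The
 * thread function itself is hypothetical.
 */
static int example_kthread(void *unused)
{
	sigset_t all;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, NULL);	/* SIGKILL/SIGSTOP included */

	/* ... the thread's main loop would run here ... */
	return 0;
}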
2997 :
2998 : /*
2999 : * This API helps set app-provided sigmasks.
3000 : *
3001 : * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3002 : * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3003 : *
3004 : * Note that it does set_restore_sigmask() in advance, so it must always be
3005 : * paired with restore_saved_sigmask_unless() before returning from the syscall.
3006 : */
3007 199 : int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3008 : {
3009 199 : sigset_t kmask;
3010 :
3011 199 : if (!umask)
3012 : return 0;
3013 0 : if (sigsetsize != sizeof(sigset_t))
3014 : return -EINVAL;
3015 0 : if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3016 : return -EFAULT;
3017 :
3018 0 : set_restore_sigmask();
3019 0 : current->saved_sigmask = current->blocked;
3020 0 : set_current_blocked(&kmask);
3021 :
3022 0 : return 0;
3023 : }
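/*
 * Example (editor's sketch, not part of signal.c): the required pairing of
 * set_user_sigmask() with restore_saved_sigmask_unless(), in the shape a
 * ppoll-like syscall would use it.  do_example_wait() is a stand-in for
 * the syscall's real work.
 */
static long do_example_wait(void)
{
	return 0;	/* would sleep and may return -EINTR */
}

static long example_pwait(const sigset_t __user *sigmask, size_t sigsetsize)
{
	long ret;

	ret = set_user_sigmask(sigmask, sigsetsize);	/* saves the old mask */
	if (ret)
		return ret;

	ret = do_example_wait();

	/* Keep the temporary mask in place only if a signal interrupted us. */
	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}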
3024 :
3025 : #ifdef CONFIG_COMPAT
3026 0 : int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3027 : size_t sigsetsize)
3028 : {
3029 0 : sigset_t kmask;
3030 :
3031 0 : if (!umask)
3032 : return 0;
3033 0 : if (sigsetsize != sizeof(compat_sigset_t))
3034 : return -EINVAL;
3035 0 : if (get_compat_sigset(&kmask, umask))
3036 : return -EFAULT;
3037 :
3038 0 : set_restore_sigmask();
3039 0 : current->saved_sigmask = current->blocked;
3040 0 : set_current_blocked(&kmask);
3041 :
3042 0 : return 0;
3043 : }
3044 : #endif
3045 :
3046 : /**
3047 : * sys_rt_sigprocmask - change the list of currently blocked signals
3048 : * @how: whether to add, remove, or set signals
3049 : * @nset: new signal mask to apply according to @how, if non-null
3050 : * @oset: previous value of signal mask if non-null
3051 : * @sigsetsize: size of sigset_t type
3052 : */
3053 3740 : SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3054 : sigset_t __user *, oset, size_t, sigsetsize)
3055 : {
3056 1870 : sigset_t old_set, new_set;
3057 1870 : int error;
3058 :
3059 : /* XXX: Don't preclude handling different sized sigset_t's. */
3060 1870 : if (sigsetsize != sizeof(sigset_t))
3061 : return -EINVAL;
3062 :
3063 1870 : old_set = current->blocked;
3064 :
3065 1870 : if (nset) {
3066 1806 : if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3067 : return -EFAULT;
3068 1806 : sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3069 :
3070 1806 : error = sigprocmask(how, &new_set, NULL);
3071 1806 : if (error)
3072 0 : return error;
3073 : }
3074 :
3075 1870 : if (oset) {
3076 733 : if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3077 0 : return -EFAULT;
3078 : }
3079 :
3080 : return 0;
3081 : }
3082 :
3083 : #ifdef CONFIG_COMPAT
3084 0 : COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3085 : compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3086 : {
3087 0 : sigset_t old_set = current->blocked;
3088 :
3089 : /* XXX: Don't preclude handling different sized sigset_t's. */
3090 0 : if (sigsetsize != sizeof(sigset_t))
3091 : return -EINVAL;
3092 :
3093 0 : if (nset) {
3094 0 : sigset_t new_set;
3095 0 : int error;
3096 0 : if (get_compat_sigset(&new_set, nset))
3097 0 : return -EFAULT;
3098 0 : sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3099 :
3100 0 : error = sigprocmask(how, &new_set, NULL);
3101 0 : if (error)
3102 0 : return error;
3103 : }
3104 0 : return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3105 : }
3106 : #endif
3107 :
3108 0 : static void do_sigpending(sigset_t *set)
3109 : {
3110 0 : spin_lock_irq(&current->sighand->siglock);
3111 0 : sigorsets(set, &current->pending.signal,
3112 0 : &current->signal->shared_pending.signal);
3113 0 : spin_unlock_irq(&current->sighand->siglock);
3114 :
3115 : /* Outside the lock because only this thread touches it. */
3116 0 : sigandsets(set, &current->blocked, set);
3117 0 : }
3118 :
3119 : /**
3120 : * sys_rt_sigpending - examine a pending signal that has been raised
3121 : * while blocked
3122 : * @uset: stores pending signals
3123 : * @sigsetsize: size of sigset_t type or larger
3124 : */
3125 0 : SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3126 : {
3127 0 : sigset_t set;
3128 :
3129 0 : if (sigsetsize > sizeof(*uset))
3130 : return -EINVAL;
3131 :
3132 0 : do_sigpending(&set);
3133 :
3134 0 : if (copy_to_user(uset, &set, sigsetsize))
3135 0 : return -EFAULT;
3136 :
3137 : return 0;
3138 : }
3139 :
3140 : #ifdef CONFIG_COMPAT
3141 0 : COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3142 : compat_size_t, sigsetsize)
3143 : {
3144 0 : sigset_t set;
3145 :
3146 0 : if (sigsetsize > sizeof(*uset))
3147 : return -EINVAL;
3148 :
3149 0 : do_sigpending(&set);
3150 :
3151 0 : return put_compat_sigset(uset, &set, sigsetsize);
3152 : }
3153 : #endif
3154 :
3155 : static const struct {
3156 : unsigned char limit, layout;
3157 : } sig_sicodes[] = {
3158 : [SIGILL] = { NSIGILL, SIL_FAULT },
3159 : [SIGFPE] = { NSIGFPE, SIL_FAULT },
3160 : [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3161 : [SIGBUS] = { NSIGBUS, SIL_FAULT },
3162 : [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3163 : #if defined(SIGEMT)
3164 : [SIGEMT] = { NSIGEMT, SIL_FAULT },
3165 : #endif
3166 : [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3167 : [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3168 : [SIGSYS] = { NSIGSYS, SIL_SYS },
3169 : };
3170 :
3171 0 : static bool known_siginfo_layout(unsigned sig, int si_code)
3172 : {
3173 0 : if (si_code == SI_KERNEL)
3174 : return true;
3175 0 : else if ((si_code > SI_USER)) {
3176 0 : if (sig_specific_sicodes(sig)) {
3177 0 : if (si_code <= sig_sicodes[sig].limit)
3178 0 : return true;
3179 : }
3180 0 : else if (si_code <= NSIGPOLL)
3181 0 : return true;
3182 : }
3183 0 : else if (si_code >= SI_DETHREAD)
3184 : return true;
3185 0 : else if (si_code == SI_ASYNCNL)
3186 0 : return true;
3187 : return false;
3188 : }
3189 :
3190 82 : enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3191 : {
3192 82 : enum siginfo_layout layout = SIL_KILL;
3193 82 : if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3194 51 : if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3195 51 : (si_code <= sig_sicodes[sig].limit)) {
3196 51 : layout = sig_sicodes[sig].layout;
3197 : /* Handle the exceptions */
3198 51 : if ((sig == SIGBUS) &&
3199 51 : (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3200 : layout = SIL_FAULT_MCEERR;
3201 51 : else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3202 : layout = SIL_FAULT_BNDERR;
3203 : #ifdef SEGV_PKUERR
3204 51 : else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3205 0 : layout = SIL_FAULT_PKUERR;
3206 : #endif
3207 : }
3208 0 : else if (si_code <= NSIGPOLL)
3209 0 : layout = SIL_POLL;
3210 : } else {
3211 31 : if (si_code == SI_TIMER)
3212 : layout = SIL_TIMER;
3213 31 : else if (si_code == SI_SIGIO)
3214 : layout = SIL_POLL;
3215 31 : else if (si_code < 0)
3216 4 : layout = SIL_RT;
3217 : }
3218 82 : return layout;
3219 : }
3220 :
3221 0 : static inline char __user *si_expansion(const siginfo_t __user *info)
3222 : {
3223 0 : return ((char __user *)info) + sizeof(struct kernel_siginfo);
3224 : }
3225 :
3226 0 : int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3227 : {
3228 0 : char __user *expansion = si_expansion(to);
3229 0 : if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3230 0 : return -EFAULT;
3231 0 : if (clear_user(expansion, SI_EXPANSION_SIZE))
3232 0 : return -EFAULT;
3233 : return 0;
3234 : }
3235 :
3236 0 : static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3237 : const siginfo_t __user *from)
3238 : {
3239 0 : if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3240 0 : char __user *expansion = si_expansion(from);
3241 0 : char buf[SI_EXPANSION_SIZE];
3242 0 : int i;
3243 : /*
3244 : * An unknown si_code might need more than
3245 : * sizeof(struct kernel_siginfo) bytes. Verify all of the
3246 : * extra bytes are 0. This guarantees copy_siginfo_to_user
3247 : * will return this data to userspace exactly.
3248 : */
3249 0 : if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3250 0 : return -EFAULT;
3251 0 : for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3252 0 : if (buf[i] != 0)
3253 : return -E2BIG;
3254 : }
3255 : }
3256 : return 0;
3257 : }
3258 :
3259 0 : static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3260 : const siginfo_t __user *from)
3261 : {
3262 0 : if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3263 0 : return -EFAULT;
3264 0 : to->si_signo = signo;
3265 0 : return post_copy_siginfo_from_user(to, from);
3266 : }
3267 :
3268 0 : int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3269 : {
3270 0 : if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3271 0 : return -EFAULT;
3272 0 : return post_copy_siginfo_from_user(to, from);
3273 : }
3274 :
3275 : #ifdef CONFIG_COMPAT
3276 : /**
3277 : * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3278 : * @to: compat siginfo destination
3279 : * @from: kernel siginfo source
3280 : *
3281 : * Note: This function does not work properly for SIGCHLD on x32, but
3282 : * fortunately it doesn't have to. The only valid callers for this function are
3283 : * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3284 : * The latter does not care because SIGCHLD will never cause a coredump.
3285 : */
3286 0 : void copy_siginfo_to_external32(struct compat_siginfo *to,
3287 : const struct kernel_siginfo *from)
3288 : {
3289 0 : memset(to, 0, sizeof(*to));
3290 :
3291 0 : to->si_signo = from->si_signo;
3292 0 : to->si_errno = from->si_errno;
3293 0 : to->si_code = from->si_code;
3294 0 : switch(siginfo_layout(from->si_signo, from->si_code)) {
3295 0 : case SIL_KILL:
3296 0 : to->si_pid = from->si_pid;
3297 0 : to->si_uid = from->si_uid;
3298 0 : break;
3299 0 : case SIL_TIMER:
3300 0 : to->si_tid = from->si_tid;
3301 0 : to->si_overrun = from->si_overrun;
3302 0 : to->si_int = from->si_int;
3303 0 : break;
3304 0 : case SIL_POLL:
3305 0 : to->si_band = from->si_band;
3306 0 : to->si_fd = from->si_fd;
3307 0 : break;
3308 0 : case SIL_FAULT:
3309 0 : to->si_addr = ptr_to_compat(from->si_addr);
3310 : #ifdef __ARCH_SI_TRAPNO
3311 : to->si_trapno = from->si_trapno;
3312 : #endif
3313 0 : break;
3314 0 : case SIL_FAULT_MCEERR:
3315 0 : to->si_addr = ptr_to_compat(from->si_addr);
3316 : #ifdef __ARCH_SI_TRAPNO
3317 : to->si_trapno = from->si_trapno;
3318 : #endif
3319 0 : to->si_addr_lsb = from->si_addr_lsb;
3320 0 : break;
3321 0 : case SIL_FAULT_BNDERR:
3322 0 : to->si_addr = ptr_to_compat(from->si_addr);
3323 : #ifdef __ARCH_SI_TRAPNO
3324 : to->si_trapno = from->si_trapno;
3325 : #endif
3326 0 : to->si_lower = ptr_to_compat(from->si_lower);
3327 0 : to->si_upper = ptr_to_compat(from->si_upper);
3328 0 : break;
3329 0 : case SIL_FAULT_PKUERR:
3330 0 : to->si_addr = ptr_to_compat(from->si_addr);
3331 : #ifdef __ARCH_SI_TRAPNO
3332 : to->si_trapno = from->si_trapno;
3333 : #endif
3334 0 : to->si_pkey = from->si_pkey;
3335 0 : break;
3336 0 : case SIL_CHLD:
3337 0 : to->si_pid = from->si_pid;
3338 0 : to->si_uid = from->si_uid;
3339 0 : to->si_status = from->si_status;
3340 0 : to->si_utime = from->si_utime;
3341 0 : to->si_stime = from->si_stime;
3342 0 : break;
3343 0 : case SIL_RT:
3344 0 : to->si_pid = from->si_pid;
3345 0 : to->si_uid = from->si_uid;
3346 0 : to->si_int = from->si_int;
3347 0 : break;
3348 0 : case SIL_SYS:
3349 0 : to->si_call_addr = ptr_to_compat(from->si_call_addr);
3350 0 : to->si_syscall = from->si_syscall;
3351 0 : to->si_arch = from->si_arch;
3352 0 : break;
3353 : }
3354 0 : }
3355 :
3356 0 : int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3357 : const struct kernel_siginfo *from)
3358 : {
3359 0 : struct compat_siginfo new;
3360 :
3361 0 : copy_siginfo_to_external32(&new, from);
3362 0 : if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3363 0 : return -EFAULT;
3364 : return 0;
3365 : }
3366 :
3367 0 : static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3368 : const struct compat_siginfo *from)
3369 : {
3370 0 : clear_siginfo(to);
3371 0 : to->si_signo = from->si_signo;
3372 0 : to->si_errno = from->si_errno;
3373 0 : to->si_code = from->si_code;
3374 0 : switch(siginfo_layout(from->si_signo, from->si_code)) {
3375 0 : case SIL_KILL:
3376 0 : to->si_pid = from->si_pid;
3377 0 : to->si_uid = from->si_uid;
3378 0 : break;
3379 0 : case SIL_TIMER:
3380 0 : to->si_tid = from->si_tid;
3381 0 : to->si_overrun = from->si_overrun;
3382 0 : to->si_int = from->si_int;
3383 0 : break;
3384 0 : case SIL_POLL:
3385 0 : to->si_band = from->si_band;
3386 0 : to->si_fd = from->si_fd;
3387 0 : break;
3388 0 : case SIL_FAULT:
3389 0 : to->si_addr = compat_ptr(from->si_addr);
3390 : #ifdef __ARCH_SI_TRAPNO
3391 : to->si_trapno = from->si_trapno;
3392 : #endif
3393 0 : break;
3394 0 : case SIL_FAULT_MCEERR:
3395 0 : to->si_addr = compat_ptr(from->si_addr);
3396 : #ifdef __ARCH_SI_TRAPNO
3397 : to->si_trapno = from->si_trapno;
3398 : #endif
3399 0 : to->si_addr_lsb = from->si_addr_lsb;
3400 0 : break;
3401 0 : case SIL_FAULT_BNDERR:
3402 0 : to->si_addr = compat_ptr(from->si_addr);
3403 : #ifdef __ARCH_SI_TRAPNO
3404 : to->si_trapno = from->si_trapno;
3405 : #endif
3406 0 : to->si_lower = compat_ptr(from->si_lower);
3407 0 : to->si_upper = compat_ptr(from->si_upper);
3408 0 : break;
3409 0 : case SIL_FAULT_PKUERR:
3410 0 : to->si_addr = compat_ptr(from->si_addr);
3411 : #ifdef __ARCH_SI_TRAPNO
3412 : to->si_trapno = from->si_trapno;
3413 : #endif
3414 0 : to->si_pkey = from->si_pkey;
3415 0 : break;
3416 0 : case SIL_CHLD:
3417 0 : to->si_pid = from->si_pid;
3418 0 : to->si_uid = from->si_uid;
3419 0 : to->si_status = from->si_status;
3420 : #ifdef CONFIG_X86_X32_ABI
3421 : if (in_x32_syscall()) {
3422 : to->si_utime = from->_sifields._sigchld_x32._utime;
3423 : to->si_stime = from->_sifields._sigchld_x32._stime;
3424 : } else
3425 : #endif
3426 : {
3427 0 : to->si_utime = from->si_utime;
3428 0 : to->si_stime = from->si_stime;
3429 : }
3430 0 : break;
3431 0 : case SIL_RT:
3432 0 : to->si_pid = from->si_pid;
3433 0 : to->si_uid = from->si_uid;
3434 0 : to->si_int = from->si_int;
3435 0 : break;
3436 0 : case SIL_SYS:
3437 0 : to->si_call_addr = compat_ptr(from->si_call_addr);
3438 0 : to->si_syscall = from->si_syscall;
3439 0 : to->si_arch = from->si_arch;
3440 0 : break;
3441 : }
3442 0 : return 0;
3443 : }
3444 :
3445 0 : static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3446 : const struct compat_siginfo __user *ufrom)
3447 : {
3448 0 : struct compat_siginfo from;
3449 :
3450 0 : if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3451 : return -EFAULT;
3452 :
3453 0 : from.si_signo = signo;
3454 0 : return post_copy_siginfo_from_user32(to, &from);
3455 : }
3456 :
3457 0 : int copy_siginfo_from_user32(struct kernel_siginfo *to,
3458 : const struct compat_siginfo __user *ufrom)
3459 : {
3460 0 : struct compat_siginfo from;
3461 :
3462 0 : if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3463 : return -EFAULT;
3464 :
3465 0 : return post_copy_siginfo_from_user32(to, &from);
3466 : }
3467 : #endif /* CONFIG_COMPAT */
3468 :
3469 : /**
3470 : * do_sigtimedwait - wait for queued signals specified in @which
3471 : * @which: queued signals to wait for
3472 : * @info: if non-null, the signal's siginfo is returned here
3473 : * @ts: upper bound on process time suspension
3474 : */
3475 1 : static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3476 : const struct timespec64 *ts)
3477 : {
3478 1 : ktime_t *to = NULL, timeout = KTIME_MAX;
3479 1 : struct task_struct *tsk = current;
3480 1 : sigset_t mask = *which;
3481 1 : int sig, ret = 0;
3482 :
3483 1 : if (ts) {
3484 0 : if (!timespec64_valid(ts))
3485 : return -EINVAL;
3486 0 : timeout = timespec64_to_ktime(*ts);
3487 0 : to = &timeout;
3488 : }
3489 :
3490 : /*
3491 : * Invert the set of allowed signals to get those we want to block.
3492 : */
3493 1 : sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3494 1 : signotset(&mask);
3495 :
3496 1 : spin_lock_irq(&tsk->sighand->siglock);
3497 1 : sig = dequeue_signal(tsk, &mask, info);
3498 1 : if (!sig && timeout) {
3499 : /*
3500 : * None ready, temporarily unblock those we're interested in
3501 : * while we are sleeping so that we'll be awakened when
3502 : * they arrive. Unblocking is always fine, we can avoid
3503 : * set_current_blocked().
3504 : */
3505 1 : tsk->real_blocked = tsk->blocked;
3506 1 : sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3507 1 : recalc_sigpending();
3508 1 : spin_unlock_irq(&tsk->sighand->siglock);
3509 :
3510 1 : __set_current_state(TASK_INTERRUPTIBLE);
3511 1 : ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3512 : HRTIMER_MODE_REL);
3513 0 : spin_lock_irq(&tsk->sighand->siglock);
3514 0 : __set_task_blocked(tsk, &tsk->real_blocked);
3515 0 : sigemptyset(&tsk->real_blocked);
3516 0 : sig = dequeue_signal(tsk, &mask, info);
3517 : }
3518 0 : spin_unlock_irq(&tsk->sighand->siglock);
3519 :
3520 0 : if (sig)
3521 : return sig;
3522 0 : return ret ? -EINTR : -EAGAIN;
3523 : }
3524 :
3525 : /**
3526 : * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3527 : * in @uthese
3528 : * @uthese: queued signals to wait for
3529 : * @uinfo: if non-null, the signal's siginfo is returned here
3530 : * @uts: upper bound on process time suspension
3531 : * @sigsetsize: size of sigset_t type
3532 : */
3533 2 : SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3534 : siginfo_t __user *, uinfo,
3535 : const struct __kernel_timespec __user *, uts,
3536 : size_t, sigsetsize)
3537 : {
3538 1 : sigset_t these;
3539 1 : struct timespec64 ts;
3540 1 : kernel_siginfo_t info;
3541 1 : int ret;
3542 :
3543 : /* XXX: Don't preclude handling different sized sigset_t's. */
3544 1 : if (sigsetsize != sizeof(sigset_t))
3545 : return -EINVAL;
3546 :
3547 1 : if (copy_from_user(&these, uthese, sizeof(these)))
3548 : return -EFAULT;
3549 :
3550 1 : if (uts) {
3551 0 : if (get_timespec64(&ts, uts))
3552 : return -EFAULT;
3553 : }
3554 :
3555 2 : ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3556 :
3557 0 : if (ret > 0 && uinfo) {
3558 0 : if (copy_siginfo_to_user(uinfo, &info))
3559 0 : ret = -EFAULT;
3560 : }
3561 :
3562 0 : return ret;
3563 : }
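
/*
 * Illustrative userspace sketch (not part of this file): the syscall
 * above backs glibc's sigtimedwait(3). A typical caller blocks the
 * signals of interest first, then waits for one synchronously with an
 * upper bound on the suspension time:
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0)
 *		perror("sigtimedwait");	// -1/EAGAIN maps to the -EAGAIN above
 */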
3564 :
3565 : #ifdef CONFIG_COMPAT_32BIT_TIME
3566 : SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3567 : siginfo_t __user *, uinfo,
3568 : const struct old_timespec32 __user *, uts,
3569 : size_t, sigsetsize)
3570 : {
3571 : sigset_t these;
3572 : struct timespec64 ts;
3573 : kernel_siginfo_t info;
3574 : int ret;
3575 :
3576 : if (sigsetsize != sizeof(sigset_t))
3577 : return -EINVAL;
3578 :
3579 : if (copy_from_user(&these, uthese, sizeof(these)))
3580 : return -EFAULT;
3581 :
3582 : if (uts) {
3583 : if (get_old_timespec32(&ts, uts))
3584 : return -EFAULT;
3585 : }
3586 :
3587 : ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3588 :
3589 : if (ret > 0 && uinfo) {
3590 : if (copy_siginfo_to_user(uinfo, &info))
3591 : ret = -EFAULT;
3592 : }
3593 :
3594 : return ret;
3595 : }
3596 : #endif
3597 :
3598 : #ifdef CONFIG_COMPAT
3599 0 : COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3600 : struct compat_siginfo __user *, uinfo,
3601 : struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3602 : {
3603 0 : sigset_t s;
3604 0 : struct timespec64 t;
3605 0 : kernel_siginfo_t info;
3606 0 : long ret;
3607 :
3608 0 : if (sigsetsize != sizeof(sigset_t))
3609 : return -EINVAL;
3610 :
3611 0 : if (get_compat_sigset(&s, uthese))
3612 : return -EFAULT;
3613 :
3614 0 : if (uts) {
3615 0 : if (get_timespec64(&t, uts))
3616 : return -EFAULT;
3617 : }
3618 :
3619 0 : ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3620 :
3621 0 : if (ret > 0 && uinfo) {
3622 0 : if (copy_siginfo_to_user32(uinfo, &info))
3623 0 : ret = -EFAULT;
3624 : }
3625 :
3626 : return ret;
3627 : }
3628 :
3629 : #ifdef CONFIG_COMPAT_32BIT_TIME
3630 : COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3631 : struct compat_siginfo __user *, uinfo,
3632 : struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3633 : {
3634 : sigset_t s;
3635 : struct timespec64 t;
3636 : kernel_siginfo_t info;
3637 : long ret;
3638 :
3639 : if (sigsetsize != sizeof(sigset_t))
3640 : return -EINVAL;
3641 :
3642 : if (get_compat_sigset(&s, uthese))
3643 : return -EFAULT;
3644 :
3645 : if (uts) {
3646 : if (get_old_timespec32(&t, uts))
3647 : return -EFAULT;
3648 : }
3649 :
3650 : ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3651 :
3652 : if (ret > 0 && uinfo) {
3653 : if (copy_siginfo_to_user32(uinfo, &info))
3654 : ret = -EFAULT;
3655 : }
3656 :
3657 : return ret;
3658 : }
3659 : #endif
3660 : #endif
3661 :
3662 41 : static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3663 : {
3664 41 : clear_siginfo(info);
3665 41 : info->si_signo = sig;
3666 41 : info->si_errno = 0;
3667 41 : info->si_code = SI_USER;
3668 41 : info->si_pid = task_tgid_vnr(current);
3669 41 : info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3670 41 : }
3671 :
3672 : /**
3673 : * sys_kill - send a signal to a process
3674 : * @pid: the PID of the process
3675 : * @sig: signal to be sent
3676 : */
3677 82 : SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3678 : {
3679 41 : struct kernel_siginfo info;
3680 :
3681 41 : prepare_kill_siginfo(sig, &info);
3682 :
3683 41 : return kill_something_info(sig, &info, pid);
3684 : }
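
/*
 * Illustrative userspace sketch (not part of this file): sys_kill is
 * reached through the kill(2) wrapper. pid > 0 names one process,
 * pid == 0 the caller's process group, pid == -1 every process the
 * caller may signal, and pid < -1 the process group -pid, all resolved
 * by kill_something_info(). target_pid below is a placeholder:
 *
 *	if (kill(target_pid, SIGTERM) < 0)
 *		perror("kill");
 *	kill(target_pid, 0);	// signal 0: existence/permission probe only
 */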
3685 :
3686 : /*
3687 : * Verify that the signaler and signalee either are in the same pid namespace
3688 : * or that the signaler's pid namespace is an ancestor of the signalee's pid
3689 : * namespace.
3690 : */
3691 0 : static bool access_pidfd_pidns(struct pid *pid)
3692 : {
3693 0 : struct pid_namespace *active = task_active_pid_ns(current);
3694 0 : struct pid_namespace *p = ns_of_pid(pid);
3695 :
3696 0 : for (;;) {
3697 0 : if (!p)
3698 : return false;
3699 0 : if (p == active)
3700 : break;
3701 0 : p = p->parent;
3702 : }
3703 :
3704 : return true;
3705 : }
3706 :
3707 0 : static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3708 : siginfo_t __user *info)
3709 : {
3710 : #ifdef CONFIG_COMPAT
3711 : /*
3712 : * Avoid hooking up compat syscalls and instead handle necessary
3713 : * conversions here. Note, this is a stop-gap measure and should not be
3714 : * considered a generic solution.
3715 : */
3716 0 : if (in_compat_syscall())
3717 0 : return copy_siginfo_from_user32(
3718 : kinfo, (struct compat_siginfo __user *)info);
3719 : #endif
3720 0 : return copy_siginfo_from_user(kinfo, info);
3721 : }
3722 :
3723 0 : static struct pid *pidfd_to_pid(const struct file *file)
3724 : {
3725 0 : struct pid *pid;
3726 :
3727 0 : pid = pidfd_pid(file);
3728 0 : if (!IS_ERR(pid))
3729 : return pid;
3730 :
3731 0 : return tgid_pidfd_to_pid(file);
3732 : }
3733 :
3734 : /**
3735 : * sys_pidfd_send_signal - Signal a process through a pidfd
3736 : * @pidfd: file descriptor of the process
3737 : * @sig: signal to send
3738 : * @info: signal info
3739 : * @flags: future flags
3740 : *
3741 : * The syscall currently only signals via PIDTYPE_PID which covers
3742 : * kill(<positive-pid>, <signal>). It does not signal threads or process
3743 : * groups.
3744 : * In order to extend the syscall to threads and process groups the @flags
3745 : * argument should be used. In essence, the @flags argument will determine
3746 : * what is signaled and not the file descriptor itself. In other words,
3747 : * grouping is a property of the flags argument not a property of the file
3748 : * descriptor.
3749 : *
3750 : * Return: 0 on success, negative errno on failure
3751 : */
3752 0 : SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3753 : siginfo_t __user *, info, unsigned int, flags)
3754 : {
3755 0 : int ret;
3756 0 : struct fd f;
3757 0 : struct pid *pid;
3758 0 : kernel_siginfo_t kinfo;
3759 :
3760 : /* Enforce flags be set to 0 until we add an extension. */
3761 0 : if (flags)
3762 : return -EINVAL;
3763 :
3764 0 : f = fdget(pidfd);
3765 0 : if (!f.file)
3766 : return -EBADF;
3767 :
3768 : /* Is this a pidfd? */
3769 0 : pid = pidfd_to_pid(f.file);
3770 0 : if (IS_ERR(pid)) {
3771 0 : ret = PTR_ERR(pid);
3772 0 : goto err;
3773 : }
3774 :
3775 0 : ret = -EINVAL;
3776 0 : if (!access_pidfd_pidns(pid))
3777 0 : goto err;
3778 :
3779 0 : if (info) {
3780 0 : ret = copy_siginfo_from_user_any(&kinfo, info);
3781 0 : if (unlikely(ret))
3782 0 : goto err;
3783 :
3784 0 : ret = -EINVAL;
3785 0 : if (unlikely(sig != kinfo.si_signo))
3786 0 : goto err;
3787 :
3788 : /* Only allow sending arbitrary signals to yourself. */
3789 0 : ret = -EPERM;
3790 0 : if ((task_pid(current) != pid) &&
3791 0 : (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3792 0 : goto err;
3793 : } else {
3794 0 : prepare_kill_siginfo(sig, &kinfo);
3795 : }
3796 :
3797 0 : ret = kill_pid_info(sig, &kinfo, pid);
3798 :
3799 0 : err:
3800 0 : fdput(f);
3801 0 : return ret;
3802 : }
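
/*
 * Illustrative userspace sketch (not part of this file): on systems
 * without a dedicated libc wrapper, pidfd_send_signal is issued via
 * syscall(2). The pidfd is assumed to come from pidfd_open(2) or from
 * clone(2) with CLONE_PIDFD; target_pid below is a placeholder:
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	if (pidfd >= 0 &&
 *	    syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0)
 *		perror("pidfd_send_signal");
 */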
3803 :
3804 : static int
3805 4 : do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3806 : {
3807 4 : struct task_struct *p;
3808 4 : int error = -ESRCH;
3809 :
3810 4 : rcu_read_lock();
3811 4 : p = find_task_by_vpid(pid);
3812 8 : if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3813 4 : error = check_kill_permission(sig, info, p);
3814 : /*
3815 : * The null signal is a permissions and process existence
3816 : * probe. No signal is actually delivered.
3817 : */
3818 4 : if (!error && sig) {
3819 4 : error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3820 : /*
3821 : * If lock_task_sighand() failed we pretend the task
3822 : * dies after receiving the signal. The window is tiny,
3823 : * and the signal is private anyway.
3824 : */
3825 4 : if (unlikely(error == -ESRCH))
3826 0 : error = 0;
3827 : }
3828 : }
3829 4 : rcu_read_unlock();
3830 :
3831 4 : return error;
3832 : }
3833 :
3834 4 : static int do_tkill(pid_t tgid, pid_t pid, int sig)
3835 : {
3836 4 : struct kernel_siginfo info;
3837 :
3838 4 : clear_siginfo(&info);
3839 4 : info.si_signo = sig;
3840 4 : info.si_errno = 0;
3841 4 : info.si_code = SI_TKILL;
3842 4 : info.si_pid = task_tgid_vnr(current);
3843 4 : info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3844 :
3845 4 : return do_send_specific(tgid, pid, sig, &info);
3846 : }
3847 :
3848 : /**
3849 : * sys_tgkill - send signal to one specific thread
3850 : * @tgid: the thread group ID of the thread
3851 : * @pid: the PID of the thread
3852 : * @sig: signal to be sent
3853 : *
3854 : * This syscall also checks the @tgid and returns -ESRCH even if the PID
3855 : * exists but no longer belongs to the target process. This
3856 : * method solves the problem of threads exiting and PIDs getting reused.
3857 : */
3858 8 : SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3859 : {
3860 : /* This is only valid for single tasks */
3861 4 : if (pid <= 0 || tgid <= 0)
3862 : return -EINVAL;
3863 :
3864 4 : return do_tkill(tgid, pid, sig);
3865 : }
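
/*
 * Illustrative userspace sketch (not part of this file): tgkill is
 * commonly issued through syscall(2), pairing the thread-group id
 * (getpid()) with the kernel tid of the target thread. target_tid
 * below is a placeholder obtained e.g. via gettid() in that thread:
 *
 *	if (syscall(SYS_tgkill, getpid(), target_tid, SIGUSR1) < 0)
 *		perror("tgkill");
 */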
3866 :
3867 : /**
3868 : * sys_tkill - send signal to one specific task
3869 : * @pid: the PID of the task
3870 : * @sig: signal to be sent
3871 : *
3872 : * Send a signal to only one task, even if it's a CLONE_THREAD task.
3873 : */
3874 0 : SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3875 : {
3876 : /* This is only valid for single tasks */
3877 0 : if (pid <= 0)
3878 : return -EINVAL;
3879 :
3880 0 : return do_tkill(0, pid, sig);
3881 : }
3882 :
3883 0 : static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3884 : {
3885 : /* Not even root can pretend to send signals from the kernel.
3886 : * Nor can they impersonate a kill()/tgkill(), which adds source info.
3887 : */
3888 0 : if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3889 0 : (task_pid_vnr(current) != pid))
3890 : return -EPERM;
3891 :
3892 : /* POSIX.1b doesn't mention process groups. */
3893 0 : return kill_proc_info(sig, info, pid);
3894 : }
3895 :
3896 : /**
3897 : * sys_rt_sigqueueinfo - send signal information to a process
3898 : * @pid: the PID of the thread
3899 : * @sig: signal to be sent
3900 : * @uinfo: signal info to be sent
3901 : */
3902 0 : SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3903 : siginfo_t __user *, uinfo)
3904 : {
3905 0 : kernel_siginfo_t info;
3906 0 : int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3907 0 : if (unlikely(ret))
3908 0 : return ret;
3909 0 : return do_rt_sigqueueinfo(pid, sig, &info);
3910 : }
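
/*
 * Illustrative userspace sketch (not part of this file): applications
 * normally reach rt_sigqueueinfo through sigqueue(3), which sets
 * si_code to SI_QUEUE, fills in the caller's pid/uid and attaches a
 * payload value. target_pid below is a placeholder:
 *
 *	union sigval val = { .sival_int = 42 };
 *	if (sigqueue(target_pid, SIGRTMIN, val) < 0)
 *		perror("sigqueue");
 */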
3911 :
3912 : #ifdef CONFIG_COMPAT
3913 0 : COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3914 : compat_pid_t, pid,
3915 : int, sig,
3916 : struct compat_siginfo __user *, uinfo)
3917 : {
3918 0 : kernel_siginfo_t info;
3919 0 : int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3920 0 : if (unlikely(ret))
3921 0 : return ret;
3922 0 : return do_rt_sigqueueinfo(pid, sig, &info);
3923 : }
3924 : #endif
3925 :
3926 0 : static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3927 : {
3928 : /* This is only valid for single tasks */
3929 0 : if (pid <= 0 || tgid <= 0)
3930 : return -EINVAL;
3931 :
3932 : /* Not even root can pretend to send signals from the kernel.
3933 : * Nor can they impersonate a kill()/tgkill(), which adds source info.
3934 : */
3935 0 : if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3936 0 : (task_pid_vnr(current) != pid))
3937 : return -EPERM;
3938 :
3939 0 : return do_send_specific(tgid, pid, sig, info);
3940 : }
3941 :
3942 0 : SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3943 : siginfo_t __user *, uinfo)
3944 : {
3945 0 : kernel_siginfo_t info;
3946 0 : int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3947 0 : if (unlikely(ret))
3948 0 : return ret;
3949 0 : return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3950 : }
3951 :
3952 : #ifdef CONFIG_COMPAT
3953 0 : COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3954 : compat_pid_t, tgid,
3955 : compat_pid_t, pid,
3956 : int, sig,
3957 : struct compat_siginfo __user *, uinfo)
3958 : {
3959 0 : kernel_siginfo_t info;
3960 0 : int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3961 0 : if (unlikely(ret))
3962 0 : return ret;
3963 0 : return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3964 : }
3965 : #endif
3966 :
3967 : /*
3968 : * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3969 : */
3970 0 : void kernel_sigaction(int sig, __sighandler_t action)
3971 : {
3972 0 : spin_lock_irq(&current->sighand->siglock);
3973 0 : current->sighand->action[sig - 1].sa.sa_handler = action;
3974 0 : if (action == SIG_IGN) {
3975 0 : sigset_t mask;
3976 :
3977 0 : sigemptyset(&mask);
3978 0 : sigaddset(&mask, sig);
3979 :
3980 0 : flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3981 0 : flush_sigqueue_mask(&mask, &current->pending);
3982 0 : recalc_sigpending();
3983 : }
3984 0 : spin_unlock_irq(&current->sighand->siglock);
3985 0 : }
3986 : EXPORT_SYMBOL(kernel_sigaction);
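
/*
 * In-kernel usage note (sketch): kernel_sigaction() underlies the
 * allow_signal()/disallow_signal() helpers in <linux/signal.h>, so a
 * kthread that wants to react to SIGKILL typically does:
 *
 *	allow_signal(SIGKILL);
 *	...
 *	if (signal_pending(current))
 *		break;		// clean up and stop the kthread
 */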
3987 :
3988 0 : void __weak sigaction_compat_abi(struct k_sigaction *act,
3989 : struct k_sigaction *oact)
3990 : {
3991 0 : }
3992 :
3993 3658 : int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3994 : {
3995 3658 : struct task_struct *p = current, *t;
3996 3658 : struct k_sigaction *k;
3997 3658 : sigset_t mask;
3998 :
3999 3658 : if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4000 : return -EINVAL;
4001 :
4002 3658 : k = &p->sighand->action[sig-1];
4003 :
4004 3658 : spin_lock_irq(&p->sighand->siglock);
4005 3658 : if (oact)
4006 1377 : *oact = *k;
4007 :
4008 : /*
4009 : * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4010 : * e.g. by having an architecture use the bit in their uapi.
4011 : */
4012 3658 : BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4013 :
4014 : /*
4015 : * Clear unknown flag bits in order to allow userspace to detect missing
4016 : * support for flag bits and to allow the kernel to use non-uapi bits
4017 : * internally.
4018 : */
4019 3658 : if (act)
4020 2903 : act->sa.sa_flags &= UAPI_SA_FLAGS;
4021 3658 : if (oact)
4022 1377 : oact->sa.sa_flags &= UAPI_SA_FLAGS;
4023 :
4024 3658 : sigaction_compat_abi(act, oact);
4025 :
4026 3658 : if (act) {
4027 2903 : sigdelsetmask(&act->sa.sa_mask,
4028 : sigmask(SIGKILL) | sigmask(SIGSTOP));
4029 2903 : *k = *act;
4030 : /*
4031 : * POSIX 3.3.1.3:
4032 : * "Setting a signal action to SIG_IGN for a signal that is
4033 : * pending shall cause the pending signal to be discarded,
4034 : * whether or not it is blocked."
4035 : *
4036 : * "Setting a signal action to SIG_DFL for a signal that is
4037 : * pending and whose default action is to ignore the signal
4038 : * (for example, SIGCHLD), shall cause the pending signal to
4039 : * be discarded, whether or not it is blocked"
4040 : */
4041 2903 : if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4042 256 : sigemptyset(&mask);
4043 256 : sigaddset(&mask, sig);
4044 256 : flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4045 512 : for_each_thread(p, t)
4046 256 : flush_sigqueue_mask(&mask, &t->pending);
4047 : }
4048 : }
4049 :
4050 3658 : spin_unlock_irq(&p->sighand->siglock);
4051 3658 : return 0;
4052 : }
4053 :
4054 : static int
4055 478 : do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4056 : size_t min_ss_size)
4057 : {
4058 478 : struct task_struct *t = current;
4059 :
4060 478 : if (oss) {
4061 0 : memset(oss, 0, sizeof(stack_t));
4062 0 : oss->ss_sp = (void __user *) t->sas_ss_sp;
4063 0 : oss->ss_size = t->sas_ss_size;
4064 0 : oss->ss_flags = sas_ss_flags(sp) |
4065 0 : (current->sas_ss_flags & SS_FLAG_BITS);
4066 : }
4067 :
4068 478 : if (ss) {
4069 478 : void __user *ss_sp = ss->ss_sp;
4070 478 : size_t ss_size = ss->ss_size;
4071 478 : unsigned ss_flags = ss->ss_flags;
4072 478 : int ss_mode;
4073 :
4074 478 : if (unlikely(on_sig_stack(sp)))
4075 : return -EPERM;
4076 :
4077 478 : ss_mode = ss_flags & ~SS_FLAG_BITS;
4078 478 : if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4079 : ss_mode != 0))
4080 : return -EINVAL;
4081 :
4082 478 : if (ss_mode == SS_DISABLE) {
4083 : ss_size = 0;
4084 : ss_sp = NULL;
4085 : } else {
4086 17 : if (unlikely(ss_size < min_ss_size))
4087 : return -ENOMEM;
4088 : }
4089 :
4090 478 : t->sas_ss_sp = (unsigned long) ss_sp;
4091 478 : t->sas_ss_size = ss_size;
4092 478 : t->sas_ss_flags = ss_flags;
4093 : }
4094 : return 0;
4095 : }
4096 :
4097 34 : SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4098 : {
4099 17 : stack_t new, old;
4100 17 : int err;
4101 34 : if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4102 : return -EFAULT;
4103 17 : err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4104 17 : current_user_stack_pointer(),
4105 : MINSIGSTKSZ);
4106 17 : if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4107 0 : err = -EFAULT;
4108 17 : return err;
4109 : }
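
/*
 * Illustrative userspace sketch (not part of this file): the usual
 * pattern is to register an alternate stack with sigaltstack(2) and
 * install a handler with SA_ONSTACK, so that e.g. a SIGSEGV caused by
 * stack overflow can still run a handler. segv_handler below is a
 * placeholder SA_SIGINFO-style handler:
 *
 *	static char altstack[64 * 1024];	// comfortably above MINSIGSTKSZ
 *	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
 *	struct sigaction sa = { .sa_sigaction = segv_handler,
 *				.sa_flags = SA_SIGINFO | SA_ONSTACK };
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */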
4110 :
4111 461 : int restore_altstack(const stack_t __user *uss)
4112 : {
4113 461 : stack_t new;
4114 461 : if (copy_from_user(&new, uss, sizeof(stack_t)))
4115 : return -EFAULT;
4116 461 : (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4117 : MINSIGSTKSZ);
4118 : /* squash all but EFAULT for now */
4119 461 : return 0;
4120 : }
4121 :
4122 0 : int __save_altstack(stack_t __user *uss, unsigned long sp)
4123 : {
4124 0 : struct task_struct *t = current;
4125 0 : int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4126 0 : __put_user(t->sas_ss_flags, &uss->ss_flags) |
4127 0 : __put_user(t->sas_ss_size, &uss->ss_size);
4128 0 : if (err)
4129 : return err;
4130 0 : if (t->sas_ss_flags & SS_AUTODISARM)
4131 0 : sas_ss_reset(t);
4132 : return 0;
4133 : }
4134 :
4135 : #ifdef CONFIG_COMPAT
4136 0 : static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4137 : compat_stack_t __user *uoss_ptr)
4138 : {
4139 0 : stack_t uss, uoss;
4140 0 : int ret;
4141 :
4142 0 : if (uss_ptr) {
4143 0 : compat_stack_t uss32;
4144 0 : if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4145 0 : return -EFAULT;
4146 0 : uss.ss_sp = compat_ptr(uss32.ss_sp);
4147 0 : uss.ss_flags = uss32.ss_flags;
4148 0 : uss.ss_size = uss32.ss_size;
4149 : }
4150 0 : ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4151 0 : compat_user_stack_pointer(),
4152 : COMPAT_MINSIGSTKSZ);
4153 0 : if (ret >= 0 && uoss_ptr) {
4154 0 : compat_stack_t old;
4155 0 : memset(&old, 0, sizeof(old));
4156 0 : old.ss_sp = ptr_to_compat(uoss.ss_sp);
4157 0 : old.ss_flags = uoss.ss_flags;
4158 0 : old.ss_size = uoss.ss_size;
4159 0 : if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4160 0 : ret = -EFAULT;
4161 : }
4162 : return ret;
4163 : }
4164 :
4165 0 : COMPAT_SYSCALL_DEFINE2(sigaltstack,
4166 : const compat_stack_t __user *, uss_ptr,
4167 : compat_stack_t __user *, uoss_ptr)
4168 : {
4169 0 : return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4170 : }
4171 :
4172 0 : int compat_restore_altstack(const compat_stack_t __user *uss)
4173 : {
4174 0 : int err = do_compat_sigaltstack(uss, NULL);
4175 : /* squash all but -EFAULT for now */
4176 0 : return err == -EFAULT ? err : 0;
4177 : }
4178 :
4179 0 : int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4180 : {
4181 0 : int err;
4182 0 : struct task_struct *t = current;
4183 0 : err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4184 0 : &uss->ss_sp) |
4185 0 : __put_user(t->sas_ss_flags, &uss->ss_flags) |
4186 0 : __put_user(t->sas_ss_size, &uss->ss_size);
4187 0 : if (err)
4188 : return err;
4189 0 : if (t->sas_ss_flags & SS_AUTODISARM)
4190 0 : sas_ss_reset(t);
4191 : return 0;
4192 : }
4193 : #endif
4194 :
4195 : #ifdef __ARCH_WANT_SYS_SIGPENDING
4196 :
4197 : /**
4198 : * sys_sigpending - examine pending signals
4199 : * @uset: where mask of pending signal is returned
4200 : */
4201 0 : SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4202 : {
4203 0 : sigset_t set;
4204 :
4205 0 : if (sizeof(old_sigset_t) > sizeof(*uset))
4206 : return -EINVAL;
4207 :
4208 0 : do_sigpending(&set);
4209 :
4210 0 : if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4211 0 : return -EFAULT;
4212 :
4213 : return 0;
4214 : }
4215 :
4216 : #ifdef CONFIG_COMPAT
4217 0 : COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4218 : {
4219 0 : sigset_t set;
4220 :
4221 0 : do_sigpending(&set);
4222 :
4223 0 : return put_user(set.sig[0], set32);
4224 : }
4225 : #endif
4226 :
4227 : #endif
4228 :
4229 : #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4230 : /**
4231 : * sys_sigprocmask - examine and change blocked signals
4232 : * @how: whether to add, remove, or set signals
4233 : * @nset: signals to add or remove (if non-null)
4234 : * @oset: previous value of signal mask if non-null
4235 : *
4236 : * Some platforms have their own version with special arguments;
4237 : * others support only sys_rt_sigprocmask.
4238 : */
4239 :
4240 0 : SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4241 : old_sigset_t __user *, oset)
4242 : {
4243 0 : old_sigset_t old_set, new_set;
4244 0 : sigset_t new_blocked;
4245 :
4246 0 : old_set = current->blocked.sig[0];
4247 :
4248 0 : if (nset) {
4249 0 : if (copy_from_user(&new_set, nset, sizeof(*nset)))
4250 : return -EFAULT;
4251 :
4252 0 : new_blocked = current->blocked;
4253 :
4254 0 : switch (how) {
4255 0 : case SIG_BLOCK:
4256 0 : sigaddsetmask(&new_blocked, new_set);
4257 : break;
4258 0 : case SIG_UNBLOCK:
4259 0 : sigdelsetmask(&new_blocked, new_set);
4260 : break;
4261 0 : case SIG_SETMASK:
4262 0 : new_blocked.sig[0] = new_set;
4263 0 : break;
4264 : default:
4265 : return -EINVAL;
4266 : }
4267 :
4268 0 : set_current_blocked(&new_blocked);
4269 : }
4270 :
4271 0 : if (oset) {
4272 0 : if (copy_to_user(oset, &old_set, sizeof(*oset)))
4273 0 : return -EFAULT;
4274 : }
4275 :
4276 : return 0;
4277 : }
4278 : #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4279 :
4280 : #ifndef CONFIG_ODD_RT_SIGACTION
4281 : /**
4282 : * sys_rt_sigaction - alter an action taken by a process
4283 : * @sig: signal to be sent
4284 : * @act: new sigaction
4285 : * @oact: used to save the previous sigaction
4286 : * @sigsetsize: size of sigset_t type
4287 : */
4288 7316 : SYSCALL_DEFINE4(rt_sigaction, int, sig,
4289 : const struct sigaction __user *, act,
4290 : struct sigaction __user *, oact,
4291 : size_t, sigsetsize)
4292 : {
4293 3658 : struct k_sigaction new_sa, old_sa;
4294 3658 : int ret;
4295 :
4296 : /* XXX: Don't preclude handling different sized sigset_t's. */
4297 3658 : if (sigsetsize != sizeof(sigset_t))
4298 : return -EINVAL;
4299 :
4300 6561 : if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4301 : return -EFAULT;
4302 :
4303 6694 : ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4304 3658 : if (ret)
4305 0 : return ret;
4306 :
4307 5035 : if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4308 0 : return -EFAULT;
4309 :
4310 : return 0;
4311 : }
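
/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigaction(3) marshals struct sigaction into the rt_sigaction call
 * above with sigsetsize == sizeof(sigset_t). Installing a SIGCHLD
 * handler that also blocks SIGTERM while it runs might look like
 * (sigchld_handler is a placeholder):
 *
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = sigchld_handler;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGTERM);
 *	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
 *	if (sigaction(SIGCHLD, &sa, NULL) < 0)
 *		perror("sigaction");
 */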
4312 : #ifdef CONFIG_COMPAT
4313 0 : COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4314 : const struct compat_sigaction __user *, act,
4315 : struct compat_sigaction __user *, oact,
4316 : compat_size_t, sigsetsize)
4317 : {
4318 0 : struct k_sigaction new_ka, old_ka;
4319 : #ifdef __ARCH_HAS_SA_RESTORER
4320 0 : compat_uptr_t restorer;
4321 : #endif
4322 0 : int ret;
4323 :
4324 : /* XXX: Don't preclude handling different sized sigset_t's. */
4325 0 : if (sigsetsize != sizeof(compat_sigset_t))
4326 : return -EINVAL;
4327 :
4328 0 : if (act) {
4329 0 : compat_uptr_t handler;
4330 0 : ret = get_user(handler, &act->sa_handler);
4331 0 : new_ka.sa.sa_handler = compat_ptr(handler);
4332 : #ifdef __ARCH_HAS_SA_RESTORER
4333 0 : ret |= get_user(restorer, &act->sa_restorer);
4334 0 : new_ka.sa.sa_restorer = compat_ptr(restorer);
4335 : #endif
4336 0 : ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4337 0 : ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4338 0 : if (ret)
4339 : return -EFAULT;
4340 : }
4341 :
4342 0 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4343 0 : if (!ret && oact) {
4344 0 : ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4345 : &oact->sa_handler);
4346 0 : ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4347 : sizeof(oact->sa_mask));
4348 0 : ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4349 : #ifdef __ARCH_HAS_SA_RESTORER
4350 0 : ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4351 : &oact->sa_restorer);
4352 : #endif
4353 : }
4354 0 : return ret;
4355 : }
4356 : #endif
4357 : #endif /* !CONFIG_ODD_RT_SIGACTION */
4358 :
4359 : #ifdef CONFIG_OLD_SIGACTION
4360 : SYSCALL_DEFINE3(sigaction, int, sig,
4361 : const struct old_sigaction __user *, act,
4362 : struct old_sigaction __user *, oact)
4363 : {
4364 : struct k_sigaction new_ka, old_ka;
4365 : int ret;
4366 :
4367 : if (act) {
4368 : old_sigset_t mask;
4369 : if (!access_ok(act, sizeof(*act)) ||
4370 : __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4371 : __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4372 : __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4373 : __get_user(mask, &act->sa_mask))
4374 : return -EFAULT;
4375 : #ifdef __ARCH_HAS_KA_RESTORER
4376 : new_ka.ka_restorer = NULL;
4377 : #endif
4378 : siginitset(&new_ka.sa.sa_mask, mask);
4379 : }
4380 :
4381 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4382 :
4383 : if (!ret && oact) {
4384 : if (!access_ok(oact, sizeof(*oact)) ||
4385 : __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4386 : __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4387 : __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4388 : __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4389 : return -EFAULT;
4390 : }
4391 :
4392 : return ret;
4393 : }
4394 : #endif
4395 : #ifdef CONFIG_COMPAT_OLD_SIGACTION
4396 0 : COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4397 : const struct compat_old_sigaction __user *, act,
4398 : struct compat_old_sigaction __user *, oact)
4399 : {
4400 0 : struct k_sigaction new_ka, old_ka;
4401 0 : int ret;
4402 0 : compat_old_sigset_t mask;
4403 0 : compat_uptr_t handler, restorer;
4404 :
4405 0 : if (act) {
4406 0 : if (!access_ok(act, sizeof(*act)) ||
4407 0 : __get_user(handler, &act->sa_handler) ||
4408 0 : __get_user(restorer, &act->sa_restorer) ||
4409 0 : __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4410 0 : __get_user(mask, &act->sa_mask))
4411 0 : return -EFAULT;
4412 :
4413 : #ifdef __ARCH_HAS_KA_RESTORER
4414 : new_ka.ka_restorer = NULL;
4415 : #endif
4416 0 : new_ka.sa.sa_handler = compat_ptr(handler);
4417 0 : new_ka.sa.sa_restorer = compat_ptr(restorer);
4418 0 : siginitset(&new_ka.sa.sa_mask, mask);
4419 : }
4420 :
4421 0 : ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4422 :
4423 0 : if (!ret && oact) {
4424 0 : if (!access_ok(oact, sizeof(*oact)) ||
4425 0 : __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4426 0 : &oact->sa_handler) ||
4427 0 : __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4428 0 : &oact->sa_restorer) ||
4429 0 : __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4430 0 : __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4431 0 : return -EFAULT;
4432 : }
4433 0 : return ret;
4434 : }
4435 : #endif
4436 :
4437 : #ifdef CONFIG_SGETMASK_SYSCALL
4438 :
4439 : /*
4440 : * For backwards compatibility. Functionality superseded by sigprocmask.
4441 : */
4442 : SYSCALL_DEFINE0(sgetmask)
4443 : {
4444 : /* SMP safe */
4445 : return current->blocked.sig[0];
4446 : }
4447 :
4448 : SYSCALL_DEFINE1(ssetmask, int, newmask)
4449 : {
4450 : int old = current->blocked.sig[0];
4451 : sigset_t newset;
4452 :
4453 : siginitset(&newset, newmask);
4454 : set_current_blocked(&newset);
4455 :
4456 : return old;
4457 : }
4458 : #endif /* CONFIG_SGETMASK_SYSCALL */
4459 :
4460 : #ifdef __ARCH_WANT_SYS_SIGNAL
4461 : /*
4462 : * For backwards compatibility. Functionality superseded by sigaction.
4463 : */
4464 0 : SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4465 : {
4466 0 : struct k_sigaction new_sa, old_sa;
4467 0 : int ret;
4468 :
4469 0 : new_sa.sa.sa_handler = handler;
4470 0 : new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4471 0 : sigemptyset(&new_sa.sa.sa_mask);
4472 :
4473 0 : ret = do_sigaction(sig, &new_sa, &old_sa);
4474 :
4475 0 : return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4476 : }
4477 : #endif /* __ARCH_WANT_SYS_SIGNAL */
4478 :
4479 : #ifdef __ARCH_WANT_SYS_PAUSE
4480 :
4481 0 : SYSCALL_DEFINE0(pause)
4482 : {
4483 0 : while (!signal_pending(current)) {
4484 0 : __set_current_state(TASK_INTERRUPTIBLE);
4485 0 : schedule();
4486 : }
4487 0 : return -ERESTARTNOHAND;
4488 : }
4489 :
4490 : #endif
4491 :
4492 4 : static int sigsuspend(sigset_t *set)
4493 : {
4494 4 : current->saved_sigmask = current->blocked;
4495 4 : set_current_blocked(set);
4496 :
4497 7 : while (!signal_pending(current)) {
4498 4 : __set_current_state(TASK_INTERRUPTIBLE);
4499 4 : schedule();
4500 : }
4501 3 : set_restore_sigmask();
4502 3 : return -ERESTARTNOHAND;
4503 : }
4504 :
4505 : /**
4506 : * sys_rt_sigsuspend - replace the signal mask with the @unewset
4507 : * value until a signal is received
4508 : * @unewset: new signal mask value
4509 : * @sigsetsize: size of sigset_t type
4510 : */
4511 8 : SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4512 : {
4513 4 : sigset_t newset;
4514 :
4515 : /* XXX: Don't preclude handling different sized sigset_t's. */
4516 4 : if (sigsetsize != sizeof(sigset_t))
4517 : return -EINVAL;
4518 :
4519 4 : if (copy_from_user(&newset, unewset, sizeof(newset)))
4520 : return -EFAULT;
4521 4 : return sigsuspend(&newset);
4522 : }
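
/*
 * Illustrative userspace sketch (not part of this file): sigsuspend(2)
 * exists to close the check-then-wait race. usr1_seen below is a
 * placeholder volatile sig_atomic_t flag set by the SIGUSR1 handler:
 *
 *	sigset_t block, oldmask, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &oldmask);
 *	while (!usr1_seen) {
 *		waitmask = oldmask;
 *		sigdelset(&waitmask, SIGUSR1);
 *		sigsuspend(&waitmask);	// returns -1 with errno == EINTR
 *	}
 *	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 */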
4523 :
4524 : #ifdef CONFIG_COMPAT
4525 0 : COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4526 : {
4527 0 : sigset_t newset;
4528 :
4529 : /* XXX: Don't preclude handling different sized sigset_t's. */
4530 0 : if (sigsetsize != sizeof(sigset_t))
4531 : return -EINVAL;
4532 :
4533 0 : if (get_compat_sigset(&newset, unewset))
4534 : return -EFAULT;
4535 0 : return sigsuspend(&newset);
4536 : }
4537 : #endif
4538 :
4539 : #ifdef CONFIG_OLD_SIGSUSPEND
4540 : SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4541 : {
4542 : sigset_t blocked;
4543 : siginitset(&blocked, mask);
4544 : return sigsuspend(&blocked);
4545 : }
4546 : #endif
4547 : #ifdef CONFIG_OLD_SIGSUSPEND3
4548 0 : SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4549 : {
4550 0 : sigset_t blocked;
4551 0 : siginitset(&blocked, mask);
4552 0 : return sigsuspend(&blocked);
4553 : }
4554 : #endif
4555 :
4556 0 : __weak const char *arch_vma_name(struct vm_area_struct *vma)
4557 : {
4558 0 : return NULL;
4559 : }
4560 :
4561 1 : static inline void siginfo_buildtime_checks(void)
4562 : {
4563 1 : BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4564 :
4565 : /* Verify the offsets in the two siginfos match */
4566 : #define CHECK_OFFSET(field) \
4567 : BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4568 :
4569 : /* kill */
4570 1 : CHECK_OFFSET(si_pid);
4571 1 : CHECK_OFFSET(si_uid);
4572 :
4573 : /* timer */
4574 1 : CHECK_OFFSET(si_tid);
4575 1 : CHECK_OFFSET(si_overrun);
4576 1 : CHECK_OFFSET(si_value);
4577 :
4578 : /* rt */
4579 1 : CHECK_OFFSET(si_pid);
4580 1 : CHECK_OFFSET(si_uid);
4581 1 : CHECK_OFFSET(si_value);
4582 :
4583 : /* sigchld */
4584 1 : CHECK_OFFSET(si_pid);
4585 1 : CHECK_OFFSET(si_uid);
4586 1 : CHECK_OFFSET(si_status);
4587 1 : CHECK_OFFSET(si_utime);
4588 1 : CHECK_OFFSET(si_stime);
4589 :
4590 : /* sigfault */
4591 1 : CHECK_OFFSET(si_addr);
4592 1 : CHECK_OFFSET(si_addr_lsb);
4593 1 : CHECK_OFFSET(si_lower);
4594 1 : CHECK_OFFSET(si_upper);
4595 1 : CHECK_OFFSET(si_pkey);
4596 :
4597 : /* sigpoll */
4598 1 : CHECK_OFFSET(si_band);
4599 1 : CHECK_OFFSET(si_fd);
4600 :
4601 : /* sigsys */
4602 1 : CHECK_OFFSET(si_call_addr);
4603 1 : CHECK_OFFSET(si_syscall);
4604 1 : CHECK_OFFSET(si_arch);
4605 : #undef CHECK_OFFSET
4606 :
4607 : /* usb asyncio */
4608 1 : BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4609 : offsetof(struct siginfo, si_addr));
4610 1 : if (sizeof(int) == sizeof(void __user *)) {
4611 : BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4612 : sizeof(void __user *));
4613 : } else {
4614 1 : BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4615 : sizeof_field(struct siginfo, si_uid)) !=
4616 : sizeof(void __user *));
4617 1 : BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4618 : offsetof(struct siginfo, si_uid));
4619 : }
4620 : #ifdef CONFIG_COMPAT
4621 1 : BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4622 : offsetof(struct compat_siginfo, si_addr));
4623 1 : BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4624 : sizeof(compat_uptr_t));
4625 1 : BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4626 : sizeof_field(struct siginfo, si_pid));
4627 : #endif
4628 : }
4629 :
4630 1 : void __init signals_init(void)
4631 : {
4632 1 : siginfo_buildtime_checks();
4633 :
4634 1 : sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4635 1 : }
4636 :
4637 : #ifdef CONFIG_KGDB_KDB
4638 : #include <linux/kdb.h>
4639 : /*
4640 : * kdb_send_sig - Allows kdb to send signals without exposing
4641 : * signal internals. This function checks if the required locks are
4642 : * available before calling the main signal code, to avoid kdb
4643 : * deadlocks.
4644 : */
4645 : void kdb_send_sig(struct task_struct *t, int sig)
4646 : {
4647 : static struct task_struct *kdb_prev_t;
4648 : int new_t, ret;
4649 : if (!spin_trylock(&t->sighand->siglock)) {
4650 : kdb_printf("Can't do kill command now.\n"
4651 : "The sigmask lock is held somewhere else in "
4652 : "kernel, try again later\n");
4653 : return;
4654 : }
4655 : new_t = kdb_prev_t != t;
4656 : kdb_prev_t = t;
4657 : if (t->state != TASK_RUNNING && new_t) {
4658 : spin_unlock(&t->sighand->siglock);
4659 : kdb_printf("Process is not RUNNING, sending a signal from "
4660 : "kdb risks deadlock\n"
4661 : "on the run queue locks. "
4662 : "The signal has _not_ been sent.\n"
4663 : "Reissue the kill command if you want to risk "
4664 : "the deadlock.\n");
4665 : return;
4666 : }
4667 : ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4668 : spin_unlock(&t->sighand->siglock);
4669 : if (ret)
4670 : kdb_printf("Failed to deliver signal %d to process %d.\n",
4671 : sig, t->pid);
4672 : else
4673 : kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4674 : }
4675 : #endif /* CONFIG_KGDB_KDB */