Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_PTRACE_H
3 : #define _LINUX_PTRACE_H
4 :
5 : #include <linux/compiler.h> /* For unlikely. */
6 : #include <linux/sched.h> /* For struct task_struct. */
7 : #include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */
8 : #include <linux/err.h> /* for IS_ERR_VALUE */
9 : #include <linux/bug.h> /* For BUG_ON. */
10 : #include <linux/pid_namespace.h> /* For task_active_pid_ns. */
11 : #include <uapi/linux/ptrace.h>
12 : #include <linux/seccomp.h>
13 :
14 : /* Add sp to seccomp_data; seccomp is a user API, so we don't want to modify it */
15 : struct syscall_info {
16 : __u64 sp;
17 : struct seccomp_data data;
18 : };
19 :
20 : extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
21 : void *buf, int len, unsigned int gup_flags);
22 :
23 : /*
24 : * Ptrace flags
25 : *
26 : * The ownership rules for task->ptrace, which holds the ptrace
27 : * flags, are simple. When a task is running it owns its task->ptrace
28 : * flags. When a task is stopped, the ptracer owns task->ptrace.
29 : */
30 :
31 : #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
32 : #define PT_PTRACED 0x00000001
33 : #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
34 :
35 : #define PT_OPT_FLAG_SHIFT 3
36 : /* PT_TRACE_* event enable flags */
37 : #define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event)))
38 : #define PT_TRACESYSGOOD PT_EVENT_FLAG(0)
39 : #define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
40 : #define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
41 : #define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
42 : #define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
43 : #define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
44 : #define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
45 : #define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
46 :
47 : #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
48 : #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
49 :
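/*
 * Worked example (editor's note, not part of this header): the PT_* event
 * bits are the user-visible PTRACE_O_* option bits shifted up by
 * PT_OPT_FLAG_SHIFT.  With PTRACE_EVENT_EXEC == 4 and
 * PTRACE_O_TRACEEXEC == (1 << 4):
 *
 *	PT_TRACE_EXEC == PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
 *		      == 1 << (PT_OPT_FLAG_SHIFT + 4) == 0x80
 *		      == PTRACE_O_TRACEEXEC << PT_OPT_FLAG_SHIFT
 *
 * which is why tracer-side option handling can turn a PTRACE_O_* mask into
 * task->ptrace flags with a single shift, as PT_EXITKILL and
 * PT_SUSPEND_SECCOMP above also rely on.
 */
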
50 : /* single stepping state bits (used on ARM and PA-RISC) */
51 : #define PT_SINGLESTEP_BIT 31
52 : #define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
53 : #define PT_BLOCKSTEP_BIT 30
54 : #define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
55 :
56 : extern long arch_ptrace(struct task_struct *child, long request,
57 : unsigned long addr, unsigned long data);
58 : extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
59 : extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
60 : extern void ptrace_disable(struct task_struct *);
61 : extern int ptrace_request(struct task_struct *child, long request,
62 : unsigned long addr, unsigned long data);
63 : extern void ptrace_notify(int exit_code);
64 : extern void __ptrace_link(struct task_struct *child,
65 : struct task_struct *new_parent,
66 : const struct cred *ptracer_cred);
67 : extern void __ptrace_unlink(struct task_struct *child);
68 : extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
69 : #define PTRACE_MODE_READ 0x01
70 : #define PTRACE_MODE_ATTACH 0x02
71 : #define PTRACE_MODE_NOAUDIT 0x04
72 : #define PTRACE_MODE_FSCREDS 0x08
73 : #define PTRACE_MODE_REALCREDS 0x10
74 :
75 : /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
76 : #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
77 : #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
78 : #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
79 : #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
80 :
81 : /**
82 : * ptrace_may_access - check whether the caller is permitted to access
83 : * a target task.
84 : * @task: target task
85 : * @mode: selects type of access and caller credentials
86 : *
87 : * Returns true on success, false on denial.
88 : *
89 : * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
90 : * be set in @mode to specify whether the access was requested through
91 : * a filesystem syscall (should use effective capabilities and fsuid
92 : * of the caller) or through an explicit syscall such as
93 : * process_vm_writev or ptrace (and should use the real credentials).
94 : */
95 : extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
96 :
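/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * pseudo-filesystem read handler would typically use a *_FSCREDS mode
 * because the request arrives through a filesystem syscall, whereas a
 * direct attach-style interface would use *_REALCREDS.  The function
 * below is hypothetical.
 */
#if 0	/* example only */
static int example_can_read_target(struct task_struct *target)
{
	if (!ptrace_may_access(target, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;
	return 0;
}
#endif
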
97 0 : static inline int ptrace_reparented(struct task_struct *child)
98 : {
99 0 : return !same_thread_group(child->real_parent, child->parent);
100 : }
101 :
102 0 : static inline void ptrace_unlink(struct task_struct *child)
103 : {
104 0 : if (unlikely(child->ptrace))
105 0 : __ptrace_unlink(child);
106 : }
107 :
108 : int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
109 : unsigned long data);
110 : int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
111 : unsigned long data);
112 :
113 : /**
114 : * ptrace_parent - return the task that is tracing the given task
115 : * @task: task to consider
116 : *
117 : * Returns %NULL if no one is tracing @task, or the &struct task_struct
118 : * pointer to its tracer.
119 : *
120 : * Must be called under rcu_read_lock(). The pointer returned might be kept
121 : * live only by RCU. During exec, this may be called with task_lock() held
122 : * on @task, still held from when check_unsafe_exec() was called.
123 : */
124 56 : static inline struct task_struct *ptrace_parent(struct task_struct *task)
125 : {
126 56 : if (unlikely(task->ptrace))
127 0 : return rcu_dereference(task->parent);
128 : return NULL;
129 : }
130 :
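/*
 * Illustrative sketch (editor's addition, not part of this header): callers
 * must wrap ptrace_parent() in rcu_read_lock()/rcu_read_unlock() and must not
 * use the returned pointer outside the read-side critical section.  The
 * helper name is hypothetical.
 */
#if 0	/* example only */
static pid_t example_tracer_pid(struct task_struct *task)
{
	struct task_struct *tracer;
	pid_t ret = 0;

	rcu_read_lock();
	tracer = ptrace_parent(task);
	if (tracer)
		ret = task_pid_nr(tracer);
	rcu_read_unlock();

	return ret;
}
#endif
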
131 : /**
132 : * ptrace_event_enabled - test whether a ptrace event is enabled
133 : * @task: ptracee of interest
134 : * @event: %PTRACE_EVENT_* to test
135 : *
136 : * Test whether @event is enabled for ptracee @task.
137 : *
138 : * Returns %true if @event is enabled, %false otherwise.
139 : */
140 1884 : static inline bool ptrace_event_enabled(struct task_struct *task, int event)
141 : {
142 1884 : return task->ptrace & PT_EVENT_FLAG(event);
143 : }
144 :
145 : /**
146 : * ptrace_event - possibly stop for a ptrace event notification
147 : * @event: %PTRACE_EVENT_* value to report
148 : * @message: value for %PTRACE_GETEVENTMSG to return
149 : *
150 : * Check whether @event is enabled and, if so, report @event and @message
151 : * to the ptrace parent.
152 : *
153 : * Called without locks.
154 : */
155 1021 : static inline void ptrace_event(int event, unsigned long message)
156 : {
157 1021 : if (unlikely(ptrace_event_enabled(current, event))) {
158 0 : current->ptrace_message = message;
159 0 : ptrace_notify((event << 8) | SIGTRAP);
160 1021 : } else if (event == PTRACE_EVENT_EXEC) {
161 : /* legacy EXEC report via SIGTRAP */
162 1021 : if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
163 0 : send_sig(SIGTRAP, current, 0);
164 : }
165 1021 : }
166 :
167 : /**
168 : * ptrace_event_pid - possibly stop for a ptrace event notification
169 : * @event: %PTRACE_EVENT_* value to report
170 : * @pid: process identifier for %PTRACE_GETEVENTMSG to return
171 : *
172 : * Check whether @event is enabled and, if so, report @event and @pid
173 : * to the ptrace parent. @pid is reported as the pid_t seen from the
174 : * ptrace parent's pid namespace.
175 : *
176 : * Called without locks.
177 : */
178 0 : static inline void ptrace_event_pid(int event, struct pid *pid)
179 : {
180 : /*
181 : * FIXME: There's a potential race if a ptracer in a different pid
182 : * namespace than parent attaches between computing message below and
183 : * when we acquire tasklist_lock in ptrace_stop(). If this happens,
184 : * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
185 : */
186 0 : unsigned long message = 0;
187 0 : struct pid_namespace *ns;
188 :
189 0 : rcu_read_lock();
190 0 : ns = task_active_pid_ns(rcu_dereference(current->parent));
191 0 : if (ns)
192 0 : message = pid_nr_ns(pid, ns);
193 0 : rcu_read_unlock();
194 :
195 0 : ptrace_event(event, message);
196 0 : }
197 :
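/*
 * Illustrative sketch (editor's addition, not part of this header): this is
 * roughly how a fork/clone path picks which event to report for a new child
 * (kernel/fork.c does something similar); the helper name is hypothetical.
 */
#if 0	/* example only */
static void example_report_new_child(unsigned long clone_flags, struct pid *child_pid)
{
	if (clone_flags & CLONE_VFORK)
		ptrace_event_pid(PTRACE_EVENT_VFORK, child_pid);
	else if ((clone_flags & CSIGNAL) != SIGCHLD)
		ptrace_event_pid(PTRACE_EVENT_CLONE, child_pid);
	else
		ptrace_event_pid(PTRACE_EVENT_FORK, child_pid);
}
#endif
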
198 : /**
199 : * ptrace_init_task - initialize ptrace state for a new child
200 : * @child: new child task
201 : * @ptrace: true if child should be ptrace'd by parent's tracer
202 : *
203 : * This is called immediately after adding @child to its parent's children
204 : * list. @ptrace is false in the normal case, and true to ptrace @child.
205 : *
206 : * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
207 : */
208 913 : static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
209 : {
210 913 : INIT_LIST_HEAD(&child->ptrace_entry);
211 913 : INIT_LIST_HEAD(&child->ptraced);
212 913 : child->jobctl = 0;
213 913 : child->ptrace = 0;
214 913 : child->parent = child->real_parent;
215 :
216 913 : if (unlikely(ptrace) && current->ptrace) {
217 0 : child->ptrace = current->ptrace;
218 0 : __ptrace_link(child, current->parent, current->ptracer_cred);
219 :
220 0 : if (child->ptrace & PT_SEIZED)
221 0 : task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
222 : else
223 0 : sigaddset(&child->pending.signal, SIGSTOP);
224 : }
225 : else
226 913 : child->ptracer_cred = NULL;
227 913 : }
228 :
229 : /**
230 : * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
231 : * @task: task in %EXIT_DEAD state
232 : *
233 : * Called with write_lock(&tasklist_lock) held.
234 : */
235 0 : static inline void ptrace_release_task(struct task_struct *task)
236 : {
237 0 : BUG_ON(!list_empty(&task->ptraced));
238 0 : ptrace_unlink(task);
239 0 : BUG_ON(!list_empty(&task->ptrace_entry));
240 0 : }
241 :
242 : #ifndef force_successful_syscall_return
243 : /*
244 : * System call handlers that, upon successful completion, need to return a
245 : * negative value should call force_successful_syscall_return() right before
246 : * returning. On architectures where the syscall convention provides for a
247 : * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
248 : * others), this macro can be used to ensure that the error flag will not get
249 : * set. On architectures which do not support a separate error flag, the macro
250 : * is a no-op and the spurious error condition needs to be filtered out by some
251 : * other means (e.g., in user-level, by passing an extra argument to the
252 : * syscall handler, or something along those lines).
253 : */
254 : #define force_successful_syscall_return() do { } while (0)
255 : #endif
256 :
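/*
 * Illustrative sketch (editor's addition, not part of this header): a handler
 * whose successful result can legitimately look like an -ERRNO value would
 * use it like this.  The syscall itself is hypothetical.
 */
#if 0	/* example only */
SYSCALL_DEFINE0(example_negative_result)
{
	long ret = -42;	/* a valid result, not an error */

	force_successful_syscall_return();
	return ret;
}
#endif
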
257 : #ifndef is_syscall_success
258 : /*
259 : * On most systems we can tell whether a syscall succeeded by checking whether
260 : * its return value is an error value. Some systems, like ia64 and powerpc,
261 : * have different indicators of success/failure and must define their own.
262 : */
263 : #define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
264 : #endif
265 :
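/*
 * Illustrative sketch (editor's addition, not part of this header): e.g. a
 * syscall-exit hook could classify the result like this.  The helper name is
 * hypothetical; regs_return_value() is the arch accessor used above.
 */
#if 0	/* example only */
static long example_syscall_error(struct pt_regs *regs)
{
	/* 0 on success, the -errno value otherwise */
	return is_syscall_success(regs) ? 0 : regs_return_value(regs);
}
#endif
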
266 : /*
267 : * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
268 : *
269 : * These do-nothing inlines are used when the arch does not
270 : * implement single-step. The kerneldoc comments are here
271 : * to document the interface for all arch definitions.
272 : */
273 :
274 : #ifndef arch_has_single_step
275 : /**
276 : * arch_has_single_step - does this CPU support user-mode single-step?
277 : *
278 : * If this is defined, then there must be function declarations or
279 : * inlines for user_enable_single_step() and user_disable_single_step().
280 : * arch_has_single_step() should evaluate to nonzero iff the machine
281 : * supports instruction single-step for user mode.
282 : * It can be a constant or it can test a CPU feature bit.
283 : */
284 : #define arch_has_single_step() (0)
285 :
286 : /**
287 : * user_enable_single_step - single-step in user-mode task
288 : * @task: either current or a task stopped in %TASK_TRACED
289 : *
290 : * This can only be called when arch_has_single_step() has returned nonzero.
291 : * Set @task so that when it returns to user mode, it will trap after the
292 : * next single instruction executes. If arch_has_block_step() is defined,
293 : * this must clear the effects of user_enable_block_step() too.
294 : */
295 : static inline void user_enable_single_step(struct task_struct *task)
296 : {
297 : BUG(); /* This can never be called. */
298 : }
299 :
300 : /**
301 : * user_disable_single_step - cancel user-mode single-step
302 : * @task: either current or a task stopped in %TASK_TRACED
303 : *
304 : * Clear @task of the effects of user_enable_single_step() and
305 : * user_enable_block_step(). This can be called whether or not either
306 : * of those was ever called on @task, and even if arch_has_single_step()
307 : * returned zero.
308 : */
309 : static inline void user_disable_single_step(struct task_struct *task)
310 : {
311 : }
312 : #else
313 : extern void user_enable_single_step(struct task_struct *);
314 : extern void user_disable_single_step(struct task_struct *);
315 : #endif /* arch_has_single_step */
316 :
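/*
 * Illustrative sketch (editor's addition, not part of this header): an arch
 * that supports single-step defines the predicate in its own <asm/ptrace.h>
 * (x86 defines it to 1, for example) and implements the two step functions,
 * which are then declared by the #else branch above.
 */
#if 0	/* example only, arch-side */
#define arch_has_single_step()	(1)
#endif
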
317 : #ifndef arch_has_block_step
318 : /**
319 : * arch_has_block_step - does this CPU support user-mode block-step?
320 : *
321 : * If this is defined, then there must be a function declaration or inline
322 : * for user_enable_block_step(), and arch_has_single_step() must be defined
323 : * too. arch_has_block_step() should evaluate to nonzero iff the machine
324 : * supports step-until-branch for user mode. It can be a constant or it
325 : * can test a CPU feature bit.
326 : */
327 : #define arch_has_block_step() (0)
328 :
329 : /**
330 : * user_enable_block_step - step until branch in user-mode task
331 : * @task: either current or a task stopped in %TASK_TRACED
332 : *
333 : * This can only be called when arch_has_block_step() has returned nonzero,
334 : * and will never be called when single-instruction stepping is being used.
335 : * Set @task so that when it returns to user mode, it will trap after the
336 : * next branch or trap taken.
337 : */
338 : static inline void user_enable_block_step(struct task_struct *task)
339 : {
340 : BUG(); /* This can never be called. */
341 : }
342 : #else
343 : extern void user_enable_block_step(struct task_struct *);
344 : #endif /* arch_has_block_step */
345 :
346 : #ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT
347 : extern void user_single_step_report(struct pt_regs *regs);
348 : #else
349 : static inline void user_single_step_report(struct pt_regs *regs)
350 : {
351 : kernel_siginfo_t info;
352 : clear_siginfo(&info);
353 : info.si_signo = SIGTRAP;
354 : info.si_errno = 0;
355 : info.si_code = SI_USER;
356 : info.si_pid = 0;
357 : info.si_uid = 0;
358 : force_sig_info(&info);
359 : }
360 : #endif
361 :
362 : #ifndef arch_ptrace_stop_needed
363 : /**
364 : * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
365 : * @code: current->exit_code value ptrace will stop with
366 : * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
367 : *
368 : * This is called with the siglock held, to decide whether or not it's
369 : * necessary to release the siglock and call arch_ptrace_stop() with the
370 : * same @code and @info arguments. It can be defined to a constant if
371 : * arch_ptrace_stop() is never required, or always is. On machines where
372 : * this makes sense, it should be defined to a quick test to optimize out
373 : * calling arch_ptrace_stop() when it would be superfluous. For example,
374 : * if the thread has not been back to user mode since the last stop, the
375 : * thread state might indicate that nothing needs to be done.
376 : *
377 : * This is guaranteed to be invoked once before a task stops for ptrace and
378 : * may include arch-specific operations necessary prior to a ptrace stop.
379 : */
380 : #define arch_ptrace_stop_needed(code, info) (0)
381 : #endif
382 :
383 : #ifndef arch_ptrace_stop
384 : /**
385 : * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
386 : * @code: current->exit_code value ptrace will stop with
387 : * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
388 : *
389 : * This is called with no locks held when arch_ptrace_stop_needed() has
390 : * just returned nonzero. It is allowed to block, e.g. for user memory
391 : * access. The arch can have machine-specific work to be done before
392 : * ptrace stops. On ia64, register backing store gets written back to user
393 : * memory here. Since this can be costly (requires dropping the siglock),
394 : * we only do it when the arch requires it for this particular stop, as
395 : * indicated by arch_ptrace_stop_needed().
396 : */
397 : #define arch_ptrace_stop(code, info) do { } while (0)
398 : #endif
399 :
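/*
 * Illustrative sketch (editor's addition, not part of this header): this is
 * roughly the pattern ptrace_stop() in kernel/signal.c uses with the two
 * hooks above: test under the siglock, and only drop it when arch work is
 * actually needed.
 */
#if 0	/* example only */
	if (arch_ptrace_stop_needed(exit_code, info)) {
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
	}
#endif
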
400 : #ifndef current_pt_regs
401 : #define current_pt_regs() task_pt_regs(current)
402 : #endif
403 :
404 : /*
405 : * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
406 : * on *all* architectures; the only reason to have a per-arch definition
407 : * is optimisation.
408 : */
409 : #ifndef signal_pt_regs
410 : #define signal_pt_regs() task_pt_regs(current)
411 : #endif
412 :
413 : #ifndef current_user_stack_pointer
414 : #define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
415 : #endif
416 :
417 : extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
418 :
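/*
 * Illustrative sketch (editor's addition, not part of this header): roughly
 * how a /proc/<pid>/syscall-style reader fills and consumes the struct
 * syscall_info defined at the top of this file.  The function is hypothetical.
 */
#if 0	/* example only */
static void example_show_syscall(struct task_struct *target)
{
	struct syscall_info info;

	if (task_current_syscall(target, &info) == 0)
		pr_info("nr=%d sp=%llx ip=%llx\n",
			info.data.nr,
			(unsigned long long)info.sp,
			(unsigned long long)info.data.instruction_pointer);
}
#endif
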
419 : extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
420 : #endif