Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0-or-later */
2 : #ifndef _LINUX_KPROBES_H
3 : #define _LINUX_KPROBES_H
4 : /*
5 : * Kernel Probes (KProbes)
6 : * include/linux/kprobes.h
7 : *
8 : * Copyright (C) IBM Corporation, 2002, 2004
9 : *
10 : * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
11 : * Probes initial implementation ( includes suggestions from
12 : * Rusty Russell).
13 : * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14 : * interface to access function arguments.
15 : * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston
16 : * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
17 : * <prasanna@in.ibm.com> added function-return probes.
18 : */
19 : #include <linux/compiler.h>
20 : #include <linux/linkage.h>
21 : #include <linux/list.h>
22 : #include <linux/notifier.h>
23 : #include <linux/smp.h>
24 : #include <linux/bug.h>
25 : #include <linux/percpu.h>
26 : #include <linux/spinlock.h>
27 : #include <linux/rcupdate.h>
28 : #include <linux/mutex.h>
29 : #include <linux/ftrace.h>
30 : #include <linux/refcount.h>
31 : #include <linux/freelist.h>
32 : #include <asm/kprobes.h>
33 :
34 : #ifdef CONFIG_KPROBES
35 :
36 : /* kprobe_status settings */
37 : #define KPROBE_HIT_ACTIVE 0x00000001
38 : #define KPROBE_HIT_SS 0x00000002
39 : #define KPROBE_REENTER 0x00000004
40 : #define KPROBE_HIT_SSDONE 0x00000008
41 :
42 : #else /* CONFIG_KPROBES */
43 : #include <asm-generic/kprobes.h>
44 : typedef int kprobe_opcode_t;
45 : struct arch_specific_insn {
46 : int dummy;
47 : };
48 : #endif /* CONFIG_KPROBES */
49 :
50 : struct kprobe;
51 : struct pt_regs;
52 : struct kretprobe;
53 : struct kretprobe_instance;
54 : typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
55 : typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
56 : unsigned long flags);
57 : typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
58 : int trapnr);
59 : typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
60 : struct pt_regs *);
61 :
/*
 * struct kprobe - a breakpoint planted at @addr (or resolved from
 * @symbol_name + @offset) with optional pre/post/fault handlers.
 */
struct kprobe {
	struct hlist_node hlist;

	/* list of kprobes for multi-handler support */
	struct list_head list;

	/* count the number of times this probe was temporarily disarmed */
	unsigned long nmissed;

	/* location of the probe point */
	kprobe_opcode_t *addr;

	/* Allow user to indicate symbol name of the probe point */
	const char *symbol_name;

	/* Offset into the symbol */
	unsigned int offset;

	/* Called before addr is executed. */
	kprobe_pre_handler_t pre_handler;

	/* Called after addr is executed, unless... */
	kprobe_post_handler_t post_handler;

	/*
	 * ... called if executing addr causes a fault (eg. page fault).
	 * Return 1 if it handled fault, otherwise kernel will see it.
	 */
	kprobe_fault_handler_t fault_handler;

	/* Saved opcode (which has been replaced with breakpoint) */
	kprobe_opcode_t opcode;

	/* copy of the original instruction */
	struct arch_specific_insn ainsn;

	/*
	 * Indicates various status flags (KPROBE_FLAG_* bits below).
	 * Protected by kprobe_mutex after this kprobe is registered.
	 */
	u32 flags;
};
104 :
105 : /* Kprobe status flags */
106 : #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
107 : #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
108 : #define KPROBE_FLAG_OPTIMIZED 4 /*
109 : * probe is really optimized.
110 : * NOTE:
111 : * this flag is only for optimized_kprobe.
112 : */
113 : #define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
114 :
115 : /* Has this kprobe gone ? */
116 : static inline int kprobe_gone(struct kprobe *p)
117 : {
118 : return p->flags & KPROBE_FLAG_GONE;
119 : }
120 :
121 : /* Is this kprobe disabled ? */
122 : static inline int kprobe_disabled(struct kprobe *p)
123 : {
124 : return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
125 : }
126 :
127 : /* Is this kprobe really running optimized path ? */
128 : static inline int kprobe_optimized(struct kprobe *p)
129 : {
130 : return p->flags & KPROBE_FLAG_OPTIMIZED;
131 : }
132 :
133 : /* Is this kprobe uses ftrace ? */
134 : static inline int kprobe_ftrace(struct kprobe *p)
135 : {
136 : return p->flags & KPROBE_FLAG_FTRACE;
137 : }
138 :
139 : /*
140 : * Function-return probe -
141 : * Note:
142 : * User needs to provide a handler function, and initialize maxactive.
143 : * maxactive - The maximum number of instances of the probed function that
144 : * can be active concurrently.
145 : * nmissed - tracks the number of times the probed function's return was
146 : * ignored, due to maxactive being too low.
147 : *
148 : */
/*
 * Refcounted holder of the kretprobe back-pointer, shared by all of the
 * kretprobe's instances (see get_kretprobe()).
 */
struct kretprobe_holder {
	struct kretprobe *rp;	/* owning kretprobe; read via READ_ONCE() */
	refcount_t ref;
};
153 :
/* A function-return probe: @kp fires at entry, @handler at return. */
struct kretprobe {
	struct kprobe kp;
	kretprobe_handler_t handler;		/* run when probed function returns */
	kretprobe_handler_t entry_handler;	/* run at function entry */
	int maxactive;	/* max instances active concurrently (see above) */
	int nmissed;	/* returns ignored because maxactive was too low */
	size_t data_size;	/* size of per-instance data[] area */
	struct freelist_head freelist;	/* pool of unused instances */
	struct kretprobe_holder *rph;	/* shared back-pointer holder */
};
164 :
/* Per-hit state for one active invocation of a kretprobe. */
struct kretprobe_instance {
	union {
		struct freelist_node freelist;	/* linkage while parked in rp->freelist */
		struct rcu_head rcu;		/* for RCU-deferred reclaim */
	};
	struct llist_node llist;
	struct kretprobe_holder *rph;	/* shared holder; see get_kretprobe() */
	kprobe_opcode_t *ret_addr;	/* saved return address — set by arch code */
	void *fp;	/* frame pointer captured at entry; presumably used by
			 * arch code to match entries to returns — TODO confirm */
	char data[];	/* data_size bytes of handler-private data */
};
176 :
/* A symbol excluded from kretprobing (see kretprobe_blacklist[]). */
struct kretprobe_blackpoint {
	const char *name;	/* symbol name */
	void *addr;		/* resolved address of the symbol */
};
181 :
/* One address range on the kprobe blacklist. */
struct kprobe_blacklist_entry {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
};
187 :
188 : #ifdef CONFIG_KPROBES
189 : DECLARE_PER_CPU(struct kprobe *, current_kprobe);
190 : DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
191 :
/*
 * For #ifdef avoidance: callers can test kprobes_built_in() at runtime
 * instead of sprinkling #ifdef CONFIG_KPROBES through their code.
 */
static inline int kprobes_built_in(void)
{
	return 1;
}
199 :
200 : extern void kprobe_busy_begin(void);
201 : extern void kprobe_busy_end(void);
202 :
203 : #ifdef CONFIG_KRETPROBES
204 : extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
205 : struct pt_regs *regs);
206 : extern int arch_trampoline_kprobe(struct kprobe *p);
207 :
208 : /* If the trampoline handler called from a kprobe, use this version */
209 : unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
210 : void *trampoline_address,
211 : void *frame_pointer);
212 :
/*
 * Wrapper that brackets __kretprobe_trampoline_handler() with
 * kprobe_busy_begin()/kprobe_busy_end() so that a kprobe hit inside the
 * handler cannot recurse into kretprobe handling.
 */
static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
					   void *trampoline_address,
					   void *frame_pointer)
{
	unsigned long ret;
	/*
	 * Set a dummy kprobe for avoiding kretprobe recursion.
	 * Since kretprobe never runs in kprobe handler, no kprobe must
	 * be running at this point.
	 */
	kprobe_busy_begin();
	ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer);
	kprobe_busy_end();

	return ret;
}
230 :
/*
 * Map an instance back to its kretprobe via the shared holder.  The
 * rph->rp pointer can change concurrently (hence READ_ONCE()); the
 * caller must hold an RCU read-side lock, which the lockdep assertion
 * below enforces.
 */
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
		"Kretprobe is accessed from instance under preemptive context");

	return READ_ONCE(ri->rph->rp);
}
238 :
239 : #else /* CONFIG_KRETPROBES */
/*
 * !CONFIG_KRETPROBES stub.  The first parameter is a kretprobe_instance,
 * matching the real declaration above — the stub previously (and wrongly)
 * took a 'struct kretprobe *'.
 */
static inline void arch_prepare_kretprobe(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
}
static inline int arch_trampoline_kprobe(struct kprobe *p)
{
	/* Without kretprobes there is no trampoline for @p to match. */
	return 0;
}
248 : #endif /* CONFIG_KRETPROBES */
249 :
250 : extern struct kretprobe_blackpoint kretprobe_blacklist[];
251 :
252 : #ifdef CONFIG_KPROBES_SANITY_TEST
253 : extern int init_test_probes(void);
254 : #else
static inline int init_test_probes(void)
{
	/* Sanity tests compiled out; report success. */
	return 0;
}
259 : #endif /* CONFIG_KPROBES_SANITY_TEST */
260 :
261 : extern int arch_prepare_kprobe(struct kprobe *p);
262 : extern void arch_arm_kprobe(struct kprobe *p);
263 : extern void arch_disarm_kprobe(struct kprobe *p);
264 : extern int arch_init_kprobes(void);
265 : extern void kprobes_inc_nmissed_count(struct kprobe *p);
266 : extern bool arch_within_kprobe_blacklist(unsigned long addr);
267 : extern int arch_populate_kprobe_blacklist(void);
268 : extern bool arch_kprobe_on_func_entry(unsigned long offset);
269 : extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
270 :
271 : extern bool within_kprobe_blacklist(unsigned long addr);
272 : extern int kprobe_add_ksym_blacklist(unsigned long entry);
273 : extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
274 :
/*
 * A cache of executable pages that hold out-of-line instruction slots.
 * NOTE(review): fields appear to be serialized by @mutex — confirm
 * against the slot-cache implementation.
 */
struct kprobe_insn_cache {
	struct mutex mutex;
	void *(*alloc)(void);	/* allocate insn page */
	void (*free)(void *);	/* free insn page */
	const char *sym;	/* symbol for insn pages */
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;		/* presumably a count of dirty slots awaiting
				 * collection — TODO confirm */
};
284 :
285 : #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
286 : extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
287 : extern void __free_insn_slot(struct kprobe_insn_cache *c,
288 : kprobe_opcode_t *slot, int dirty);
289 : /* sleep-less address checking routine */
290 : extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
291 : unsigned long addr);
292 :
/*
 * DEFINE_INSN_CACHE_OPS(__name) - generate typed accessors for the insn
 * cache "kprobe_<__name>_slots": get_<__name>_slot(),
 * free_<__name>_slot() and is_kprobe_<__name>_slot().
 */
#define DEFINE_INSN_CACHE_OPS(__name)					\
extern struct kprobe_insn_cache kprobe_##__name##_slots;		\
									\
static inline kprobe_opcode_t *get_##__name##_slot(void)		\
{									\
	return __get_insn_slot(&kprobe_##__name##_slots);		\
}									\
									\
static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
{									\
	__free_insn_slot(&kprobe_##__name##_slots, slot, dirty);	\
}									\
									\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return __is_insn_slot_addr(&kprobe_##__name##_slots, addr);	\
}
310 : #define KPROBE_INSN_PAGE_SYM "kprobe_insn_page"
311 : #define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
312 : int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
313 : unsigned long *value, char *type, char *sym);
314 : #else /* __ARCH_WANT_KPROBES_INSN_SLOT */
/*
 * Without insn-slot support there are no slot pages, so the address
 * check is trivially false.  Return 'false' (not the literal 0) to
 * match the function's bool return type.
 */
#define DEFINE_INSN_CACHE_OPS(__name)					\
static inline bool is_kprobe_##__name##_slot(unsigned long addr)	\
{									\
	return false;							\
}
320 : #endif
321 :
322 : DEFINE_INSN_CACHE_OPS(insn);
323 :
324 : #ifdef CONFIG_OPTPROBES
325 : /*
326 : * Internal structure for direct jump optimized probe
327 : */
/*
 * Internal structure for direct jump optimized probe
 */
struct optimized_kprobe {
	struct kprobe kp;
	struct list_head list;	/* list for optimizing queue */
	struct arch_optimized_insn optinsn;	/* arch-specific detour code */
};
333 :
334 : /* Architecture dependent functions for direct jump optimization */
335 : extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
336 : extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
337 : extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
338 : struct kprobe *orig);
339 : extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
340 : extern void arch_optimize_kprobes(struct list_head *oplist);
341 : extern void arch_unoptimize_kprobes(struct list_head *oplist,
342 : struct list_head *done_list);
343 : extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
344 : extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
345 : unsigned long addr);
346 :
347 : extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
348 :
349 : DEFINE_INSN_CACHE_OPS(optinsn);
350 :
351 : #ifdef CONFIG_SYSCTL
352 : extern int sysctl_kprobes_optimization;
353 : extern int proc_kprobes_optimization_handler(struct ctl_table *table,
354 : int write, void *buffer,
355 : size_t *length, loff_t *ppos);
356 : #endif
357 : extern void wait_for_kprobe_optimizer(void);
358 : #else
/* No optimizer exists without CONFIG_OPTPROBES; nothing to wait for. */
static inline void wait_for_kprobe_optimizer(void) { }
360 : #endif /* CONFIG_OPTPROBES */
361 : #ifdef CONFIG_KPROBES_ON_FTRACE
362 : extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
363 : struct ftrace_ops *ops, struct ftrace_regs *fregs);
364 : extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
365 : #endif
366 :
367 : int arch_check_ftrace_location(struct kprobe *p);
368 :
369 : /* Get the kprobe at this addr (if any) - called with preemption disabled */
370 : struct kprobe *get_kprobe(void *addr);
371 :
372 : /* kprobe_running() will just return the current_kprobe on this CPU */
373 : static inline struct kprobe *kprobe_running(void)
374 : {
375 : return (__this_cpu_read(current_kprobe));
376 : }
377 :
/* Clear this CPU's current_kprobe once handling is complete. */
static inline void reset_current_kprobe(void)
{
	__this_cpu_write(current_kprobe, NULL);
}
382 :
/* Return this CPU's kprobe control block. */
static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
	return this_cpu_ptr(&kprobe_ctlblk);
}
387 :
388 : kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
389 : int register_kprobe(struct kprobe *p);
390 : void unregister_kprobe(struct kprobe *p);
391 : int register_kprobes(struct kprobe **kps, int num);
392 : void unregister_kprobes(struct kprobe **kps, int num);
393 : unsigned long arch_deref_entry_point(void *);
394 :
395 : int register_kretprobe(struct kretprobe *rp);
396 : void unregister_kretprobe(struct kretprobe *rp);
397 : int register_kretprobes(struct kretprobe **rps, int num);
398 : void unregister_kretprobes(struct kretprobe **rps, int num);
399 :
400 : void kprobe_flush_task(struct task_struct *tk);
401 :
402 : void kprobe_free_init_mem(void);
403 :
404 : int disable_kprobe(struct kprobe *kp);
405 : int enable_kprobe(struct kprobe *kp);
406 :
407 : void dump_kprobe(struct kprobe *kp);
408 :
409 : void *alloc_insn_page(void);
410 : void free_insn_page(void *page);
411 :
412 : int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
413 : char *sym);
414 :
415 : int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
416 : char *type, char *sym);
417 : #else /* !CONFIG_KPROBES: */
418 :
/* For #ifdef avoidance: kprobes are compiled out. */
static inline int kprobes_built_in(void)
{
	return 0;
}
/* With !CONFIG_KPROBES no probe can exist, so fault/lookup stubs are inert. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
	return NULL;
}
static inline struct kprobe *kprobe_running(void)
{
	return NULL;
}
/*
 * !CONFIG_KPROBES stubs: registration and enable/disable fail with
 * -ENOSYS, unregistration and flush/free are silent no-ops, so callers
 * need no #ifdefs of their own.
 */
static inline int register_kprobe(struct kprobe *p)
{
	return -ENOSYS;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
	return -ENOSYS;
}
static inline void unregister_kprobe(struct kprobe *p)
{
}
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
static inline int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
}
static inline void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
static inline void kprobe_free_init_mem(void)
{
}
static inline int disable_kprobe(struct kprobe *kp)
{
	return -ENOSYS;
}
static inline int enable_kprobe(struct kprobe *kp)
{
	return -ENOSYS;
}
477 :
static inline bool within_kprobe_blacklist(unsigned long addr)
{
	/*
	 * Report every address as blacklisted: with kprobes compiled out
	 * nothing may be probed anywhere.
	 */
	return true;
}
static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
				     char *type, char *sym)
{
	/* No kprobe symbols exist to enumerate. */
	return -ERANGE;
}
487 : #endif /* CONFIG_KPROBES */
/* A kretprobe is enabled/disabled through its embedded entry kprobe. */
static inline int disable_kretprobe(struct kretprobe *rp)
{
	return disable_kprobe(&rp->kp);
}
static inline int enable_kretprobe(struct kretprobe *rp)
{
	return enable_kprobe(&rp->kp);
}
496 :
497 : #ifndef CONFIG_KPROBES
/* Without CONFIG_KPROBES there are no instruction slot pages at all. */
static inline bool is_kprobe_insn_slot(unsigned long addr)
{
	return false;
}
502 : #endif
503 : #ifndef CONFIG_OPTPROBES
/* Without CONFIG_OPTPROBES there are no optimized-insn slot pages. */
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
	return false;
}
508 : #endif
509 :
510 : /* Returns true if kprobes handled the fault */
511 295861 : static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
512 : unsigned int trap)
513 : {
514 295861 : if (!kprobes_built_in())
515 295861 : return false;
516 : if (user_mode(regs))
517 : return false;
518 : /*
519 : * To be potentially processing a kprobe fault and to be allowed
520 : * to call kprobe_running(), we have to be non-preemptible.
521 : */
522 : if (preemptible())
523 : return false;
524 : if (!kprobe_running())
525 : return false;
526 : return kprobe_fault_handler(regs, trap);
527 : }
528 :
529 : #endif /* _LINUX_KPROBES_H */
|