1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : /*
3 : * Ftrace header. For implementation details beyond the random comments
4 : * scattered below, see: Documentation/trace/ftrace-design.rst
5 : */
6 :
7 : #ifndef _LINUX_FTRACE_H
8 : #define _LINUX_FTRACE_H
9 :
10 : #include <linux/trace_recursion.h>
11 : #include <linux/trace_clock.h>
12 : #include <linux/kallsyms.h>
13 : #include <linux/linkage.h>
14 : #include <linux/bitops.h>
15 : #include <linux/ptrace.h>
16 : #include <linux/ktime.h>
17 : #include <linux/sched.h>
18 : #include <linux/types.h>
19 : #include <linux/init.h>
20 : #include <linux/fs.h>
21 :
22 : #include <asm/ftrace.h>
23 :
24 : /*
25 : * If the arch supports passing the variable contents of
26 : * function_trace_op as the third parameter back from the
27 : * mcount call, then the arch should define this as 1.
28 : */
29 : #ifndef ARCH_SUPPORTS_FTRACE_OPS
30 : #define ARCH_SUPPORTS_FTRACE_OPS 0
31 : #endif
32 :
33 : /*
34 : * If the arch's mcount caller does not support all of ftrace's
35 : * features, then it must call an indirect function that
36 : * does. Or at least does enough to prevent any unwelcome side effects.
37 : */
38 : #if !ARCH_SUPPORTS_FTRACE_OPS
39 : # define FTRACE_FORCE_LIST_FUNC 1
40 : #else
41 : # define FTRACE_FORCE_LIST_FUNC 0
42 : #endif
43 :
44 : /* Main tracing buffer and events set up */
45 : #ifdef CONFIG_TRACING
46 : void trace_init(void);
47 : void early_trace_init(void);
48 : #else
49 : static inline void trace_init(void) { }
50 : static inline void early_trace_init(void) { }
51 : #endif
52 :
53 : struct module;
54 : struct ftrace_hash;
55 : struct ftrace_direct_func;
56 :
57 : #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
58 : defined(CONFIG_DYNAMIC_FTRACE)
59 : const char *
60 : ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
61 : unsigned long *off, char **modname, char *sym);
62 : #else
63 : static inline const char *
64 : ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
65 : unsigned long *off, char **modname, char *sym)
66 : {
67 : return NULL;
68 : }
69 : #endif
70 :
71 : #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
72 : int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
73 : char *type, char *name,
74 : char *module_name, int *exported);
75 : #else
76 0 : static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
77 : char *type, char *name,
78 : char *module_name, int *exported)
79 : {
80 0 : return -1;
81 : }
82 : #endif
83 :
84 : #ifdef CONFIG_FUNCTION_TRACER
85 :
86 : extern int ftrace_enabled;
87 : extern int
88 : ftrace_enable_sysctl(struct ctl_table *table, int write,
89 : void *buffer, size_t *lenp, loff_t *ppos);
90 :
91 : struct ftrace_ops;
92 :
93 : #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
94 :
95 : struct ftrace_regs {
96 : struct pt_regs regs;
97 : };
98 : #define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
99 :
100 : /*
101 : * ftrace_instruction_pointer_set() is to be defined by the architecture
102 : * if it allows setting of the instruction pointer from the ftrace_regs
103 : * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
104 : * live kernel patching.
105 : */
106 : #define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
107 : #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
108 :
109 : static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
110 : {
111 : if (!fregs)
112 : return NULL;
113 :
114 : return arch_ftrace_get_regs(fregs);
115 : }
116 :
117 : typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
118 : struct ftrace_ops *op, struct ftrace_regs *fregs);
119 :
120 : ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
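/*
 * A minimal sketch of a callback with this signature (my_callback is an
 * illustrative name, not part of this header).  ftrace_get_regs() returns
 * the full pt_regs only when they were actually saved (e.g. the ops was
 * registered with FTRACE_OPS_FL_SAVE_REGS on an arch that supports it); it
 * may return NULL, so a callback must check:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (!regs)
 *			return;
 *		...
 *	}
 */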
121 :
122 : /*
123 : * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
124 : * set in the flags member.
125 : * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
126 : * IPMODIFY are attribute flags that can be set only before
127 : * registering the ftrace_ops, and cannot be modified while registered.
128 : * Changing those attribute flags after registering the ftrace_ops will
129 : * cause unexpected results.
130 : *
131 : * ENABLED - set/unset when ftrace_ops is registered/unregistered
132 : * DYNAMIC - set when ftrace_ops is registered to denote dynamically
133 : * allocated ftrace_ops which need special care
134 : * SAVE_REGS - The ftrace_ops wants regs saved at each function called
135 : * and passed to the callback. If this flag is set, but the
136 : * architecture does not support passing regs
137 : * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
138 : * ftrace_ops will fail to register, unless the next flag
139 : * is set.
140 : * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
141 : * handler can handle an arch that does not save regs
142 : * (the handler tests if regs == NULL), then it can set
143 : * this flag instead. It will not fail registering the ftrace_ops
144 : * but, the regs field will be NULL if the arch does not support
145 : * passing regs to the handler.
146 : * Note, if this flag is set, the SAVE_REGS flag will automatically
147 : * get set upon registering the ftrace_ops, if the arch supports it.
148 : * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
149 : * that the callback needs recursion protection. If it does
150 : * not set this, then the ftrace infrastructure will assume
151 : * that the callback can handle recursion on its own.
152 : * STUB - The ftrace_ops is just a placeholder.
153 : * INITIALIZED - The ftrace_ops has already been initialized (the first time
154 : * register_ftrace_function() is called on it, it will initialize the ops)
155 : * DELETED - The ops are being deleted, do not let them be registered again.
156 : * ADDING - The ops is in the process of being added.
157 : * REMOVING - The ops is in the process of being removed.
158 : * MODIFYING - The ops is in the process of changing its filter functions.
159 : * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
160 : * The arch specific code sets this flag when it allocated a
161 : * trampoline. This lets the arch know that it can update the
162 : * trampoline in case the callback function changes.
163 : * The ftrace_ops trampoline can be set by the ftrace users, and
164 : * in such cases the arch must not modify it. Only the arch ftrace
165 : * core code should set this flag.
166 : * IPMODIFY - The ops can modify the IP register. This can only be set with
167 : * SAVE_REGS. If another ops with this flag set is already registered
168 : * for any of the functions that this ops will be registered for, then
169 : * this ops will fail to register or set_filter_ip.
170 : * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
171 : * RCU - Set when the ops can only be called when RCU is watching.
172 : * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
173 : * PERMANENT - Set when the ops is permanent and should not be affected by
174 : * ftrace_enabled.
175 : * DIRECT - Used by the direct ftrace_ops helper for direct functions
176 : * (internal ftrace only, should not be used by others)
177 : */
178 : enum {
179 : FTRACE_OPS_FL_ENABLED = BIT(0),
180 : FTRACE_OPS_FL_DYNAMIC = BIT(1),
181 : FTRACE_OPS_FL_SAVE_REGS = BIT(2),
182 : FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
183 : FTRACE_OPS_FL_RECURSION = BIT(4),
184 : FTRACE_OPS_FL_STUB = BIT(5),
185 : FTRACE_OPS_FL_INITIALIZED = BIT(6),
186 : FTRACE_OPS_FL_DELETED = BIT(7),
187 : FTRACE_OPS_FL_ADDING = BIT(8),
188 : FTRACE_OPS_FL_REMOVING = BIT(9),
189 : FTRACE_OPS_FL_MODIFYING = BIT(10),
190 : FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
191 : FTRACE_OPS_FL_IPMODIFY = BIT(12),
192 : FTRACE_OPS_FL_PID = BIT(13),
193 : FTRACE_OPS_FL_RCU = BIT(14),
194 : FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
195 : FTRACE_OPS_FL_PERMANENT = BIT(16),
196 : FTRACE_OPS_FL_DIRECT = BIT(17),
197 : };
198 :
199 : #ifdef CONFIG_DYNAMIC_FTRACE
200 : /* The hashes used to know which functions the callbacks trace */
201 : struct ftrace_ops_hash {
202 : struct ftrace_hash __rcu *notrace_hash;
203 : struct ftrace_hash __rcu *filter_hash;
204 : struct mutex regex_lock;
205 : };
206 :
207 : void ftrace_free_init_mem(void);
208 : void ftrace_free_mem(struct module *mod, void *start, void *end);
209 : #else
210 : static inline void ftrace_free_init_mem(void) { }
211 : static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
212 : #endif
213 :
214 : /*
215 : * Note, ftrace_ops can be referenced outside of RCU protection, unless
216 : * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
217 : * core data, unregistering it will schedule on all CPUs
218 : * to make sure that there are no more users. Depending on the load of the
219 : * system, that may take a bit of time.
220 : *
221 : * Any private data added must also not be freed while in use, and if private
222 : * data is added to a ftrace_ops that is in core code, the user of the
223 : * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
224 : */
225 : struct ftrace_ops {
226 : ftrace_func_t func;
227 : struct ftrace_ops __rcu *next;
228 : unsigned long flags;
229 : void *private;
230 : ftrace_func_t saved_func;
231 : #ifdef CONFIG_DYNAMIC_FTRACE
232 : struct ftrace_ops_hash local_hash;
233 : struct ftrace_ops_hash *func_hash;
234 : struct ftrace_ops_hash old_hash;
235 : unsigned long trampoline;
236 : unsigned long trampoline_size;
237 : struct list_head list;
238 : #endif
239 : };
240 :
241 : extern struct ftrace_ops __rcu *ftrace_ops_list;
242 : extern struct ftrace_ops ftrace_list_end;
243 :
244 : /*
245 : * Traverse the ftrace_ops_list, invoking all entries. The reason that we
246 : * can use rcu_dereference_raw_check() is that elements removed from this list
247 : * are simply leaked, so there is no need to interact with a grace-period
248 : * mechanism. The rcu_dereference_raw_check() calls are needed to handle
249 : * concurrent insertions into the ftrace_ops_list.
250 : *
251 : * Silly Alpha and silly pointer-speculation compiler optimizations!
252 : */
253 : #define do_for_each_ftrace_op(op, list) \
254 : op = rcu_dereference_raw_check(list); \
255 : do
256 :
257 : /*
258 : * Optimized for just a single item in the list (as that is the normal case).
259 : */
260 : #define while_for_each_ftrace_op(op) \
261 : while (likely(op = rcu_dereference_raw_check((op)->next)) && \
262 : unlikely((op) != &ftrace_list_end))
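/*
 * Sketch of how this macro pair is used to walk the list (the loop body is
 * only illustrative):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		...
 *	} while_for_each_ftrace_op(op);
 */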
263 :
264 : /*
265 : * Type of the current tracing.
266 : */
267 : enum ftrace_tracing_type_t {
268 : FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
269 : FTRACE_TYPE_RETURN, /* Hook the return of the function */
270 : };
271 :
272 : /* Current tracing type, default is FTRACE_TYPE_ENTER */
273 : extern enum ftrace_tracing_type_t ftrace_tracing_type;
274 :
275 : /*
276 : * The ftrace_ops must be statically allocated and should also
277 : * be read_mostly. These functions do modify read_mostly variables
278 : * so use them sparingly. Never free an ftrace_ops or modify its
279 : * next pointer after it has been registered. Even after unregistering
280 : * it, the next pointer may still be used internally.
281 : */
282 : int register_ftrace_function(struct ftrace_ops *ops);
283 : int unregister_ftrace_function(struct ftrace_ops *ops);
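/*
 * Registration sketch (my_ops, my_callback and the chosen flags are
 * illustrative placeholders; error handling is elided):
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * As noted above, my_ops must stay allocated even after it is unregistered.
 */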
284 :
285 : extern void ftrace_stub(unsigned long a0, unsigned long a1,
286 : struct ftrace_ops *op, struct ftrace_regs *fregs);
287 :
288 : #else /* !CONFIG_FUNCTION_TRACER */
289 : /*
290 : * (un)register_ftrace_function must be a macro since the ops parameter
291 : * must not be evaluated.
292 : */
293 : #define register_ftrace_function(ops) ({ 0; })
294 : #define unregister_ftrace_function(ops) ({ 0; })
295 : static inline void ftrace_kill(void) { }
296 1 : static inline void ftrace_free_init_mem(void) { }
297 : static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
298 : #endif /* CONFIG_FUNCTION_TRACER */
299 :
300 : struct ftrace_func_entry {
301 : struct hlist_node hlist;
302 : unsigned long ip;
303 : unsigned long direct; /* for direct lookup only */
304 : };
305 :
306 : struct dyn_ftrace;
307 :
308 : #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
309 : extern int ftrace_direct_func_count;
310 : int register_ftrace_direct(unsigned long ip, unsigned long addr);
311 : int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
312 : int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
313 : struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
314 : int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
315 : struct dyn_ftrace *rec,
316 : unsigned long old_addr,
317 : unsigned long new_addr);
318 : unsigned long ftrace_find_rec_direct(unsigned long ip);
319 : #else
320 : # define ftrace_direct_func_count 0
321 : static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
322 : {
323 : return -ENOTSUPP;
324 : }
325 : static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
326 : {
327 : return -ENOTSUPP;
328 : }
329 : static inline int modify_ftrace_direct(unsigned long ip,
330 : unsigned long old_addr, unsigned long new_addr)
331 : {
332 : return -ENOTSUPP;
333 : }
334 : static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
335 : {
336 : return NULL;
337 : }
338 : static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
339 : struct dyn_ftrace *rec,
340 : unsigned long old_addr,
341 : unsigned long new_addr)
342 : {
343 : return -ENODEV;
344 : }
345 : static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
346 : {
347 : return 0;
348 : }
349 : #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
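/*
 * Sketch of the direct-call API above (see samples/ftrace/ for complete
 * examples).  traced_function stands for the function whose fentry site is
 * being redirected, and my_tramp is a placeholder for an assembly trampoline
 * that saves and restores the registers it clobbers:
 *
 *	ret = register_ftrace_direct((unsigned long)traced_function,
 *				     (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)traced_function,
 *				 (unsigned long)my_tramp);
 */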
350 :
351 : #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
352 : /*
353 : * This must be implemented by the architecture.
354 : * It is the way the ftrace direct_ops helper, when called
355 : * via ftrace (because there are other callbacks besides the
356 : * direct call), can inform the architecture's trampoline that this
357 : * routine has a direct caller, and what the caller is.
358 : *
359 : * For example, in x86, it returns the direct caller
360 : * callback function via the regs->orig_ax parameter.
361 : * Then in the ftrace trampoline, if this is set, it makes
362 : * the return from the trampoline jump to the direct caller
363 : * instead of going back to the function it just traced.
364 : */
365 : static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
366 : unsigned long addr) { }
367 : #endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
368 :
369 : #ifdef CONFIG_STACK_TRACER
370 :
371 : extern int stack_tracer_enabled;
372 :
373 : int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
374 : size_t *lenp, loff_t *ppos);
375 :
376 : /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
377 : DECLARE_PER_CPU(int, disable_stack_tracer);
378 :
379 : /**
380 : * stack_tracer_disable - temporarily disable the stack tracer
381 : *
382 : * There are a few locations (namely in RCU) where stack tracing
383 : * cannot be executed. This function is used to disable stack
384 : * tracing during those critical sections.
385 : *
386 : * This function must be called with preemption or interrupts
387 : * disabled and stack_tracer_enable() must be called shortly after
388 : * while preemption or interrupts are still disabled.
389 : */
390 : static inline void stack_tracer_disable(void)
391 : {
392 : /* Preemption or interrupts must be disabled */
393 : if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
394 : WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
395 : this_cpu_inc(disable_stack_tracer);
396 : }
397 :
398 : /**
399 : * stack_tracer_enable - re-enable the stack tracer
400 : *
401 : * After stack_tracer_disable() is called, stack_tracer_enable()
402 : * must be called shortly afterward.
403 : */
404 : static inline void stack_tracer_enable(void)
405 : {
406 : if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
407 : WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
408 : this_cpu_dec(disable_stack_tracer);
409 : }
410 : #else
411 : static inline void stack_tracer_disable(void) { }
412 : static inline void stack_tracer_enable(void) { }
413 : #endif
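/*
 * Sketch of the required pairing (preemption must already be disabled, per
 * the comments above):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	... code that must not be stack traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */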
414 :
415 : #ifdef CONFIG_DYNAMIC_FTRACE
416 :
417 : int ftrace_arch_code_modify_prepare(void);
418 : int ftrace_arch_code_modify_post_process(void);
419 :
420 : enum ftrace_bug_type {
421 : FTRACE_BUG_UNKNOWN,
422 : FTRACE_BUG_INIT,
423 : FTRACE_BUG_NOP,
424 : FTRACE_BUG_CALL,
425 : FTRACE_BUG_UPDATE,
426 : };
427 : extern enum ftrace_bug_type ftrace_bug_type;
428 :
429 : /*
430 : * Archs can set this to point to a variable that holds the value that was
431 : * expected at the call site before calling ftrace_bug().
432 : */
433 : extern const void *ftrace_expected;
434 :
435 : void ftrace_bug(int err, struct dyn_ftrace *rec);
436 :
437 : struct seq_file;
438 :
439 : extern int ftrace_text_reserved(const void *start, const void *end);
440 :
441 : struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
442 :
443 : bool is_ftrace_trampoline(unsigned long addr);
444 :
445 : /*
446 : * The dyn_ftrace record's flags field is split into two parts.
447 : * the first part which is '0-FTRACE_REF_MAX' is a counter of
448 : * the number of callbacks that have registered the function that
449 : * the dyn_ftrace descriptor represents.
450 : *
451 : * The second part is a mask:
452 : * ENABLED - the function is being traced
453 : * REGS - the record wants the function to save regs
454 : * REGS_EN - the function is set up to save regs.
455 : * IPMODIFY - the record allows for the IP address to be changed.
456 : * DISABLED - the record is not ready to be touched yet
457 : * DIRECT - there is a direct function to call
458 : *
459 : * When a new ftrace_ops is registered and wants a function to save
460 : * pt_regs, the rec->flags REGS is set. When the function has been
461 : * set up to save regs, the REGS_EN flag is set. Once a function
462 : * starts saving regs it will do so until all ftrace_ops are removed
463 : * from tracing that function.
464 : */
465 : enum {
466 : FTRACE_FL_ENABLED = (1UL << 31),
467 : FTRACE_FL_REGS = (1UL << 30),
468 : FTRACE_FL_REGS_EN = (1UL << 29),
469 : FTRACE_FL_TRAMP = (1UL << 28),
470 : FTRACE_FL_TRAMP_EN = (1UL << 27),
471 : FTRACE_FL_IPMODIFY = (1UL << 26),
472 : FTRACE_FL_DISABLED = (1UL << 25),
473 : FTRACE_FL_DIRECT = (1UL << 24),
474 : FTRACE_FL_DIRECT_EN = (1UL << 23),
475 : };
476 :
477 : #define FTRACE_REF_MAX_SHIFT 23
478 : #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
479 :
480 : #define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
481 :
482 : struct dyn_ftrace {
483 : unsigned long ip; /* address of mcount call-site */
484 : unsigned long flags;
485 : struct dyn_arch_ftrace arch;
486 : };
487 :
488 : int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
489 : int remove, int reset);
490 : int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
491 : int len, int reset);
492 : int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
493 : int len, int reset);
494 : void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
495 : void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
496 : void ftrace_free_filter(struct ftrace_ops *ops);
497 : void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
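/*
 * Filter setup sketch (the ops name and glob patterns are illustrative; these
 * calls are normally made before register_ftrace_function()):
 *
 *	ftrace_set_filter(&my_ops, "kmem_cache_*", strlen("kmem_cache_*"), 1);
 *	ftrace_set_notrace(&my_ops, "kmem_cache_free", strlen("kmem_cache_free"), 0);
 *
 * or, to match a single known call site by address:
 *
 *	ftrace_set_filter_ip(&my_ops, target_ip, 0, 0);
 */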
498 :
499 : enum {
500 : FTRACE_UPDATE_CALLS = (1 << 0),
501 : FTRACE_DISABLE_CALLS = (1 << 1),
502 : FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
503 : FTRACE_START_FUNC_RET = (1 << 3),
504 : FTRACE_STOP_FUNC_RET = (1 << 4),
505 : FTRACE_MAY_SLEEP = (1 << 5),
506 : };
507 :
508 : /*
509 : * The FTRACE_UPDATE_* enum is used to pass information back
510 : * from the ftrace_update_record() and ftrace_test_record()
511 : * functions. These are called by the code update routines
512 : * to find out what is to be done for a given function.
513 : *
514 : * IGNORE - The function is already what we want it to be
515 : * MAKE_CALL - Start tracing the function
516 : * MODIFY_CALL - Change the call to use a different trampoline (e.g. start or stop saving regs)
517 : * MAKE_NOP - Stop tracing the function
518 : */
519 : enum {
520 : FTRACE_UPDATE_IGNORE,
521 : FTRACE_UPDATE_MAKE_CALL,
522 : FTRACE_UPDATE_MODIFY_CALL,
523 : FTRACE_UPDATE_MAKE_NOP,
524 : };
525 :
526 : enum {
527 : FTRACE_ITER_FILTER = (1 << 0),
528 : FTRACE_ITER_NOTRACE = (1 << 1),
529 : FTRACE_ITER_PRINTALL = (1 << 2),
530 : FTRACE_ITER_DO_PROBES = (1 << 3),
531 : FTRACE_ITER_PROBE = (1 << 4),
532 : FTRACE_ITER_MOD = (1 << 5),
533 : FTRACE_ITER_ENABLED = (1 << 6),
534 : };
535 :
536 : void arch_ftrace_update_code(int command);
537 : void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
538 : void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
539 : void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
540 :
541 : struct ftrace_rec_iter;
542 :
543 : struct ftrace_rec_iter *ftrace_rec_iter_start(void);
544 : struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
545 : struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
546 :
547 : #define for_ftrace_rec_iter(iter) \
548 : for (iter = ftrace_rec_iter_start(); \
549 : iter; \
550 : iter = ftrace_rec_iter_next(iter))
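/*
 * Iteration sketch, typically used by an arch ftrace_replace_code()
 * implementation (iter and rec are local variables):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch or inspect the call site at rec->ip ...
 *	}
 */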
551 :
552 :
553 : int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
554 : int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
555 : void ftrace_run_stop_machine(int command);
556 : unsigned long ftrace_location(unsigned long ip);
557 : unsigned long ftrace_location_range(unsigned long start, unsigned long end);
558 : unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
559 : unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
560 :
561 : extern ftrace_func_t ftrace_trace_function;
562 :
563 : int ftrace_regex_open(struct ftrace_ops *ops, int flag,
564 : struct inode *inode, struct file *file);
565 : ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
566 : size_t cnt, loff_t *ppos);
567 : ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
568 : size_t cnt, loff_t *ppos);
569 : int ftrace_regex_release(struct inode *inode, struct file *file);
570 :
571 : void __init
572 : ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
573 :
574 : /* defined in arch */
575 : extern int ftrace_ip_converted(unsigned long ip);
576 : extern int ftrace_dyn_arch_init(void);
577 : extern void ftrace_replace_code(int enable);
578 : extern int ftrace_update_ftrace_func(ftrace_func_t func);
579 : extern void ftrace_caller(void);
580 : extern void ftrace_regs_caller(void);
581 : extern void ftrace_call(void);
582 : extern void ftrace_regs_call(void);
583 : extern void mcount_call(void);
584 :
585 : void ftrace_modify_all_code(int command);
586 :
587 : #ifndef FTRACE_ADDR
588 : #define FTRACE_ADDR ((unsigned long)ftrace_caller)
589 : #endif
590 :
591 : #ifndef FTRACE_GRAPH_ADDR
592 : #define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
593 : #endif
594 :
595 : #ifndef FTRACE_REGS_ADDR
596 : #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
597 : # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
598 : #else
599 : # define FTRACE_REGS_ADDR FTRACE_ADDR
600 : #endif
601 : #endif
602 :
603 : /*
604 : * If an arch would like functions that are only traced
605 : * by the function graph tracer to jump directly to its own
606 : * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
607 : * to be that address to jump to.
608 : */
609 : #ifndef FTRACE_GRAPH_TRAMP_ADDR
610 : #define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
611 : #endif
612 :
613 : #ifdef CONFIG_FUNCTION_GRAPH_TRACER
614 : extern void ftrace_graph_caller(void);
615 : extern int ftrace_enable_ftrace_graph_caller(void);
616 : extern int ftrace_disable_ftrace_graph_caller(void);
617 : #else
618 : static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
619 : static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
620 : #endif
621 :
622 : /**
623 : * ftrace_make_nop - convert code into nop
624 : * @mod: module structure if called by module load initialization
625 : * @rec: the call site record (e.g. mcount/fentry)
626 : * @addr: the address that the call site should be calling
627 : *
628 : * This is a very sensitive operation and great care needs
629 : * to be taken by the arch. The operation should carefully
630 : * read the location, check to see if what is read is indeed
631 : * what we expect it to be, and then on success of the compare,
632 : * it should write to the location.
633 : *
634 : * The code segment at @rec->ip should be a caller to @addr
635 : *
636 : * Return must be:
637 : * 0 on success
638 : * -EFAULT on error reading the location
639 : * -EINVAL on a failed compare of the contents
640 : * -EPERM on error writing to the location
641 : * Any other value will be considered a failure.
642 : */
643 : extern int ftrace_make_nop(struct module *mod,
644 : struct dyn_ftrace *rec, unsigned long addr);
645 :
646 :
647 : /**
648 : * ftrace_init_nop - initialize a nop call site
649 : * @mod: module structure if called by module load initialization
650 : * @rec: the call site record (e.g. mcount/fentry)
651 : *
652 : * This is a very sensitive operation and great care needs
653 : * to be taken by the arch. The operation should carefully
654 : * read the location, check to see if what is read is indeed
655 : * what we expect it to be, and then on success of the compare,
656 : * it should write to the location.
657 : *
658 : * The code segment at @rec->ip should contain the contents created by
659 : * the compiler
660 : *
661 : * Return must be:
662 : * 0 on success
663 : * -EFAULT on error reading the location
664 : * -EINVAL on a failed compare of the contents
665 : * -EPERM on error writing to the location
666 : * Any other value will be considered a failure.
667 : */
668 : #ifndef ftrace_init_nop
669 : static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
670 : {
671 : return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
672 : }
673 : #endif
674 :
675 : /**
676 : * ftrace_make_call - convert a nop call site into a call to addr
677 : * @rec: the call site record (e.g. mcount/fentry)
678 : * @addr: the address that the call site should call
679 : *
680 : * This is a very sensitive operation and great care needs
681 : * to be taken by the arch. The operation should carefully
682 : * read the location, check to see if what is read is indeed
683 : * what we expect it to be, and then on success of the compare,
684 : * it should write to the location.
685 : *
686 : * The code segment at @rec->ip should be a nop
687 : *
688 : * Return must be:
689 : * 0 on success
690 : * -EFAULT on error reading the location
691 : * -EINVAL on a failed compare of the contents
692 : * -EPERM on error writing to the location
693 : * Any other value will be considered a failure.
694 : */
695 : extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
696 :
697 : #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
698 : /**
699 : * ftrace_modify_call - convert from one addr to another (no nop)
700 : * @rec: the call site record (e.g. mcount/fentry)
701 : * @old_addr: the address expected to be currently called to
702 : * @addr: the address to change to
703 : *
704 : * This is a very sensitive operation and great care needs
705 : * to be taken by the arch. The operation should carefully
706 : * read the location, check to see if what is read is indeed
707 : * what we expect it to be, and then on success of the compare,
708 : * it should write to the location.
709 : *
710 : * The code segment at @rec->ip should be a caller to @old_addr
711 : *
712 : * Return must be:
713 : * 0 on success
714 : * -EFAULT on error reading the location
715 : * -EINVAL on a failed compare of the contents
716 : * -EPERM on error writing to the location
717 : * Any other value will be considered a failure.
718 : */
719 : extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
720 : unsigned long addr);
721 : #else
722 : /* Should never be called */
723 : static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
724 : unsigned long addr)
725 : {
726 : return -EINVAL;
727 : }
728 : #endif
729 :
730 : /* May be defined in arch */
731 : extern int ftrace_arch_read_dyn_info(char *buf, int size);
732 :
733 : extern int skip_trace(unsigned long ip);
734 : extern void ftrace_module_init(struct module *mod);
735 : extern void ftrace_module_enable(struct module *mod);
736 : extern void ftrace_release_mod(struct module *mod);
737 :
738 : extern void ftrace_disable_daemon(void);
739 : extern void ftrace_enable_daemon(void);
740 : #else /* CONFIG_DYNAMIC_FTRACE */
741 : static inline int skip_trace(unsigned long ip) { return 0; }
742 : static inline void ftrace_disable_daemon(void) { }
743 : static inline void ftrace_enable_daemon(void) { }
744 : static inline void ftrace_module_init(struct module *mod) { }
745 : static inline void ftrace_module_enable(struct module *mod) { }
746 : static inline void ftrace_release_mod(struct module *mod) { }
747 : static inline int ftrace_text_reserved(const void *start, const void *end)
748 : {
749 : return 0;
750 : }
751 : static inline unsigned long ftrace_location(unsigned long ip)
752 : {
753 : return 0;
754 : }
755 :
756 : /*
757 : * Again, users of functions that take an ftrace_ops may not
758 : * have them defined when ftrace is not enabled, but these
759 : * functions may still be called. Use macros instead of inlines.
760 : */
761 : #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
762 : #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
763 : #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
764 : #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
765 : #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
766 : #define ftrace_free_filter(ops) do { } while (0)
767 : #define ftrace_ops_set_global_filter(ops) do { } while (0)
768 :
769 : static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
770 : size_t cnt, loff_t *ppos) { return -ENODEV; }
771 : static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
772 : size_t cnt, loff_t *ppos) { return -ENODEV; }
773 : static inline int
774 : ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
775 :
776 2634183 : static inline bool is_ftrace_trampoline(unsigned long addr)
777 : {
778 2634183 : return false;
779 : }
780 : #endif /* CONFIG_DYNAMIC_FTRACE */
781 :
782 : /* totally disable ftrace - cannot re-enable after this */
783 : void ftrace_kill(void);
784 :
785 : static inline void tracer_disable(void)
786 : {
787 : #ifdef CONFIG_FUNCTION_TRACER
788 : ftrace_enabled = 0;
789 : #endif
790 : }
791 :
792 : /*
793 : * Ftrace disable/restore without lock. Some synchronization mechanism
794 : * must be used to prevent ftrace_enabled from being changed between
795 : * disable/restore.
796 : */
797 : static inline int __ftrace_enabled_save(void)
798 : {
799 : #ifdef CONFIG_FUNCTION_TRACER
800 : int saved_ftrace_enabled = ftrace_enabled;
801 : ftrace_enabled = 0;
802 : return saved_ftrace_enabled;
803 : #else
804 : return 0;
805 : #endif
806 : }
807 :
808 : static inline void __ftrace_enabled_restore(int enabled)
809 : {
810 : #ifdef CONFIG_FUNCTION_TRACER
811 : ftrace_enabled = enabled;
812 : #endif
813 : }
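/*
 * Save/restore sketch; the caller provides whatever synchronization keeps
 * ftrace_enabled from changing in between:
 *
 *	int saved = __ftrace_enabled_save();
 *	... section that must run without ftrace ...
 *	__ftrace_enabled_restore(saved);
 */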
814 :
815 : /* All archs should have this, but we define it for consistency */
816 : #ifndef ftrace_return_address0
817 : # define ftrace_return_address0 __builtin_return_address(0)
818 : #endif
819 :
820 : /* Archs may use other ways for ADDR1 and beyond */
821 : #ifndef ftrace_return_address
822 : # ifdef CONFIG_FRAME_POINTER
823 : # define ftrace_return_address(n) __builtin_return_address(n)
824 : # else
825 : # define ftrace_return_address(n) 0UL
826 : # endif
827 : #endif
828 :
829 : #define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
830 : #define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
831 : #define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
832 : #define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
833 : #define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
834 : #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
835 : #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
836 :
837 : static inline unsigned long get_lock_parent_ip(void)
838 : {
839 : unsigned long addr = CALLER_ADDR0;
840 :
841 : if (!in_lock_functions(addr))
842 : return addr;
843 : addr = CALLER_ADDR1;
844 : if (!in_lock_functions(addr))
845 : return addr;
846 : return CALLER_ADDR2;
847 : }
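/*
 * Usage sketch: these are typically fed straight into the preempt/irq
 * tracking hooks, for example:
 *
 *	trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 */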
848 :
849 : #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
850 : extern void trace_preempt_on(unsigned long a0, unsigned long a1);
851 : extern void trace_preempt_off(unsigned long a0, unsigned long a1);
852 : #else
853 : /*
854 : * Use defines instead of static inlines because some arches will emit code for
855 : * the CALLER_ADDR, when we really want these to be a real nop.
856 : */
857 : # define trace_preempt_on(a0, a1) do { } while (0)
858 : # define trace_preempt_off(a0, a1) do { } while (0)
859 : #endif
860 :
861 : #ifdef CONFIG_FTRACE_MCOUNT_RECORD
862 : extern void ftrace_init(void);
863 : #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
864 : #define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
865 : #else
866 : #define FTRACE_CALLSITE_SECTION "__mcount_loc"
867 : #endif
868 : #else
869 : static inline void ftrace_init(void) { }
870 : #endif
871 :
872 : /*
873 : * Structure that defines an entry function trace.
874 : * It's already packed but the attribute "packed" is needed
875 : * to remove extra padding at the end.
876 : */
877 : struct ftrace_graph_ent {
878 : unsigned long func; /* Current function */
879 : int depth;
880 : } __packed;
881 :
882 : /*
883 : * Structure that defines a return function trace.
884 : * It's already packed but the attribute "packed" is needed
885 : * to remove extra padding at the end.
886 : */
887 : struct ftrace_graph_ret {
888 : unsigned long func; /* Current function */
889 : int depth;
890 : /* Number of functions that overran the depth limit for current task */
891 : unsigned int overrun;
892 : unsigned long long calltime;
893 : unsigned long long rettime;
894 : } __packed;
895 :
896 : /* Type of the callback handlers for tracing function graph */
897 : typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
898 : typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
899 :
900 : extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
901 :
902 : #ifdef CONFIG_FUNCTION_GRAPH_TRACER
903 :
904 : struct fgraph_ops {
905 : trace_func_graph_ent_t entryfunc;
906 : trace_func_graph_ret_t retfunc;
907 : };
908 :
909 : /*
910 : * Stack of return addresses for functions
911 : * of a thread.
912 : * Used in struct thread_info
913 : */
914 : struct ftrace_ret_stack {
915 : unsigned long ret;
916 : unsigned long func;
917 : unsigned long long calltime;
918 : #ifdef CONFIG_FUNCTION_PROFILER
919 : unsigned long long subtime;
920 : #endif
921 : #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
922 : unsigned long fp;
923 : #endif
924 : #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
925 : unsigned long *retp;
926 : #endif
927 : };
928 :
929 : /*
930 : * Primary handler of a function return.
931 : * It relies on ftrace_return_to_handler.
932 : * Defined in entry_32/64.S
933 : */
934 : extern void return_to_handler(void);
935 :
936 : extern int
937 : function_graph_enter(unsigned long ret, unsigned long func,
938 : unsigned long frame_pointer, unsigned long *retp);
939 :
940 : struct ftrace_ret_stack *
941 : ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
942 :
943 : unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
944 : unsigned long ret, unsigned long *retp);
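/*
 * Unwinder sketch: a stack walker uses this to map a return address that
 * return_to_handler may have hijacked back to the real caller (graph_idx is
 * per-walk state owned by the unwinder):
 *
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, retp);
 */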
945 :
946 : /*
947 : * Sometimes we don't want to trace a function with the function
948 : * graph tracer, but we still want it traced by the usual function
949 : * tracer if the function graph tracer is not configured.
950 : */
951 : #define __notrace_funcgraph notrace
952 :
953 : #define FTRACE_RETFUNC_DEPTH 50
954 : #define FTRACE_RETSTACK_ALLOC_SIZE 32
955 :
956 : extern int register_ftrace_graph(struct fgraph_ops *ops);
957 : extern void unregister_ftrace_graph(struct fgraph_ops *ops);
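/*
 * Graph tracer registration sketch (my_gops and its handlers are placeholder
 * names; the entry handler returns non-zero to trace the function):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */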
958 :
959 : extern bool ftrace_graph_is_dead(void);
960 : extern void ftrace_graph_stop(void);
961 :
962 : /* The current handlers in use */
963 : extern trace_func_graph_ret_t ftrace_graph_return;
964 : extern trace_func_graph_ent_t ftrace_graph_entry;
965 :
966 : extern void ftrace_graph_init_task(struct task_struct *t);
967 : extern void ftrace_graph_exit_task(struct task_struct *t);
968 : extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
969 :
970 : static inline void pause_graph_tracing(void)
971 : {
972 : atomic_inc(&current->tracing_graph_pause);
973 : }
974 :
975 : static inline void unpause_graph_tracing(void)
976 : {
977 : atomic_dec(&current->tracing_graph_pause);
978 : }
979 : #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
980 :
981 : #define __notrace_funcgraph
982 :
983 916 : static inline void ftrace_graph_init_task(struct task_struct *t) { }
984 834 : static inline void ftrace_graph_exit_task(struct task_struct *t) { }
985 10 : static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
986 :
987 : /* Define as macros as fgraph_ops may not be defined */
988 : #define register_ftrace_graph(ops) ({ -1; })
989 : #define unregister_ftrace_graph(ops) do { } while (0)
990 :
991 : static inline unsigned long
992 131333212 : ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
993 : unsigned long *retp)
994 : {
995 131333212 : return ret;
996 : }
997 :
998 0 : static inline void pause_graph_tracing(void) { }
999 0 : static inline void unpause_graph_tracing(void) { }
1000 : #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1001 :
1002 : #ifdef CONFIG_TRACING
1003 :
1004 : /* flags for current->trace */
1005 : enum {
1006 : TSK_TRACE_FL_TRACE_BIT = 0,
1007 : TSK_TRACE_FL_GRAPH_BIT = 1,
1008 : };
1009 : enum {
1010 : TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
1011 : TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
1012 : };
1013 :
1014 : static inline void set_tsk_trace_trace(struct task_struct *tsk)
1015 : {
1016 : set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
1017 : }
1018 :
1019 : static inline void clear_tsk_trace_trace(struct task_struct *tsk)
1020 : {
1021 : clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
1022 : }
1023 :
1024 : static inline int test_tsk_trace_trace(struct task_struct *tsk)
1025 : {
1026 : return tsk->trace & TSK_TRACE_FL_TRACE;
1027 : }
1028 :
1029 : static inline void set_tsk_trace_graph(struct task_struct *tsk)
1030 : {
1031 : set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
1032 : }
1033 :
1034 : static inline void clear_tsk_trace_graph(struct task_struct *tsk)
1035 : {
1036 : clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
1037 : }
1038 :
1039 : static inline int test_tsk_trace_graph(struct task_struct *tsk)
1040 : {
1041 : return tsk->trace & TSK_TRACE_FL_GRAPH;
1042 : }
1043 :
1044 : enum ftrace_dump_mode;
1045 :
1046 : extern enum ftrace_dump_mode ftrace_dump_on_oops;
1047 : extern int tracepoint_printk;
1048 :
1049 : extern void disable_trace_on_warning(void);
1050 : extern int __disable_trace_on_warning;
1051 :
1052 : int tracepoint_printk_sysctl(struct ctl_table *table, int write,
1053 : void *buffer, size_t *lenp, loff_t *ppos);
1054 :
1055 : #else /* CONFIG_TRACING */
1056 : static inline void disable_trace_on_warning(void) { }
1057 : #endif /* CONFIG_TRACING */
1058 :
1059 : #ifdef CONFIG_FTRACE_SYSCALLS
1060 :
1061 : unsigned long arch_syscall_addr(int nr);
1062 :
1063 : #endif /* CONFIG_FTRACE_SYSCALLS */
1064 :
1065 : #endif /* _LINUX_FTRACE_H */