LCOV - code coverage report
Current view: top level - kernel/trace - trace.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                   Hit      Total    Coverage
  Lines:           332       3464       9.6 %
  Functions:        33        294      11.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * ring buffer based function tracer
       4             :  *
       5             :  * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
       6             :  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
       7             :  *
       8             :  * Originally taken from the RT patch by:
       9             :  *    Arnaldo Carvalho de Melo <acme@redhat.com>
      10             :  *
      11             :  * Based on code from the latency_tracer, that is:
      12             :  *  Copyright (C) 2004-2006 Ingo Molnar
      13             :  *  Copyright (C) 2004 Nadia Yvette Chambers
      14             :  */
      15             : #include <linux/ring_buffer.h>
      16             : #include <generated/utsrelease.h>
      17             : #include <linux/stacktrace.h>
      18             : #include <linux/writeback.h>
      19             : #include <linux/kallsyms.h>
      20             : #include <linux/security.h>
      21             : #include <linux/seq_file.h>
      22             : #include <linux/notifier.h>
      23             : #include <linux/irqflags.h>
      24             : #include <linux/debugfs.h>
      25             : #include <linux/tracefs.h>
      26             : #include <linux/pagemap.h>
      27             : #include <linux/hardirq.h>
      28             : #include <linux/linkage.h>
      29             : #include <linux/uaccess.h>
      30             : #include <linux/vmalloc.h>
      31             : #include <linux/ftrace.h>
      32             : #include <linux/module.h>
      33             : #include <linux/percpu.h>
      34             : #include <linux/splice.h>
      35             : #include <linux/kdebug.h>
      36             : #include <linux/string.h>
      37             : #include <linux/mount.h>
      38             : #include <linux/rwsem.h>
      39             : #include <linux/slab.h>
      40             : #include <linux/ctype.h>
      41             : #include <linux/init.h>
      42             : #include <linux/poll.h>
      43             : #include <linux/nmi.h>
      44             : #include <linux/fs.h>
      45             : #include <linux/trace.h>
      46             : #include <linux/sched/clock.h>
      47             : #include <linux/sched/rt.h>
      48             : #include <linux/fsnotify.h>
      49             : #include <linux/irq_work.h>
      50             : #include <linux/workqueue.h>
      51             : 
      52             : #include "trace.h"
      53             : #include "trace_output.h"
      54             : 
      55             : /*
      56             :  * On boot up, the ring buffer is set to the minimum size, so that
      57             :  * we do not waste memory on systems that are not using tracing.
      58             :  */
      59             : bool ring_buffer_expanded;
      60             : 
      61             : /*
      62             :  * We need to change this state when a selftest is running.
      63             :  * A selftest will look into the ring buffer to count the
      64             :  * entries inserted during the selftest, although concurrent
      65             :  * insertions into the ring buffer, such as trace_printk, could occur
      66             :  * at the same time, giving false positive or negative results.
      67             :  */
      68             : static bool __read_mostly tracing_selftest_running;
      69             : 
      70             : /*
      71             :  * If boot-time tracing (including tracers/events set via the kernel
      72             :  * cmdline) is running, we do not want to run the selftest.
      73             :  */
      74             : bool __read_mostly tracing_selftest_disabled;
      75             : 
      76             : #ifdef CONFIG_FTRACE_STARTUP_TEST
      77             : void __init disable_tracing_selftest(const char *reason)
      78             : {
      79             :         if (!tracing_selftest_disabled) {
      80             :                 tracing_selftest_disabled = true;
      81             :                 pr_info("Ftrace startup test is disabled due to %s\n", reason);
      82             :         }
      83             : }
      84             : #endif
      85             : 
      86             : /* Pipe tracepoints to printk */
      87             : struct trace_iterator *tracepoint_print_iter;
      88             : int tracepoint_printk;
      89             : static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
      90             : 
      91             : /* For tracers that don't implement custom flags */
      92             : static struct tracer_opt dummy_tracer_opt[] = {
      93             :         { }
      94             : };
      95             : 
      96             : static int
      97           0 : dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
      98             : {
      99           0 :         return 0;
     100             : }
     101             : 
     102             : /*
     103             :  * To prevent the comm cache from being overwritten when no
     104             :  * tracing is active, only save the comm when a trace event
     105             :  * occurs.
     106             :  */
     107             : static DEFINE_PER_CPU(bool, trace_taskinfo_save);
     108             : 
     109             : /*
     110             :  * Kill all tracing for good (never come back).
     111             :  * It is initialized to 1 but will be set to zero if the initialization
     112             :  * of the tracer is successful. That is the only place that sets
     113             :  * this back to zero.
     114             :  */
     115             : static int tracing_disabled = 1;
     116             : 
     117             : cpumask_var_t __read_mostly     tracing_buffer_mask;
     118             : 
     119             : /*
     120             :  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
     121             :  *
     122             :  * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
     123             :  * is set, then ftrace_dump is called. This will output the contents
     124             :  * of the ftrace buffers to the console.  This is very useful for
     125             :  * capturing traces that lead to crashes and outputting them to a
     126             :  * serial console.
     127             :  *
     128             :  * It is off by default, but you can enable it either by specifying
     129             :  * "ftrace_dump_on_oops" on the kernel command line, or by setting
     130             :  * /proc/sys/kernel/ftrace_dump_on_oops.
     131             :  * Set it to 1 to dump the buffers of all CPUs.
     132             :  * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
     133             :  */
     134             : 
     135             : enum ftrace_dump_mode ftrace_dump_on_oops;
     136             : 
     137             : /* When set, tracing will stop when a WARN*() is hit */
     138             : int __disable_trace_on_warning;
     139             : 
     140             : #ifdef CONFIG_TRACE_EVAL_MAP_FILE
     141             : /* Map of enums to their values, for "eval_map" file */
     142             : struct trace_eval_map_head {
     143             :         struct module                   *mod;
     144             :         unsigned long                   length;
     145             : };
     146             : 
     147             : union trace_eval_map_item;
     148             : 
     149             : struct trace_eval_map_tail {
     150             :         /*
     151             :          * "end" points to NULL, as it must be different
     152             :          * from "mod" or "eval_string"
     153             :          */
     154             :         union trace_eval_map_item       *next;
     155             :         const char                      *end;   /* points to NULL */
     156             : };
     157             : 
     158             : static DEFINE_MUTEX(trace_eval_mutex);
     159             : 
     160             : /*
     161             :  * The trace_eval_maps are saved in an array with two extra elements,
     162             :  * one at the beginning, and one at the end. The beginning item contains
     163             :  * the count of the saved maps (head.length), and the module they
     164             :  * belong to if not built in (head.mod). The ending item contains a
     165             :  * pointer to the next array of saved eval_map items.
     166             :  */
     167             : union trace_eval_map_item {
     168             :         struct trace_eval_map           map;
     169             :         struct trace_eval_map_head      head;
     170             :         struct trace_eval_map_tail      tail;
     171             : };
     172             : 
     173             : static union trace_eval_map_item *trace_eval_maps;
     174             : #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
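
A minimal sketch of walking this layout (walk_eval_maps() is hypothetical and
assumes only the [head][length maps][tail] arrangement described above):

        static void walk_eval_maps(union trace_eval_map_item *ptr)
        {
                unsigned long i;

                while (ptr) {
                        unsigned long len = ptr->head.length;
                        union trace_eval_map_item *map = ptr + 1; /* first map */

                        for (i = 0; i < len; i++)
                                pr_info("%s = %lu\n", map[i].map.eval_string,
                                        map[i].map.eval_value);

                        /* the tail item directly follows the len map entries */
                        ptr = map[len].tail.next;
                }
        }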
     175             : 
     176             : int tracing_set_tracer(struct trace_array *tr, const char *buf);
     177             : static void ftrace_trace_userstack(struct trace_array *tr,
     178             :                                    struct trace_buffer *buffer,
     179             :                                    unsigned int trace_ctx);
     180             : 
     181             : #define MAX_TRACER_SIZE         100
     182             : static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
     183             : static char *default_bootup_tracer;
     184             : 
     185             : static bool allocate_snapshot;
     186             : 
     187           0 : static int __init set_cmdline_ftrace(char *str)
     188             : {
     189           0 :         strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
     190           0 :         default_bootup_tracer = bootup_tracer_buf;
     191             :         /* We are using ftrace early, expand it */
     192           0 :         ring_buffer_expanded = true;
     193           0 :         return 1;
     194             : }
     195             : __setup("ftrace=", set_cmdline_ftrace);
     196             : 
     197           0 : static int __init set_ftrace_dump_on_oops(char *str)
     198             : {
     199           0 :         if (*str++ != '=' || !*str) {
     200           0 :                 ftrace_dump_on_oops = DUMP_ALL;
     201           0 :                 return 1;
     202             :         }
     203             : 
     204           0 :         if (!strcmp("orig_cpu", str)) {
     205           0 :                 ftrace_dump_on_oops = DUMP_ORIG;
     206           0 :                 return 1;
     207             :         }
     208             : 
     209             :         return 0;
     210             : }
     211             : __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
     212             : 
     213           0 : static int __init stop_trace_on_warning(char *str)
     214             : {
     215           0 :         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
     216           0 :                 __disable_trace_on_warning = 1;
     217           0 :         return 1;
     218             : }
     219             : __setup("traceoff_on_warning", stop_trace_on_warning);
     220             : 
     221           0 : static int __init boot_alloc_snapshot(char *str)
     222             : {
     223           0 :         allocate_snapshot = true;
     224             :         /* We also need the main ring buffer expanded */
     225           0 :         ring_buffer_expanded = true;
     226           0 :         return 1;
     227             : }
     228             : __setup("alloc_snapshot", boot_alloc_snapshot);
     229             : 
     230             : 
     231             : static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
     232             : 
     233           0 : static int __init set_trace_boot_options(char *str)
     234             : {
     235           0 :         strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
     236           0 :         return 0;
     237             : }
     238             : __setup("trace_options=", set_trace_boot_options);
     239             : 
     240             : static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
     241             : static char *trace_boot_clock __initdata;
     242             : 
     243           0 : static int __init set_trace_boot_clock(char *str)
     244             : {
     245           0 :         strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
     246           0 :         trace_boot_clock = trace_boot_clock_buf;
     247           0 :         return 0;
     248             : }
     249             : __setup("trace_clock=", set_trace_boot_clock);
     250             : 
     251           0 : static int __init set_tracepoint_printk(char *str)
     252             : {
     253           0 :         if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
     254           0 :                 tracepoint_printk = 1;
     255           0 :         return 1;
     256             : }
     257             : __setup("tp_printk", set_tracepoint_printk);
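
Taken together, these __setup() handlers allow tracing to be configured from
the kernel command line. An illustrative (hypothetical) combination:

        ftrace=function trace_clock=global traceoff_on_warning tp_printk

would select the function tracer at boot, switch the trace clock, stop tracing
on the first WARN*(), and pipe tracepoints to printk.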
     258             : 
     259           0 : unsigned long long ns2usecs(u64 nsec)
     260             : {
     261           0 :         nsec += 500;
     262           0 :         do_div(nsec, 1000);
     263           0 :         return nsec;
     264             : }
     265             : 
     266             : static void
     267           0 : trace_process_export(struct trace_export *export,
     268             :                struct ring_buffer_event *event, int flag)
     269             : {
     270           0 :         struct trace_entry *entry;
     271           0 :         unsigned int size = 0;
     272             : 
     273           0 :         if (export->flags & flag) {
     274           0 :                 entry = ring_buffer_event_data(event);
     275           0 :                 size = ring_buffer_event_length(event);
     276           0 :                 export->write(export, entry, size);
     277             :         }
     278           0 : }
     279             : 
     280             : static DEFINE_MUTEX(ftrace_export_lock);
     281             : 
     282             : static struct trace_export __rcu *ftrace_exports_list __read_mostly;
     283             : 
     284             : static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
     285             : static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
     286             : static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
     287             : 
     288           0 : static inline void ftrace_exports_enable(struct trace_export *export)
     289             : {
     290           0 :         if (export->flags & TRACE_EXPORT_FUNCTION)
     291           0 :                 static_branch_inc(&trace_function_exports_enabled);
     292             : 
     293           0 :         if (export->flags & TRACE_EXPORT_EVENT)
     294           0 :                 static_branch_inc(&trace_event_exports_enabled);
     295             : 
     296           0 :         if (export->flags & TRACE_EXPORT_MARKER)
     297           0 :                 static_branch_inc(&trace_marker_exports_enabled);
     298           0 : }
     299             : 
     300           0 : static inline void ftrace_exports_disable(struct trace_export *export)
     301             : {
     302           0 :         if (export->flags & TRACE_EXPORT_FUNCTION)
     303           0 :                 static_branch_dec(&trace_function_exports_enabled);
     304             : 
     305           0 :         if (export->flags & TRACE_EXPORT_EVENT)
     306           0 :                 static_branch_dec(&trace_event_exports_enabled);
     307             : 
     308           0 :         if (export->flags & TRACE_EXPORT_MARKER)
     309           0 :                 static_branch_dec(&trace_marker_exports_enabled);
     310           0 : }
     311             : 
     312           0 : static void ftrace_exports(struct ring_buffer_event *event, int flag)
     313             : {
     314           0 :         struct trace_export *export;
     315             : 
     316           0 :         preempt_disable_notrace();
     317             : 
     318           0 :         export = rcu_dereference_raw_check(ftrace_exports_list);
     319           0 :         while (export) {
     320           0 :                 trace_process_export(export, event, flag);
     321           0 :                 export = rcu_dereference_raw_check(export->next);
     322             :         }
     323             : 
     324           0 :         preempt_enable_notrace();
     325           0 : }
     326             : 
     327             : static inline void
     328           0 : add_trace_export(struct trace_export **list, struct trace_export *export)
     329             : {
     330           0 :         rcu_assign_pointer(export->next, *list);
     331             :         /*
     332             :          * We are entering export into the list but another
     333             :          * CPU might be walking that list. We need to make sure
     334             :          * the export->next pointer is valid before another CPU sees
     335             :          * the export pointer included into the list.
     336             :          */
     337           0 :         rcu_assign_pointer(*list, export);
     338             : }
     339             : 
     340             : static inline int
     341           0 : rm_trace_export(struct trace_export **list, struct trace_export *export)
     342             : {
     343           0 :         struct trace_export **p;
     344             : 
     345           0 :         for (p = list; *p != NULL; p = &(*p)->next)
     346           0 :                 if (*p == export)
     347             :                         break;
     348             : 
     349           0 :         if (*p != export)
     350             :                 return -1;
     351             : 
     352           0 :         rcu_assign_pointer(*p, (*p)->next);
     353             : 
     354           0 :         return 0;
     355             : }
     356             : 
     357             : static inline void
     358           0 : add_ftrace_export(struct trace_export **list, struct trace_export *export)
     359             : {
     360           0 :         ftrace_exports_enable(export);
     361             : 
     362           0 :         add_trace_export(list, export);
     363             : }
     364             : 
     365             : static inline int
     366           0 : rm_ftrace_export(struct trace_export **list, struct trace_export *export)
     367             : {
     368           0 :         int ret;
     369             : 
     370           0 :         ret = rm_trace_export(list, export);
     371           0 :         ftrace_exports_disable(export);
     372             : 
     373           0 :         return ret;
     374             : }
     375             : 
     376           0 : int register_ftrace_export(struct trace_export *export)
     377             : {
     378           0 :         if (WARN_ON_ONCE(!export->write))
     379             :                 return -1;
     380             : 
     381           0 :         mutex_lock(&ftrace_export_lock);
     382             : 
     383           0 :         add_ftrace_export(&ftrace_exports_list, export);
     384             : 
     385           0 :         mutex_unlock(&ftrace_export_lock);
     386             : 
     387           0 :         return 0;
     388             : }
     389             : EXPORT_SYMBOL_GPL(register_ftrace_export);
     390             : 
     391           0 : int unregister_ftrace_export(struct trace_export *export)
     392             : {
     393           0 :         int ret;
     394             : 
     395           0 :         mutex_lock(&ftrace_export_lock);
     396             : 
     397           0 :         ret = rm_ftrace_export(&ftrace_exports_list, export);
     398             : 
     399           0 :         mutex_unlock(&ftrace_export_lock);
     400             : 
     401           0 :         return ret;
     402             : }
     403             : EXPORT_SYMBOL_GPL(unregister_ftrace_export);
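
As a usage sketch (my_export and my_export_write are hypothetical names; the
callback signature follows struct trace_export in <linux/trace.h>):

        static void my_export_write(struct trace_export *export,
                                    const void *entry, unsigned int size)
        {
                /* forward the raw trace entry to some external transport */
        }

        static struct trace_export my_export = {
                .write = my_export_write,
                .flags = TRACE_EXPORT_EVENT,
        };

        /* register_ftrace_export(&my_export); ...
           unregister_ftrace_export(&my_export); */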
     404             : 
     405             : /* trace_flags holds trace_options default values */
     406             : #define TRACE_DEFAULT_FLAGS                                             \
     407             :         (FUNCTION_DEFAULT_FLAGS |                                       \
     408             :          TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |                  \
     409             :          TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |                \
     410             :          TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |                 \
     411             :          TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |                     \
     412             :          TRACE_ITER_HASH_PTR)
     413             : 
     414             : /* trace_options that are only supported by global_trace */
     415             : #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |                      \
     416             :                TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
     417             : 
     418             : /* trace_flags that are default zero for instances */
     419             : #define ZEROED_TRACE_FLAGS \
     420             :         (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
     421             : 
     422             : /*
     423             :  * The global_trace is the descriptor that holds the top-level tracing
     424             :  * buffers for the live tracing.
     425             :  */
     426             : static struct trace_array global_trace = {
     427             :         .trace_flags = TRACE_DEFAULT_FLAGS,
     428             : };
     429             : 
     430             : LIST_HEAD(ftrace_trace_arrays);
     431             : 
     432           0 : int trace_array_get(struct trace_array *this_tr)
     433             : {
     434           0 :         struct trace_array *tr;
     435           0 :         int ret = -ENODEV;
     436             : 
     437           0 :         mutex_lock(&trace_types_lock);
     438           0 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
     439           0 :                 if (tr == this_tr) {
     440           0 :                         tr->ref++;
     441           0 :                         ret = 0;
     442           0 :                         break;
     443             :                 }
     444             :         }
     445           0 :         mutex_unlock(&trace_types_lock);
     446             : 
     447           0 :         return ret;
     448             : }
     449             : 
     450           0 : static void __trace_array_put(struct trace_array *this_tr)
     451             : {
     452           0 :         WARN_ON(!this_tr->ref);
     453           0 :         this_tr->ref--;
     454           0 : }
     455             : 
     456             : /**
     457             :  * trace_array_put - Decrement the reference counter for this trace array.
     458             :  * @this_tr: pointer to the trace array
     459             :  *
     460             :  * NOTE: Use this when we no longer need the trace array returned by
     461             :  * trace_array_get_by_name(). This ensures the trace array can be later
     462             :  * destroyed.
     463             :  *
     464             :  */
     465           0 : void trace_array_put(struct trace_array *this_tr)
     466             : {
     467           0 :         if (!this_tr)
     468             :                 return;
     469             : 
     470           0 :         mutex_lock(&trace_types_lock);
     471           0 :         __trace_array_put(this_tr);
     472           0 :         mutex_unlock(&trace_types_lock);
     473             : }
     474             : EXPORT_SYMBOL_GPL(trace_array_put);
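
A sketch of the intended lifetime pattern ("my_instance" is a placeholder;
trace_array_get_by_name() looks up or creates an instance and takes a
reference on it):

        struct trace_array *tr;

        tr = trace_array_get_by_name("my_instance");
        if (tr) {
                /* ... use tr ... */
                trace_array_put(tr);    /* allow it to be destroyed later */
        }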
     475             : 
     476           0 : int tracing_check_open_get_tr(struct trace_array *tr)
     477             : {
     478           0 :         int ret;
     479             : 
     480           0 :         ret = security_locked_down(LOCKDOWN_TRACEFS);
     481           0 :         if (ret)
     482             :                 return ret;
     483             : 
     484           0 :         if (tracing_disabled)
     485             :                 return -ENODEV;
     486             : 
     487           0 :         if (tr && trace_array_get(tr) < 0)
     488           0 :                 return -ENODEV;
     489             : 
     490             :         return 0;
     491             : }
     492             : 
     493           0 : int call_filter_check_discard(struct trace_event_call *call, void *rec,
     494             :                               struct trace_buffer *buffer,
     495             :                               struct ring_buffer_event *event)
     496             : {
     497           0 :         if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
     498           0 :             !filter_match_preds(call->filter, rec)) {
     499           0 :                 __trace_event_discard_commit(buffer, event);
     500           0 :                 return 1;
     501             :         }
     502             : 
     503             :         return 0;
     504             : }
     505             : 
     506           0 : void trace_free_pid_list(struct trace_pid_list *pid_list)
     507             : {
     508           0 :         vfree(pid_list->pids);
     509           0 :         kfree(pid_list);
     510           0 : }
     511             : 
     512             : /**
     513             :  * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
     514             :  * @filtered_pids: The list of pids to check
     515             :  * @search_pid: The PID to find in @filtered_pids
     516             :  *
     517             :  * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
     518             :  */
     519             : bool
     520           0 : trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
     521             : {
     522             :         /*
     523             :          * If pid_max changed after filtered_pids was created, we
     524             :          * by default ignore all pids greater than the previous pid_max.
     525             :          */
     526           0 :         if (search_pid >= filtered_pids->pid_max)
     527             :                 return false;
     528             : 
     529           0 :         return test_bit(search_pid, filtered_pids->pids);
     530             : }
     531             : 
     532             : /**
     533             :  * trace_ignore_this_task - should a task be ignored for tracing
     534             :  * @filtered_pids: The list of pids to check
     535             :  * @filtered_no_pids: The list of pids not to be traced
     536             :  * @task: The task that should be ignored if not filtered
     537             :  *
     538             :  * Checks if @task should be traced based on @filtered_pids and @filtered_no_pids.
     539             :  * Returns true if @task should *NOT* be traced.
     540             :  * Returns false if @task should be traced.
     541             :  */
     542             : bool
     543           0 : trace_ignore_this_task(struct trace_pid_list *filtered_pids,
     544             :                        struct trace_pid_list *filtered_no_pids,
     545             :                        struct task_struct *task)
     546             : {
     547             :         /*
     548             :  * If filtered_no_pids is not empty, and the task's pid is listed
     549             :          * in filtered_no_pids, then return true.
     550             :          * Otherwise, if filtered_pids is empty, that means we can
     551             :          * trace all tasks. If it has content, then only trace pids
     552             :          * within filtered_pids.
     553             :          */
     554             : 
     555           0 :         return (filtered_pids &&
     556           0 :                 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
     557           0 :                 (filtered_no_pids &&
     558           0 :                  trace_find_filtered_pid(filtered_no_pids, task->pid));
     559             : }
     560             : 
     561             : /**
     562             :  * trace_filter_add_remove_task - Add or remove a task from a pid_list
     563             :  * @pid_list: The list to modify
     564             :  * @self: The current task for fork or NULL for exit
     565             :  * @task: The task to add or remove
     566             :  *
     567             :  * When adding a task, if @self is defined, the task is only added if @self
     568             :  * is also included in @pid_list. This happens on fork, and tasks should
     569             :  * only be added when the parent is listed. If @self is NULL, then the
     570             :  * @task pid will be removed from the list, which would happen on exit
     571             :  * of a task.
     572             :  */
     573           0 : void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
     574             :                                   struct task_struct *self,
     575             :                                   struct task_struct *task)
     576             : {
     577           0 :         if (!pid_list)
     578             :                 return;
     579             : 
     580             :         /* For forks, we only add if the forking task is listed */
     581           0 :         if (self) {
     582           0 :                 if (!trace_find_filtered_pid(pid_list, self->pid))
     583           0 :                         return;
     584             :         }
     585             : 
     586             :         /* Sorry, but we don't support pid_max changing after setting */
     587           0 :         if (task->pid >= pid_list->pid_max)
     588             :                 return;
     589             : 
     590             :         /* "self" is set for forks, and NULL for exits */
     591           0 :         if (self)
     592           0 :                 set_bit(task->pid, pid_list->pids);
     593             :         else
     594           0 :                 clear_bit(task->pid, pid_list->pids);
     595             : }
     596             : 
     597             : /**
     598             :  * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
     599             :  * @pid_list: The pid list to show
     600             :  * @v: The last pid that was shown (+1 of the actual pid, so zero can be displayed)
     601             :  * @pos: The position of the file
     602             :  *
     603             :  * This is used by the seq_file "next" operation to iterate the pids
     604             :  * listed in a trace_pid_list structure.
     605             :  *
     606             :  * Returns the pid+1 as we want to display pid of zero, but NULL would
     607             :  * stop the iteration.
     608             :  */
     609           0 : void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
     610             : {
     611           0 :         unsigned long pid = (unsigned long)v;
     612             : 
     613           0 :         (*pos)++;
     614             : 
     615             :         /* pid is already +1 of the actual previous bit */
     616           0 :         pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
     617             : 
     618             :         /* Return pid + 1 to allow zero to be represented */
     619           0 :         if (pid < pid_list->pid_max)
     620           0 :                 return (void *)(pid + 1);
     621             : 
     622             :         return NULL;
     623             : }
     624             : 
     625             : /**
     626             :  * trace_pid_start - Used for seq_file to start reading pid lists
     627             :  * @pid_list: The pid list to show
     628             :  * @pos: The position of the file
     629             :  *
     630             :  * This is used by seq_file "start" operation to start the iteration
     631             :  * of listing pids.
     632             :  *
     633             :  * Returns the pid+1 as we want to display pid of zero, but NULL would
     634             :  * stop the iteration.
     635             :  */
     636           0 : void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
     637             : {
     638           0 :         unsigned long pid;
     639           0 :         loff_t l = 0;
     640             : 
     641           0 :         pid = find_first_bit(pid_list->pids, pid_list->pid_max);
     642           0 :         if (pid >= pid_list->pid_max)
     643             :                 return NULL;
     644             : 
     645             :         /* Return pid + 1 so that zero can be the exit value */
     646           0 :         for (pid++; pid && l < *pos;
     647           0 :              pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
     648           0 :                 ;
     649           0 :         return (void *)pid;
     650             : }
     651             : 
     652             : /**
     653             :  * trace_pid_show - show the current pid in seq_file processing
     654             :  * @m: The seq_file structure to write into
     655             :  * @v: A void pointer of the pid (+1) value to display
     656             :  *
     657             :  * Can be directly used by seq_file operations to display the current
     658             :  * pid value.
     659             :  */
     660           0 : int trace_pid_show(struct seq_file *m, void *v)
     661             : {
     662           0 :         unsigned long pid = (unsigned long)v - 1;
     663             : 
     664           0 :         seq_printf(m, "%lu\n", pid);
     665           0 :         return 0;
     666             : }
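
A sketch of how these three helpers plug into seq_file (the p_* wrappers are
hypothetical and assume the pid list was stashed in m->private):

        static void *p_start(struct seq_file *m, loff_t *pos)
        {
                return trace_pid_start(m->private, pos);
        }

        static void *p_next(struct seq_file *m, void *v, loff_t *pos)
        {
                return trace_pid_next(m->private, v, pos);
        }

        static void p_stop(struct seq_file *m, void *v)
        {
        }

        static const struct seq_operations pid_seq_ops = {
                .start = p_start,
                .next  = p_next,
                .stop  = p_stop,
                .show  = trace_pid_show,
        };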
     667             : 
     668             : /* 128 (PID_BUF_SIZE + 1, as given to the parser below) should be much more than enough */
     669             : #define PID_BUF_SIZE            127
     670             : 
     671           0 : int trace_pid_write(struct trace_pid_list *filtered_pids,
     672             :                     struct trace_pid_list **new_pid_list,
     673             :                     const char __user *ubuf, size_t cnt)
     674             : {
     675           0 :         struct trace_pid_list *pid_list;
     676           0 :         struct trace_parser parser;
     677           0 :         unsigned long val;
     678           0 :         int nr_pids = 0;
     679           0 :         ssize_t read = 0;
     680           0 :         ssize_t ret = 0;
     681           0 :         loff_t pos;
     682           0 :         pid_t pid;
     683             : 
     684           0 :         if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
     685             :                 return -ENOMEM;
     686             : 
     687             :         /*
     688             :          * Always recreate a new array. The write is an all or nothing
     689             :          * operation. Always create a new array when adding new pids by
     690             :          * the user. If the operation fails, then the current list is
     691             :          * not modified.
     692             :          */
     693           0 :         pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
     694           0 :         if (!pid_list) {
     695           0 :                 trace_parser_put(&parser);
     696           0 :                 return -ENOMEM;
     697             :         }
     698             : 
     699           0 :         pid_list->pid_max = READ_ONCE(pid_max);
     700             : 
     701             :         /* Only truncating will shrink pid_max */
     702           0 :         if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
     703           0 :                 pid_list->pid_max = filtered_pids->pid_max;
     704             : 
     705           0 :         pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
     706           0 :         if (!pid_list->pids) {
     707           0 :                 trace_parser_put(&parser);
     708           0 :                 kfree(pid_list);
     709           0 :                 return -ENOMEM;
     710             :         }
     711             : 
     712           0 :         if (filtered_pids) {
     713             :                 /* copy the current bits to the new max */
     714           0 :                 for_each_set_bit(pid, filtered_pids->pids,
     715             :                                  filtered_pids->pid_max) {
     716           0 :                         set_bit(pid, pid_list->pids);
     717           0 :                         nr_pids++;
     718             :                 }
     719             :         }
     720             : 
     721           0 :         while (cnt > 0) {
     722             : 
     723           0 :                 pos = 0;
     724             : 
     725           0 :                 ret = trace_get_user(&parser, ubuf, cnt, &pos);
     726           0 :                 if (ret < 0 || !trace_parser_loaded(&parser))
     727             :                         break;
     728             : 
     729           0 :                 read += ret;
     730           0 :                 ubuf += ret;
     731           0 :                 cnt -= ret;
     732             : 
     733           0 :                 ret = -EINVAL;
     734           0 :                 if (kstrtoul(parser.buffer, 0, &val))
     735             :                         break;
     736           0 :                 if (val >= pid_list->pid_max)
     737             :                         break;
     738             : 
     739           0 :                 pid = (pid_t)val;
     740             : 
     741           0 :                 set_bit(pid, pid_list->pids);
     742           0 :                 nr_pids++;
     743             : 
     744           0 :                 trace_parser_clear(&parser);
     745           0 :                 ret = 0;
     746             :         }
     747           0 :         trace_parser_put(&parser);
     748             : 
     749           0 :         if (ret < 0) {
     750           0 :                 trace_free_pid_list(pid_list);
     751           0 :                 return ret;
     752             :         }
     753             : 
     754           0 :         if (!nr_pids) {
     755             :                 /* Cleared the list of pids */
     756           0 :                 trace_free_pid_list(pid_list);
     757           0 :                 read = ret;
     758           0 :                 pid_list = NULL;
     759             :         }
     760             : 
     761           0 :         *new_pid_list = pid_list;
     762             : 
     763           0 :         return read;
     764             : }
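
For example, tracefs pid-filter files such as set_event_pid are built on this
helper: each write parses a space-separated list of pids into a freshly
allocated bitmap, so a failed write leaves the currently installed list
unmodified.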
     765             : 
     766           0 : static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
     767             : {
     768           0 :         u64 ts;
     769             : 
     770             :         /* Early boot up does not have a buffer yet */
     771           0 :         if (!buf->buffer)
     772           0 :                 return trace_clock_local();
     773             : 
     774           0 :         ts = ring_buffer_time_stamp(buf->buffer, cpu);
     775           0 :         ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
     776             : 
     777           0 :         return ts;
     778             : }
     779             : 
     780           0 : u64 ftrace_now(int cpu)
     781             : {
     782           0 :         return buffer_ftrace_now(&global_trace.array_buffer, cpu);
     783             : }
     784             : 
     785             : /**
     786             :  * tracing_is_enabled - Show if global_trace has been enabled
     787             :  *
     788             :  * Shows if the global trace has been enabled or not. It uses the
     789             :  * mirror flag "buffer_disabled", which is read in fast paths such as
     790             :  * the irqsoff tracer. It may be inaccurate due to races. If you
     791             :  * need to know the accurate state, use tracing_is_on() which is a little
     792             :  * slower, but accurate.
     793             :  */
     794           0 : int tracing_is_enabled(void)
     795             : {
     796             :         /*
     797             :          * For quick access (irqsoff uses this in fast path), just
     798             :          * return the mirror variable of the state of the ring buffer.
     799             :          * It's a little racy, but we don't really care.
     800             :          */
     801           0 :         smp_rmb();
     802           0 :         return !global_trace.buffer_disabled;
     803             : }
     804             : 
     805             : /*
     806             :  * trace_buf_size is the size in bytes that is allocated
     807             :  * for a buffer. Note, the number of bytes is always rounded
     808             :  * to page size.
     809             :  *
     810             :  * This number is purposely set to a low number of 16384.
     811             :  * If the dump on oops happens, it will be much appreciated
     812             :  * to not have to wait for all that output. In any case, this is
     813             :  * configurable at both boot time and run time.
     814             :  */
     815             : #define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */
     816             : 
     817             : static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
     818             : 
     819             : /* trace_types holds a link list of available tracers. */
     820             : static struct tracer            *trace_types __read_mostly;
     821             : 
     822             : /*
     823             :  * trace_types_lock is used to protect the trace_types list.
     824             :  */
     825             : DEFINE_MUTEX(trace_types_lock);
     826             : 
     827             : /*
     828             :  * Serialize access to the ring buffer.
     829             :  *
     830             :  * The ring buffer serializes readers, but that is only low-level protection.
     831             :  * The validity of the events (returned by ring_buffer_peek(), etc.)
     832             :  * is not protected by the ring buffer.
     833             :  *
     834             :  * The content of events may become garbage if we allow other processes to
     835             :  * consume these events concurrently:
     836             :  *   A) the page of the consumed events may become a normal page
     837             :  *      (not a reader page) in the ring buffer, and this page will be
     838             :  *      rewritten by the event producer.
     839             :  *   B) the page of the consumed events may become a page for splice_read,
     840             :  *      and this page will be returned to the system.
     841             :  *
     842             :  * These primitives allow multiple processes to access different cpu
     843             :  * ring buffers concurrently.
     844             :  *
     845             :  * These primitives don't distinguish read-only from read-consume access.
     846             :  * Multiple read-only accesses are also serialized.
     847             :  */
     848             : 
     849             : #ifdef CONFIG_SMP
     850             : static DECLARE_RWSEM(all_cpu_access_lock);
     851             : static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
     852             : 
     853           0 : static inline void trace_access_lock(int cpu)
     854             : {
     855           0 :         if (cpu == RING_BUFFER_ALL_CPUS) {
     856             :                 /* gain it for accessing the whole ring buffer. */
     857           0 :                 down_write(&all_cpu_access_lock);
     858             :         } else {
     859             :                 /* gain it for accessing a cpu ring buffer. */
     860             : 
     861             :                 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
     862           0 :                 down_read(&all_cpu_access_lock);
     863             : 
     864             :                 /* Secondly block other access to this @cpu ring buffer. */
     865           0 :                 mutex_lock(&per_cpu(cpu_access_lock, cpu));
     866             :         }
     867           0 : }
     868             : 
     869           0 : static inline void trace_access_unlock(int cpu)
     870             : {
     871           0 :         if (cpu == RING_BUFFER_ALL_CPUS) {
     872           0 :                 up_write(&all_cpu_access_lock);
     873             :         } else {
     874           0 :                 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
     875           0 :                 up_read(&all_cpu_access_lock);
     876             :         }
     877           0 : }
     878             : 
     879           1 : static inline void trace_access_lock_init(void)
     880             : {
     881           1 :         int cpu;
     882             : 
     883           6 :         for_each_possible_cpu(cpu)
     884           5 :                 mutex_init(&per_cpu(cpu_access_lock, cpu));
     885           1 : }
     886             : 
     887             : #else
     888             : 
     889             : static DEFINE_MUTEX(access_lock);
     890             : 
     891             : static inline void trace_access_lock(int cpu)
     892             : {
     893             :         (void)cpu;
     894             :         mutex_lock(&access_lock);
     895             : }
     896             : 
     897             : static inline void trace_access_unlock(int cpu)
     898             : {
     899             :         (void)cpu;
     900             :         mutex_unlock(&access_lock);
     901             : }
     902             : 
     903             : static inline void trace_access_lock_init(void)
     904             : {
     905             : }
     906             : 
     907             : #endif
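
A sketch of the intended caller pattern (example_consume() is hypothetical):
readers of a single cpu buffer take the shared side, while operations that
span all cpu buffers pass RING_BUFFER_ALL_CPUS to take the exclusive side.

        static void example_consume(int cpu)
        {
                trace_access_lock(cpu);  /* a cpu id, or RING_BUFFER_ALL_CPUS */
                /* ... read or consume events from that cpu's buffer ... */
                trace_access_unlock(cpu);
        }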
     908             : 
     909             : #ifdef CONFIG_STACKTRACE
     910             : static void __ftrace_trace_stack(struct trace_buffer *buffer,
     911             :                                  unsigned int trace_ctx,
     912             :                                  int skip, struct pt_regs *regs);
     913             : static inline void ftrace_trace_stack(struct trace_array *tr,
     914             :                                       struct trace_buffer *buffer,
     915             :                                       unsigned int trace_ctx,
     916             :                                       int skip, struct pt_regs *regs);
     917             : 
     918             : #else
     919             : static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
     920             :                                         unsigned int trace_ctx,
     921             :                                         int skip, struct pt_regs *regs)
     922             : {
     923             : }
     924             : static inline void ftrace_trace_stack(struct trace_array *tr,
     925             :                                       struct trace_buffer *buffer,
     926             :                                       unsigned long trace_ctx,
     927             :                                       int skip, struct pt_regs *regs)
     928             : {
     929             : }
     930             : 
     931             : #endif
     932             : 
     933             : static __always_inline void
     934           0 : trace_event_setup(struct ring_buffer_event *event,
     935             :                   int type, unsigned int trace_ctx)
     936             : {
     937           0 :         struct trace_entry *ent = ring_buffer_event_data(event);
     938             : 
     939           0 :         tracing_generic_entry_update(ent, type, trace_ctx);
     940             : }
     941             : 
     942             : static __always_inline struct ring_buffer_event *
     943           0 : __trace_buffer_lock_reserve(struct trace_buffer *buffer,
     944             :                           int type,
     945             :                           unsigned long len,
     946             :                           unsigned int trace_ctx)
     947             : {
     948           0 :         struct ring_buffer_event *event;
     949             : 
     950           0 :         event = ring_buffer_lock_reserve(buffer, len);
     951           0 :         if (event != NULL)
     952           0 :                 trace_event_setup(event, type, trace_ctx);
     953             : 
     954           0 :         return event;
     955             : }
     956             : 
     957           0 : void tracer_tracing_on(struct trace_array *tr)
     958             : {
     959           0 :         if (tr->array_buffer.buffer)
     960           0 :                 ring_buffer_record_on(tr->array_buffer.buffer);
     961             :         /*
     962             :          * This flag is looked at when buffers haven't been allocated
      963             :          * know if the ring buffer has been disabled; it can tolerate
      964             :          * races where it gets disabled while we still do a record.
     965             :          * races of where it gets disabled but we still do a record.
     966             :          * As the check is in the fast path of the tracers, it is more
     967             :          * important to be fast than accurate.
     968             :          */
     969           0 :         tr->buffer_disabled = 0;
     970             :         /* Make the flag seen by readers */
     971           0 :         smp_wmb();
     972           0 : }
     973             : 
     974             : /**
     975             :  * tracing_on - enable tracing buffers
     976             :  *
     977             :  * This function enables tracing buffers that may have been
     978             :  * disabled with tracing_off.
     979             :  */
     980           0 : void tracing_on(void)
     981             : {
     982           0 :         tracer_tracing_on(&global_trace);
     983           0 : }
     984             : EXPORT_SYMBOL_GPL(tracing_on);
     985             : 
     986             : 
     987             : static __always_inline void
     988           0 : __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
     989             : {
     990           0 :         __this_cpu_write(trace_taskinfo_save, true);
     991             : 
     992             :         /* If this is the temp buffer, we need to commit fully */
     993           0 :         if (this_cpu_read(trace_buffered_event) == event) {
     994             :                 /* Length is in event->array[0] */
     995           0 :                 ring_buffer_write(buffer, event->array[0], &event->array[1]);
     996             :                 /* Release the temp buffer */
     997           0 :                 this_cpu_dec(trace_buffered_event_cnt);
     998             :         } else
     999           0 :                 ring_buffer_unlock_commit(buffer, event);
    1000             : }
    1001             : 
    1002             : /**
    1003             :  * __trace_puts - write a constant string into the trace buffer.
    1004             :  * @ip:    The address of the caller
    1005             :  * @str:   The constant string to write
    1006             :  * @size:  The size of the string.
    1007             :  */
    1008           0 : int __trace_puts(unsigned long ip, const char *str, int size)
    1009             : {
    1010           0 :         struct ring_buffer_event *event;
    1011           0 :         struct trace_buffer *buffer;
    1012           0 :         struct print_entry *entry;
    1013           0 :         unsigned int trace_ctx;
    1014           0 :         int alloc;
    1015             : 
    1016           0 :         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
    1017             :                 return 0;
    1018             : 
    1019           0 :         if (unlikely(tracing_selftest_running || tracing_disabled))
    1020             :                 return 0;
    1021             : 
    1022           0 :         alloc = sizeof(*entry) + size + 2; /* possible \n added */
    1023             : 
    1024           0 :         trace_ctx = tracing_gen_ctx();
    1025           0 :         buffer = global_trace.array_buffer.buffer;
    1026           0 :         ring_buffer_nest_start(buffer);
    1027           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
    1028             :                                             trace_ctx);
    1029           0 :         if (!event) {
    1030           0 :                 size = 0;
    1031           0 :                 goto out;
    1032             :         }
    1033             : 
    1034           0 :         entry = ring_buffer_event_data(event);
    1035           0 :         entry->ip = ip;
    1036             : 
    1037           0 :         memcpy(&entry->buf, str, size);
    1038             : 
    1039             :         /* Add a newline if necessary */
    1040           0 :         if (entry->buf[size - 1] != '\n') {
    1041           0 :                 entry->buf[size] = '\n';
    1042           0 :                 entry->buf[size + 1] = '\0';
    1043             :         } else
    1044           0 :                 entry->buf[size] = '\0';
    1045             : 
    1046           0 :         __buffer_unlock_commit(buffer, event);
    1047           0 :         ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
    1048           0 :  out:
    1049           0 :         ring_buffer_nest_end(buffer);
    1050           0 :         return size;
    1051             : }
    1052             : EXPORT_SYMBOL_GPL(__trace_puts);
    1053             : 
    1054             : /**
    1055             :  * __trace_bputs - write the pointer to a constant string into trace buffer
    1056             :  * @ip:    The address of the caller
    1057             :  * @str:   The constant string to write to the buffer
    1058             :  */
    1059           0 : int __trace_bputs(unsigned long ip, const char *str)
    1060             : {
    1061           0 :         struct ring_buffer_event *event;
    1062           0 :         struct trace_buffer *buffer;
    1063           0 :         struct bputs_entry *entry;
    1064           0 :         unsigned int trace_ctx;
    1065           0 :         int size = sizeof(struct bputs_entry);
    1066           0 :         int ret = 0;
    1067             : 
    1068           0 :         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
    1069             :                 return 0;
    1070             : 
    1071           0 :         if (unlikely(tracing_selftest_running || tracing_disabled))
    1072             :                 return 0;
    1073             : 
    1074           0 :         trace_ctx = tracing_gen_ctx();
    1075           0 :         buffer = global_trace.array_buffer.buffer;
    1076             : 
    1077           0 :         ring_buffer_nest_start(buffer);
    1078           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
    1079             :                                             trace_ctx);
    1080           0 :         if (!event)
    1081           0 :                 goto out;
    1082             : 
    1083           0 :         entry = ring_buffer_event_data(event);
    1084           0 :         entry->ip                    = ip;
    1085           0 :         entry->str                   = str;
    1086             : 
    1087           0 :         __buffer_unlock_commit(buffer, event);
    1088           0 :         ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
    1089             : 
    1090             :         ret = 1;
    1091           0 :  out:
    1092           0 :         ring_buffer_nest_end(buffer);
    1093           0 :         return ret;
    1094             : }
    1095             : EXPORT_SYMBOL_GPL(__trace_bputs);
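                     : 
                     : /*
                     :  * Editor's note: unlike __trace_puts(), which copies the string bytes
                     :  * into the ring buffer, this variant records only the string's
                     :  * address, so @str must stay valid forever (e.g. a string literal).
                     :  * A simplified sketch of the dispatch the trace_puts() macro performs
                     :  * (an assumption about the macro, not a quote of it):
                     :  *
                     :  *	if (__builtin_constant_p(str))
                     :  *		__trace_bputs(_THIS_IP_, str);
                     :  *	else
                     :  *		__trace_puts(_THIS_IP_, str, strlen(str));
                     :  */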
    1096             : 
    1097             : #ifdef CONFIG_TRACER_SNAPSHOT
    1098             : static void tracing_snapshot_instance_cond(struct trace_array *tr,
    1099             :                                            void *cond_data)
    1100             : {
    1101             :         struct tracer *tracer = tr->current_trace;
    1102             :         unsigned long flags;
    1103             : 
    1104             :         if (in_nmi()) {
    1105             :                 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
    1106             :                 internal_trace_puts("*** snapshot is being ignored        ***\n");
    1107             :                 return;
    1108             :         }
    1109             : 
    1110             :         if (!tr->allocated_snapshot) {
    1111             :                 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
    1112             :                 internal_trace_puts("*** stopping trace here!   ***\n");
    1113             :                 tracing_off();
    1114             :                 return;
    1115             :         }
    1116             : 
    1117             :         /* Note: a snapshot cannot be taken while the tracer itself uses the max buffer */
    1118             :         if (tracer->use_max_tr) {
    1119             :                 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
    1120             :                 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
    1121             :                 return;
    1122             :         }
    1123             : 
    1124             :         local_irq_save(flags);
    1125             :         update_max_tr(tr, current, smp_processor_id(), cond_data);
    1126             :         local_irq_restore(flags);
    1127             : }
    1128             : 
    1129             : void tracing_snapshot_instance(struct trace_array *tr)
    1130             : {
    1131             :         tracing_snapshot_instance_cond(tr, NULL);
    1132             : }
    1133             : 
    1134             : /**
    1135             :  * tracing_snapshot - take a snapshot of the current buffer.
    1136             :  *
    1137             :  * This causes a swap between the snapshot buffer and the current live
    1138             :  * tracing buffer. You can use this to take snapshots of the live
    1139             :  * trace when some condition is triggered, but continue to trace.
    1140             :  *
    1141             :  * Note: make sure the snapshot buffer is allocated first, either
    1142             :  * with tracing_snapshot_alloc() or manually with:
    1143             :  * echo 1 > /sys/kernel/debug/tracing/snapshot
    1144             :  *
    1145             :  * If the snapshot buffer is not allocated, this will stop tracing,
    1146             :  * effectively turning the live buffer into a permanent snapshot.
    1147             :  */
    1148             : void tracing_snapshot(void)
    1149             : {
    1150             :         struct trace_array *tr = &global_trace;
    1151             : 
    1152             :         tracing_snapshot_instance(tr);
    1153             : }
    1154             : EXPORT_SYMBOL_GPL(tracing_snapshot);
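                     : 
                     : /*
                     :  * Editor's example (a sketch; status and MY_ERR_BIT are hypothetical):
                     :  * capture the trace at the moment an error is detected while letting
                     :  * tracing continue afterwards:
                     :  *
                     :  *	if (status & MY_ERR_BIT)
                     :  *		tracing_snapshot();
                     :  */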
    1155             : 
    1156             : /**
    1157             :  * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
    1158             :  * @tr:         The tracing instance to snapshot
    1159             :  * @cond_data:  The data to be tested conditionally, and possibly saved
    1160             :  *
    1161             :  * This is the same as tracing_snapshot() except that the snapshot is
    1162             :  * conditional: the snapshot only happens if the
    1163             :  * cond_snapshot.update() implementation, when passed the cond_data,
    1164             :  * returns true. In other words, the trace array's cond_snapshot
    1165             :  * update() operation uses the cond_data to decide whether the
    1166             :  * snapshot should be taken and, if so, presumably saves the data along
    1167             :  * with the snapshot.
    1168             :  */
    1169             : void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
    1170             : {
    1171             :         tracing_snapshot_instance_cond(tr, cond_data);
    1172             : }
    1173             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
    1174             : 
    1175             : /**
    1176             :  * tracing_snapshot_cond_data - get the user data associated with a snapshot
    1177             :  * @tr:         The tracing instance
    1178             :  *
    1179             :  * When the user enables a conditional snapshot using
    1180             :  * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
    1181             :  * with the snapshot.  This accessor is used to retrieve it.
    1182             :  *
    1183             :  * Should not be called from cond_snapshot.update(), since this
    1184             :  * function takes the tr->max_lock, which the caller of
    1185             :  * cond_snapshot.update() already holds.
    1186             :  *
    1187             :  * Returns the cond_data associated with the trace array's snapshot.
    1188             :  */
    1189             : void *tracing_cond_snapshot_data(struct trace_array *tr)
    1190             : {
    1191             :         void *cond_data = NULL;
    1192             : 
    1193             :         arch_spin_lock(&tr->max_lock);
    1194             : 
    1195             :         if (tr->cond_snapshot)
    1196             :                 cond_data = tr->cond_snapshot->cond_data;
    1197             : 
    1198             :         arch_spin_unlock(&tr->max_lock);
    1199             : 
    1200             :         return cond_data;
    1201             : }
    1202             : EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
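                     : 
                     : /*
                     :  * Editor's example (a sketch; struct my_ctx and its hits field are
                     :  * hypothetical): reading back the data that was handed to
                     :  * tracing_snapshot_cond_enable() after a conditional snapshot fired:
                     :  *
                     :  *	struct my_ctx *ctx = tracing_cond_snapshot_data(tr);
                     :  *
                     :  *	if (ctx)
                     :  *		pr_info("snapshot taken, hits=%d\n", ctx->hits);
                     :  */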
    1203             : 
    1204             : static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
    1205             :                                         struct array_buffer *size_buf, int cpu_id);
    1206             : static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
    1207             : 
    1208             : int tracing_alloc_snapshot_instance(struct trace_array *tr)
    1209             : {
    1210             :         int ret;
    1211             : 
    1212             :         if (!tr->allocated_snapshot) {
    1213             : 
    1214             :                 /* allocate spare buffer */
    1215             :                 ret = resize_buffer_duplicate_size(&tr->max_buffer,
    1216             :                                    &tr->array_buffer, RING_BUFFER_ALL_CPUS);
    1217             :                 if (ret < 0)
    1218             :                         return ret;
    1219             : 
    1220             :                 tr->allocated_snapshot = true;
    1221             :         }
    1222             : 
    1223             :         return 0;
    1224             : }
    1225             : 
    1226             : static void free_snapshot(struct trace_array *tr)
    1227             : {
    1228             :         /*
    1229             :          * We don't free the ring buffer; instead, we resize it because
    1230             :          * the max_tr ring buffer has some state (e.g. ring->clock) that
    1231             :          * we want to preserve.
    1232             :          */
    1233             :         ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
    1234             :         set_buffer_entries(&tr->max_buffer, 1);
    1235             :         tracing_reset_online_cpus(&tr->max_buffer);
    1236             :         tr->allocated_snapshot = false;
    1237             : }
    1238             : 
    1239             : /**
    1240             :  * tracing_alloc_snapshot - allocate snapshot buffer.
    1241             :  *
    1242             :  * This only allocates the snapshot buffer if it isn't already
    1243             :  * allocated - it doesn't also take a snapshot.
    1244             :  *
    1245             :  * This is meant to be used in cases where the snapshot buffer needs
    1246             :  * to be set up for events that can't sleep but need to be able to
    1247             :  * trigger a snapshot.
    1248             :  */
    1249             : int tracing_alloc_snapshot(void)
    1250             : {
    1251             :         struct trace_array *tr = &global_trace;
    1252             :         int ret;
    1253             : 
    1254             :         ret = tracing_alloc_snapshot_instance(tr);
    1255             :         WARN_ON(ret < 0);
    1256             : 
    1257             :         return ret;
    1258             : }
    1259             : EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
    1260             : 
    1261             : /**
    1262             :  * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
    1263             :  *
    1264             :  * This is similar to tracing_snapshot(), but it will allocate the
    1265             :  * snapshot buffer if it isn't already allocated. Use this only
    1266             :  * where it is safe to sleep, as the allocation may sleep.
    1267             :  *
    1268             :  * This causes a swap between the snapshot buffer and the current live
    1269             :  * tracing buffer. You can use this to take snapshots of the live
    1270             :  * trace when some condition is triggered, but continue to trace.
    1271             :  */
    1272             : void tracing_snapshot_alloc(void)
    1273             : {
    1274             :         int ret;
    1275             : 
    1276             :         ret = tracing_alloc_snapshot();
    1277             :         if (ret < 0)
    1278             :                 return;
    1279             : 
    1280             :         tracing_snapshot();
    1281             : }
    1282             : EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
    1283             : 
    1284             : /**
    1285             :  * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
    1286             :  * @tr:         The tracing instance
    1287             :  * @cond_data:  User data to associate with the snapshot
    1288             :  * @update:     Implementation of the cond_snapshot update function
    1289             :  *
    1290             :  * Check whether the conditional snapshot for the given instance has
    1291             :  * already been enabled, or if the current tracer is already using a
    1292             :  * snapshot; if so, return -EBUSY, else create a cond_snapshot and
    1293             :  * save the cond_data and update function inside.
    1294             :  *
    1295             :  * Returns 0 if successful, error otherwise.
    1296             :  */
    1297             : int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
    1298             :                                  cond_update_fn_t update)
    1299             : {
    1300             :         struct cond_snapshot *cond_snapshot;
    1301             :         int ret = 0;
    1302             : 
    1303             :         cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
    1304             :         if (!cond_snapshot)
    1305             :                 return -ENOMEM;
    1306             : 
    1307             :         cond_snapshot->cond_data = cond_data;
    1308             :         cond_snapshot->update = update;
    1309             : 
    1310             :         mutex_lock(&trace_types_lock);
    1311             : 
    1312             :         ret = tracing_alloc_snapshot_instance(tr);
    1313             :         if (ret)
    1314             :                 goto fail_unlock;
    1315             : 
    1316             :         if (tr->current_trace->use_max_tr) {
    1317             :                 ret = -EBUSY;
    1318             :                 goto fail_unlock;
    1319             :         }
    1320             : 
    1321             :         /*
    1322             :          * The cond_snapshot can only change to NULL without the
    1323             :          * trace_types_lock held. We don't care if we race with it going
    1324             :          * to NULL, but we do want to make sure it is not set to
    1325             :          * something other than NULL when we get here; that check is
    1326             :          * safe while holding only the trace_types_lock, without
    1327             :          * having to take the max_lock.
    1328             :          */
    1329             :         if (tr->cond_snapshot) {
    1330             :                 ret = -EBUSY;
    1331             :                 goto fail_unlock;
    1332             :         }
    1333             : 
    1334             :         arch_spin_lock(&tr->max_lock);
    1335             :         tr->cond_snapshot = cond_snapshot;
    1336             :         arch_spin_unlock(&tr->max_lock);
    1337             : 
    1338             :         mutex_unlock(&trace_types_lock);
    1339             : 
    1340             :         return ret;
    1341             : 
    1342             :  fail_unlock:
    1343             :         mutex_unlock(&trace_types_lock);
    1344             :         kfree(cond_snapshot);
    1345             :         return ret;
    1346             : }
    1347             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
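                     : 
                     : /*
                     :  * Editor's example (a sketch; my_update(), struct my_ctx and ctx are
                     :  * hypothetical): a complete conditional-snapshot flow. The update
                     :  * callback matches cond_update_fn_t and decides whether the snapshot
                     :  * fires; here only the first hit triggers it:
                     :  *
                     :  *	static bool my_update(struct trace_array *tr, void *cond_data)
                     :  *	{
                     :  *		struct my_ctx *ctx = cond_data;
                     :  *
                     :  *		return ++ctx->hits == 1;
                     :  *	}
                     :  *
                     :  *	ret = tracing_snapshot_cond_enable(tr, ctx, my_update);
                     :  *	...
                     :  *	tracing_snapshot_cond(tr, ctx);
                     :  *	...
                     :  *	tracing_snapshot_cond_disable(tr);
                     :  *
                     :  * When my_update() returns false, update_max_tr() skips the buffer
                     :  * swap, so the snapshot is not taken.
                     :  */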
    1348             : 
    1349             : /**
    1350             :  * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
    1351             :  * @tr:         The tracing instance
    1352             :  *
    1353             :  * Check whether the conditional snapshot for the given instance is
    1354             :  * enabled; if so, free the cond_snapshot associated with it,
    1355             :  * otherwise return -EINVAL.
    1356             :  *
    1357             :  * Returns 0 if successful, error otherwise.
    1358             :  */
    1359             : int tracing_snapshot_cond_disable(struct trace_array *tr)
    1360             : {
    1361             :         int ret = 0;
    1362             : 
    1363             :         arch_spin_lock(&tr->max_lock);
    1364             : 
    1365             :         if (!tr->cond_snapshot)
    1366             :                 ret = -EINVAL;
    1367             :         else {
    1368             :                 kfree(tr->cond_snapshot);
    1369             :                 tr->cond_snapshot = NULL;
    1370             :         }
    1371             : 
    1372             :         arch_spin_unlock(&tr->max_lock);
    1373             : 
    1374             :         return ret;
    1375             : }
    1376             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
    1377             : #else
    1378           0 : void tracing_snapshot(void)
    1379             : {
    1380           0 :         WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
    1381           0 : }
    1382             : EXPORT_SYMBOL_GPL(tracing_snapshot);
    1383           0 : void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
    1384             : {
    1385           0 :         WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
    1386           0 : }
    1387             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
    1388           0 : int tracing_alloc_snapshot(void)
    1389             : {
    1390           0 :         WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
    1391           0 :         return -ENODEV;
    1392             : }
    1393             : EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
    1394           0 : void tracing_snapshot_alloc(void)
    1395             : {
    1396             :         /* tracing_snapshot() issues the "not enabled" warning */
    1397           0 :         tracing_snapshot();
    1398           0 : }
    1399             : EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
    1400           0 : void *tracing_cond_snapshot_data(struct trace_array *tr)
    1401             : {
    1402           0 :         return NULL;
    1403             : }
    1404             : EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
    1405           0 : int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
    1406             : {
    1407           0 :         return -ENODEV;
    1408             : }
    1409             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
    1410           0 : int tracing_snapshot_cond_disable(struct trace_array *tr)
    1411             : {
    1412           0 :         return -ENODEV;
    1413             : }
    1414             : EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
    1415             : #endif /* CONFIG_TRACER_SNAPSHOT */
    1416             : 
    1417           0 : void tracer_tracing_off(struct trace_array *tr)
    1418             : {
    1419           0 :         if (tr->array_buffer.buffer)
    1420           0 :                 ring_buffer_record_off(tr->array_buffer.buffer);
    1421             :         /*
    1422             :          * This flag is looked at when buffers haven't been allocated
    1423             :          * yet, or by some tracers (like irqsoff) that just want to
    1424             :          * know if the ring buffer has been disabled and can tolerate
    1425             :          * the race where it gets disabled while a record still lands.
    1426             :          * As the check is in the fast path of the tracers, it is more
    1427             :          * important to be fast than accurate.
    1428             :          */
    1429           0 :         tr->buffer_disabled = 1;
    1430             :         /* Make the flag seen by readers */
    1431           0 :         smp_wmb();
    1432           0 : }
    1433             : 
    1434             : /**
    1435             :  * tracing_off - turn off tracing buffers
    1436             :  *
    1437             :  * This function stops the tracing buffers from recording data.
    1438             :  * It does not disable any overhead the tracers themselves may
    1439             :  * be causing. This function simply makes all writes to
    1440             :  * the ring buffers fail.
    1441             :  */
    1442           0 : void tracing_off(void)
    1443             : {
    1444           0 :         tracer_tracing_off(&global_trace);
    1445           0 : }
    1446             : EXPORT_SYMBOL_GPL(tracing_off);
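                     : 
                     : /*
                     :  * Editor's example (a sketch; bad_state is hypothetical): freeze the
                     :  * buffers when a rare condition is seen, so the events leading up to
                     :  * it stay available in /sys/kernel/debug/tracing/trace; the
                     :  * disable_trace_on_warning() helper below does essentially this for
                     :  * all WARN()s:
                     :  *
                     :  *	if (WARN_ON(bad_state))
                     :  *		tracing_off();
                     :  */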
    1447             : 
    1448           2 : void disable_trace_on_warning(void)
    1449             : {
    1450           2 :         if (__disable_trace_on_warning) {
    1451           0 :                 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
    1452             :                         "Disabling tracing due to warning\n");
    1453           0 :                 tracing_off();
    1454             :         }
    1455           2 : }
    1456             : 
    1457             : /**
    1458             :  * tracer_tracing_is_on - show the real state of the ring buffer
    1459             :  * @tr: the trace array whose ring buffer state is queried
    1460             :  *
    1461             :  * Shows whether the ring buffer of @tr is actually recording.
    1462             :  */
    1463           0 : bool tracer_tracing_is_on(struct trace_array *tr)
    1464             : {
    1465           0 :         if (tr->array_buffer.buffer)
    1466           0 :                 return ring_buffer_record_is_on(tr->array_buffer.buffer);
    1467           0 :         return !tr->buffer_disabled;
    1468             : }
    1469             : 
    1470             : /**
    1471             :  * tracing_is_on - show state of ring buffers enabled
    1472             :  */
    1473           0 : int tracing_is_on(void)
    1474             : {
    1475           0 :         return tracer_tracing_is_on(&global_trace);
    1476             : }
    1477             : EXPORT_SYMBOL_GPL(tracing_is_on);
    1478             : 
    1479           0 : static int __init set_buf_size(char *str)
    1480             : {
    1481           0 :         unsigned long buf_size;
    1482             : 
    1483           0 :         if (!str)
    1484             :                 return 0;
    1485           0 :         buf_size = memparse(str, &str);
    1486             :         /* nr_entries cannot be zero */
    1487           0 :         if (buf_size == 0)
    1488             :                 return 0;
    1489           0 :         trace_buf_size = buf_size;
    1490           0 :         return 1;
    1491             : }
    1492             : __setup("trace_buf_size=", set_buf_size);
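                     : 
                     : /*
                     :  * Editor's note: memparse() accepts K/M/G suffixes, so the per-CPU
                     :  * buffer size can be set on the kernel command line with, e.g.:
                     :  *
                     :  *	trace_buf_size=10M
                     :  */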
    1493             : 
    1494           0 : static int __init set_tracing_thresh(char *str)
    1495             : {
    1496           0 :         unsigned long threshold;
    1497           0 :         int ret;
    1498             : 
    1499           0 :         if (!str)
    1500             :                 return 0;
    1501           0 :         ret = kstrtoul(str, 0, &threshold);
    1502           0 :         if (ret < 0)
    1503             :                 return 0;
    1504           0 :         tracing_thresh = threshold * 1000;
    1505           0 :         return 1;
    1506             : }
    1507             : __setup("tracing_thresh=", set_tracing_thresh);
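                     : 
                     : /*
                     :  * Editor's note: the value is given in microseconds and stored in
                     :  * nanoseconds (hence the multiplication by 1000 above), so booting
                     :  * with:
                     :  *
                     :  *	tracing_thresh=100
                     :  *
                     :  * makes the latency tracers record only latencies above 100 usecs.
                     :  */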
    1508             : 
    1509           0 : unsigned long nsecs_to_usecs(unsigned long nsecs)
    1510             : {
    1511           0 :         return nsecs / 1000;
    1512             : }
    1513             : 
    1514             : /*
    1515             :  * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
    1516             :  * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
    1517             :  * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
    1518             :  * of strings in the order that the evals (enum) were defined.
    1519             :  */
    1520             : #undef C
    1521             : #define C(a, b) b
    1522             : 
    1523             : /* These must match the bit positions in trace_iterator_flags */
    1524             : static const char *trace_options[] = {
    1525             :         TRACE_FLAGS
    1526             :         NULL
    1527             : };
    1528             : 
    1529             : static struct {
    1530             :         u64 (*func)(void);
    1531             :         const char *name;
    1532             :         int in_ns;              /* is this clock in nanoseconds? */
    1533             : } trace_clocks[] = {
    1534             :         { trace_clock_local,            "local",        1 },
    1535             :         { trace_clock_global,           "global",       1 },
    1536             :         { trace_clock_counter,          "counter",      0 },
    1537             :         { trace_clock_jiffies,          "uptime",       0 },
    1538             :         { trace_clock,                  "perf",         1 },
    1539             :         { ktime_get_mono_fast_ns,       "mono",         1 },
    1540             :         { ktime_get_raw_fast_ns,        "mono_raw",     1 },
    1541             :         { ktime_get_boot_fast_ns,       "boot",         1 },
    1542             :         ARCH_TRACE_CLOCKS
    1543             : };
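                     : 
                     : /*
                     :  * Editor's note: one of the clock names above can be selected at run
                     :  * time through tracefs, e.g.:
                     :  *
                     :  *	echo mono > /sys/kernel/debug/tracing/trace_clock
                     :  */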
    1544             : 
    1545           0 : bool trace_clock_in_ns(struct trace_array *tr)
    1546             : {
    1547           0 :         if (trace_clocks[tr->clock_id].in_ns)
    1548           0 :                 return true;
    1549             : 
    1550             :         return false;
    1551             : }
    1552             : 
    1553             : /*
    1554             :  * trace_parser_get_init - allocates the buffer for the trace parser
    1555             :  */
    1556           0 : int trace_parser_get_init(struct trace_parser *parser, int size)
    1557             : {
    1558           0 :         memset(parser, 0, sizeof(*parser));
    1559             : 
    1560           0 :         parser->buffer = kmalloc(size, GFP_KERNEL);
    1561           0 :         if (!parser->buffer)
    1562             :                 return 1;
    1563             : 
    1564           0 :         parser->size = size;
    1565           0 :         return 0;
    1566             : }
    1567             : 
    1568             : /*
    1569             :  * trace_parser_put - frees the buffer for trace parser
    1570             :  */
    1571           0 : void trace_parser_put(struct trace_parser *parser)
    1572             : {
    1573           0 :         kfree(parser->buffer);
    1574           0 :         parser->buffer = NULL;
    1575           0 : }
    1576             : 
    1577             : /*
    1578             :  * trace_get_user - reads the user input string separated by space
    1579             :  * (matched by isspace(ch))
    1580             :  *
    1581             :  * For each string found the 'struct trace_parser' is updated,
    1582             :  * and the function returns.
    1583             :  *
    1584             :  * Returns number of bytes read.
    1585             :  *
    1586             :  * See kernel/trace/trace.h for 'struct trace_parser' details.
    1587             :  */
    1588           0 : int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
    1589             :         size_t cnt, loff_t *ppos)
    1590             : {
    1591           0 :         char ch;
    1592           0 :         size_t read = 0;
    1593           0 :         ssize_t ret;
    1594             : 
    1595           0 :         if (!*ppos)
    1596           0 :                 trace_parser_clear(parser);
    1597             : 
    1598           0 :         ret = get_user(ch, ubuf++);
    1599           0 :         if (ret)
    1600           0 :                 goto out;
    1601             : 
    1602           0 :         read++;
    1603           0 :         cnt--;
    1604             : 
    1605             :         /*
    1606             :          * The parser is not finished with the last write,
    1607             :          * continue reading the user input without skipping spaces.
    1608             :          */
    1609           0 :         if (!parser->cont) {
    1610             :                 /* skip white space */
    1611           0 :                 while (cnt && isspace(ch)) {
    1612           0 :                         ret = get_user(ch, ubuf++);
    1613           0 :                         if (ret)
    1614           0 :                                 goto out;
    1615           0 :                         read++;
    1616           0 :                         cnt--;
    1617             :                 }
    1618             : 
    1619           0 :                 parser->idx = 0;
    1620             : 
    1621             :                 /* only spaces were written */
    1622           0 :                 if (isspace(ch) || !ch) {
    1623           0 :                         *ppos += read;
    1624           0 :                         ret = read;
    1625           0 :                         goto out;
    1626             :                 }
    1627             :         }
    1628             : 
    1629             :         /* read the non-space input */
    1630           0 :         while (cnt && !isspace(ch) && ch) {
    1631           0 :                 if (parser->idx < parser->size - 1)
    1632           0 :                         parser->buffer[parser->idx++] = ch;
    1633             :                 else {
    1634           0 :                         ret = -EINVAL;
    1635           0 :                         goto out;
    1636             :                 }
    1637           0 :                 ret = get_user(ch, ubuf++);
    1638           0 :                 if (ret)
    1639           0 :                         goto out;
    1640           0 :                 read++;
    1641           0 :                 cnt--;
    1642             :         }
    1643             : 
    1644             :         /* Either the token is complete or we must wait for another call. */
    1645           0 :         if (isspace(ch) || !ch) {
    1646           0 :                 parser->buffer[parser->idx] = 0;
    1647           0 :                 parser->cont = false;
    1648           0 :         } else if (parser->idx < parser->size - 1) {
    1649           0 :                 parser->cont = true;
    1650           0 :                 parser->buffer[parser->idx++] = ch;
    1651             :                 /* Make sure the parsed string always terminates with '\0'. */
    1652           0 :                 parser->buffer[parser->idx] = 0;
    1653             :         } else {
    1654           0 :                 ret = -EINVAL;
    1655           0 :                 goto out;
    1656             :         }
    1657             : 
    1658           0 :         *ppos += read;
    1659           0 :         ret = read;
    1660             : 
    1661           0 : out:
    1662           0 :         return ret;
    1663             : }
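                     : 
                     : /*
                     :  * Editor's example (a sketch; my_write() is hypothetical): the usual
                     :  * lifecycle of a trace_parser in a tracefs write handler, pairing
                     :  * trace_parser_get_init()/trace_parser_put() around trace_get_user():
                     :  *
                     :  *	static ssize_t my_write(struct file *filp, const char __user *ubuf,
                     :  *				size_t cnt, loff_t *ppos)
                     :  *	{
                     :  *		struct trace_parser parser;
                     :  *		ssize_t ret;
                     :  *
                     :  *		if (trace_parser_get_init(&parser, PAGE_SIZE))
                     :  *			return -ENOMEM;
                     :  *
                     :  *		ret = trace_get_user(&parser, ubuf, cnt, ppos);
                     :  *		if (ret >= 0 && trace_parser_loaded(&parser))
                     :  *			pr_info("token: %s\n", parser.buffer);
                     :  *
                     :  *		trace_parser_put(&parser);
                     :  *		return ret;
                     :  *	}
                     :  */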
    1664             : 
    1665             : /* TODO add a seq_buf_to_buffer() */
    1666           0 : static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
    1667             : {
    1668           0 :         int len;
    1669             : 
    1670           0 :         if (trace_seq_used(s) <= s->seq.readpos)
    1671             :                 return -EBUSY;
    1672             : 
    1673           0 :         len = trace_seq_used(s) - s->seq.readpos;
    1674           0 :         if (cnt > len)
    1675             :                 cnt = len;
    1676           0 :         memcpy(buf, s->buffer + s->seq.readpos, cnt);
    1677             : 
    1678           0 :         s->seq.readpos += cnt;
    1679           0 :         return cnt;
    1680             : }
    1681             : 
    1682             : unsigned long __read_mostly     tracing_thresh;
    1683             : static const struct file_operations tracing_max_lat_fops;
    1684             : 
    1685             : #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
    1686             :         defined(CONFIG_FSNOTIFY)
    1687             : 
    1688             : static struct workqueue_struct *fsnotify_wq;
    1689             : 
    1690             : static void latency_fsnotify_workfn(struct work_struct *work)
    1691             : {
    1692             :         struct trace_array *tr = container_of(work, struct trace_array,
    1693             :                                               fsnotify_work);
    1694             :         fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
    1695             : }
    1696             : 
    1697             : static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
    1698             : {
    1699             :         struct trace_array *tr = container_of(iwork, struct trace_array,
    1700             :                                               fsnotify_irqwork);
    1701             :         queue_work(fsnotify_wq, &tr->fsnotify_work);
    1702             : }
    1703             : 
    1704             : static void trace_create_maxlat_file(struct trace_array *tr,
    1705             :                                      struct dentry *d_tracer)
    1706             : {
    1707             :         INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
    1708             :         init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
    1709             :         tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
    1710             :                                               d_tracer, &tr->max_latency,
    1711             :                                               &tracing_max_lat_fops);
    1712             : }
    1713             : 
    1714             : __init static int latency_fsnotify_init(void)
    1715             : {
    1716             :         fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
    1717             :                                       WQ_UNBOUND | WQ_HIGHPRI, 0);
    1718             :         if (!fsnotify_wq) {
    1719             :                 pr_err("Unable to allocate tr_max_lat_wq\n");
    1720             :                 return -ENOMEM;
    1721             :         }
    1722             :         return 0;
    1723             : }
    1724             : 
    1725             : late_initcall_sync(latency_fsnotify_init);
    1726             : 
    1727             : void latency_fsnotify(struct trace_array *tr)
    1728             : {
    1729             :         if (!fsnotify_wq)
    1730             :                 return;
    1731             :         /*
    1732             :          * We cannot call queue_work(&tr->fsnotify_work) from here because it's
    1733             :          * possible that we are called from __schedule() or do_idle(), which
    1734             :          * could cause a deadlock.
    1735             :          */
    1736             :         irq_work_queue(&tr->fsnotify_irqwork);
    1737             : }
    1738             : 
    1739             : /*
    1740             :  * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
    1741             :  *  defined(CONFIG_FSNOTIFY)
    1742             :  */
    1743             : #else
    1744             : 
    1745             : #define trace_create_maxlat_file(tr, d_tracer)                          \
    1746             :         trace_create_file("tracing_max_latency", 0644, d_tracer,      \
    1747             :                           &tr->max_latency, &tracing_max_lat_fops)
    1748             : 
    1749             : #endif
    1750             : 
    1751             : #ifdef CONFIG_TRACER_MAX_TRACE
    1752             : /*
    1753             :  * Copy the new maximum trace into the separate maximum-trace
    1754             :  * structure. (This way the maximum trace is permanently saved
    1755             :  * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
    1756             :  */
    1757             : static void
    1758             : __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
    1759             : {
    1760             :         struct array_buffer *trace_buf = &tr->array_buffer;
    1761             :         struct array_buffer *max_buf = &tr->max_buffer;
    1762             :         struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
    1763             :         struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
    1764             : 
    1765             :         max_buf->cpu = cpu;
    1766             :         max_buf->time_start = data->preempt_timestamp;
    1767             : 
    1768             :         max_data->saved_latency = tr->max_latency;
    1769             :         max_data->critical_start = data->critical_start;
    1770             :         max_data->critical_end = data->critical_end;
    1771             : 
    1772             :         strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
    1773             :         max_data->pid = tsk->pid;
    1774             :         /*
    1775             :          * If tsk == current, then use current_uid(), as that does not use
    1776             :          * RCU. The irq tracer can be called out of RCU scope.
    1777             :          */
    1778             :         if (tsk == current)
    1779             :                 max_data->uid = current_uid();
    1780             :         else
    1781             :                 max_data->uid = task_uid(tsk);
    1782             : 
    1783             :         max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
    1784             :         max_data->policy = tsk->policy;
    1785             :         max_data->rt_priority = tsk->rt_priority;
    1786             : 
    1787             :         /* record this task's comm */
    1788             :         tracing_record_cmdline(tsk);
    1789             :         latency_fsnotify(tr);
    1790             : }
    1791             : 
    1792             : /**
    1793             :  * update_max_tr - snapshot all trace buffers from global_trace to max_tr
    1794             :  * @tr: trace array to snapshot
    1795             :  * @tsk: the task with the latency
    1796             :  * @cpu: The cpu that initiated the trace.
    1797             :  * @cond_data: User data associated with a conditional snapshot
    1798             :  *
    1799             :  * Flip the buffers between the @tr and the max_tr and record information
    1800             :  * about which task was the cause of this latency.
    1801             :  */
    1802             : void
    1803             : update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
    1804             :               void *cond_data)
    1805             : {
    1806             :         if (tr->stop_count)
    1807             :                 return;
    1808             : 
    1809             :         WARN_ON_ONCE(!irqs_disabled());
    1810             : 
    1811             :         if (!tr->allocated_snapshot) {
    1812             :                 /* Only the nop tracer should hit this when disabling */
    1813             :                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
    1814             :                 return;
    1815             :         }
    1816             : 
    1817             :         arch_spin_lock(&tr->max_lock);
    1818             : 
    1819             :         /* Inherit the recordable setting from array_buffer */
    1820             :         if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
    1821             :                 ring_buffer_record_on(tr->max_buffer.buffer);
    1822             :         else
    1823             :                 ring_buffer_record_off(tr->max_buffer.buffer);
    1824             : 
    1825             : #ifdef CONFIG_TRACER_SNAPSHOT
    1826             :         if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
    1827             :                 goto out_unlock;
    1828             : #endif
    1829             :         swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
    1830             : 
    1831             :         __update_max_tr(tr, tsk, cpu);
    1832             : 
    1833             :  out_unlock:
    1834             :         arch_spin_unlock(&tr->max_lock);
    1835             : }
    1836             : 
    1837             : /**
    1838             :  * update_max_tr_single - only copy one trace over, and reset the rest
    1839             :  * @tr: trace array whose CPU buffer is copied
    1840             :  * @tsk: task with the latency
    1841             :  * @cpu: the cpu of the buffer to copy.
    1842             :  *
    1843             :  * Flip the trace of a single CPU buffer between the @tr and the max_tr.
    1844             :  */
    1845             : void
    1846             : update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
    1847             : {
    1848             :         int ret;
    1849             : 
    1850             :         if (tr->stop_count)
    1851             :                 return;
    1852             : 
    1853             :         WARN_ON_ONCE(!irqs_disabled());
    1854             :         if (!tr->allocated_snapshot) {
    1855             :                 /* Only the nop tracer should hit this when disabling */
    1856             :                 WARN_ON_ONCE(tr->current_trace != &nop_trace);
    1857             :                 return;
    1858             :         }
    1859             : 
    1860             :         arch_spin_lock(&tr->max_lock);
    1861             : 
    1862             :         ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
    1863             : 
    1864             :         if (ret == -EBUSY) {
    1865             :                 /*
    1866             :                  * We failed to swap the buffer due to a commit taking
    1867             :                  * place on this CPU. We fail to record, but we reset
    1868             :                  * the max trace buffer (no one writes directly to it)
    1869             :                  * and flag that it failed.
    1870             :                  */
    1871             :                 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
    1872             :                         "Failed to swap buffers due to commit in progress\n");
    1873             :         }
    1874             : 
    1875             :         WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
    1876             : 
    1877             :         __update_max_tr(tr, tsk, cpu);
    1878             :         arch_spin_unlock(&tr->max_lock);
    1879             : }
    1880             : #endif /* CONFIG_TRACER_MAX_TRACE */
    1881             : 
    1882           0 : static int wait_on_pipe(struct trace_iterator *iter, int full)
    1883             : {
    1884             :         /* Iterators are static; they should be filled or empty */
    1885           0 :         if (trace_buffer_iter(iter, iter->cpu_file))
    1886             :                 return 0;
    1887             : 
    1888           0 :         return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
    1889             :                                 full);
    1890             : }
    1891             : 
    1892             : #ifdef CONFIG_FTRACE_STARTUP_TEST
    1893             : static bool selftests_can_run;
    1894             : 
    1895             : struct trace_selftests {
    1896             :         struct list_head                list;
    1897             :         struct tracer                   *type;
    1898             : };
    1899             : 
    1900             : static LIST_HEAD(postponed_selftests);
    1901             : 
    1902             : static int save_selftest(struct tracer *type)
    1903             : {
    1904             :         struct trace_selftests *selftest;
    1905             : 
    1906             :         selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
    1907             :         if (!selftest)
    1908             :                 return -ENOMEM;
    1909             : 
    1910             :         selftest->type = type;
    1911             :         list_add(&selftest->list, &postponed_selftests);
    1912             :         return 0;
    1913             : }
    1914             : 
    1915             : static int run_tracer_selftest(struct tracer *type)
    1916             : {
    1917             :         struct trace_array *tr = &global_trace;
    1918             :         struct tracer *saved_tracer = tr->current_trace;
    1919             :         int ret;
    1920             : 
    1921             :         if (!type->selftest || tracing_selftest_disabled)
    1922             :                 return 0;
    1923             : 
    1924             :         /*
    1925             :          * If a tracer registers early in boot up (before scheduling is
    1926             :          * initialized and such), then do not run its selftests yet.
    1927             :          * Instead, run it a little later in the boot process.
    1928             :          */
    1929             :         if (!selftests_can_run)
    1930             :                 return save_selftest(type);
    1931             : 
    1932             :         if (!tracing_is_on()) {
    1933             :                 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
    1934             :                         type->name);
    1935             :                 return 0;
    1936             :         }
    1937             : 
    1938             :         /*
    1939             :          * Run a selftest on this tracer.
    1940             :          * Here we reset the trace buffer, and set the current
    1941             :          * tracer to be this tracer. The tracer can then run some
    1942             :          * internal tracing to verify that everything is in order.
    1943             :          * If we fail, we do not register this tracer.
    1944             :          */
    1945             :         tracing_reset_online_cpus(&tr->array_buffer);
    1946             : 
    1947             :         tr->current_trace = type;
    1948             : 
    1949             : #ifdef CONFIG_TRACER_MAX_TRACE
    1950             :         if (type->use_max_tr) {
    1951             :                 /* If we expanded the buffers, make sure the max is expanded too */
    1952             :                 if (ring_buffer_expanded)
    1953             :                         ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
    1954             :                                            RING_BUFFER_ALL_CPUS);
    1955             :                 tr->allocated_snapshot = true;
    1956             :         }
    1957             : #endif
    1958             : 
    1959             :         /* the test is responsible for initializing and enabling */
    1960             :         pr_info("Testing tracer %s: ", type->name);
    1961             :         ret = type->selftest(type, tr);
    1962             :         /* the test is responsible for resetting too */
    1963             :         tr->current_trace = saved_tracer;
    1964             :         if (ret) {
    1965             :                 printk(KERN_CONT "FAILED!\n");
    1966             :                 /* Add the warning after printing 'FAILED' */
    1967             :                 WARN_ON(1);
    1968             :                 return -1;
    1969             :         }
    1970             :         /* Only reset on passing, to avoid touching corrupted buffers */
    1971             :         tracing_reset_online_cpus(&tr->array_buffer);
    1972             : 
    1973             : #ifdef CONFIG_TRACER_MAX_TRACE
    1974             :         if (type->use_max_tr) {
    1975             :                 tr->allocated_snapshot = false;
    1976             : 
    1977             :                 /* Shrink the max buffer again */
    1978             :                 if (ring_buffer_expanded)
    1979             :                         ring_buffer_resize(tr->max_buffer.buffer, 1,
    1980             :                                            RING_BUFFER_ALL_CPUS);
    1981             :         }
    1982             : #endif
    1983             : 
    1984             :         printk(KERN_CONT "PASSED\n");
    1985             :         return 0;
    1986             : }
    1987             : 
    1988             : static __init int init_trace_selftests(void)
    1989             : {
    1990             :         struct trace_selftests *p, *n;
    1991             :         struct tracer *t, **last;
    1992             :         int ret;
    1993             : 
    1994             :         selftests_can_run = true;
    1995             : 
    1996             :         mutex_lock(&trace_types_lock);
    1997             : 
    1998             :         if (list_empty(&postponed_selftests))
    1999             :                 goto out;
    2000             : 
    2001             :         pr_info("Running postponed tracer tests:\n");
    2002             : 
    2003             :         tracing_selftest_running = true;
    2004             :         list_for_each_entry_safe(p, n, &postponed_selftests, list) {
    2005             :                 /* This loop can take minutes when sanitizers are enabled,
    2006             :                  * so let's make sure we allow RCU processing.
    2007             :                  */
    2008             :                 cond_resched();
    2009             :                 ret = run_tracer_selftest(p->type);
    2010             :                 /* If the test fails, then warn and remove from available_tracers */
    2011             :                 if (ret < 0) {
    2012             :                         WARN(1, "tracer: %s failed selftest, disabling\n",
    2013             :                              p->type->name);
    2014             :                         last = &trace_types;
    2015             :                         for (t = trace_types; t; t = t->next) {
    2016             :                                 if (t == p->type) {
    2017             :                                         *last = t->next;
    2018             :                                         break;
    2019             :                                 }
    2020             :                                 last = &t->next;
    2021             :                         }
    2022             :                 }
    2023             :                 list_del(&p->list);
    2024             :                 kfree(p);
    2025             :         }
    2026             :         tracing_selftest_running = false;
    2027             : 
    2028             :  out:
    2029             :         mutex_unlock(&trace_types_lock);
    2030             : 
    2031             :         return 0;
    2032             : }
    2033             : core_initcall(init_trace_selftests);
    2034             : #else
    2035           1 : static inline int run_tracer_selftest(struct tracer *type)
    2036             : {
    2037           1 :         return 0;
    2038             : }
    2039             : #endif /* CONFIG_FTRACE_STARTUP_TEST */
    2040             : 
    2041             : static void add_tracer_options(struct trace_array *tr, struct tracer *t);
    2042             : 
    2043             : static void __init apply_trace_boot_options(void);
    2044             : 
    2045             : /**
    2046             :  * register_tracer - register a tracer with the ftrace system.
    2047             :  * @type: the plugin for the tracer
    2048             :  *
    2049             :  * Register a new plugin tracer.
    2050             :  */
    2051           1 : int __init register_tracer(struct tracer *type)
    2052             : {
    2053           1 :         struct tracer *t;
    2054           1 :         int ret = 0;
    2055             : 
    2056           1 :         if (!type->name) {
    2057           0 :                 pr_info("Tracer must have a name\n");
    2058           0 :                 return -1;
    2059             :         }
    2060             : 
    2061           1 :         if (strlen(type->name) >= MAX_TRACER_SIZE) {
    2062           0 :                 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
    2063           0 :                 return -1;
    2064             :         }
    2065             : 
    2066           1 :         if (security_locked_down(LOCKDOWN_TRACEFS)) {
    2067           0 :                 pr_warn("Can not register tracer %s due to lockdown\n",
    2068             :                            type->name);
    2069           0 :                 return -EPERM;
    2070             :         }
    2071             : 
    2072           1 :         mutex_lock(&trace_types_lock);
    2073             : 
    2074           1 :         tracing_selftest_running = true;
    2075             : 
    2076           1 :         for (t = trace_types; t; t = t->next) {
    2077           0 :                 if (strcmp(type->name, t->name) == 0) {
    2078             :                         /* already found */
    2079           0 :                         pr_info("Tracer %s already registered\n",
    2080             :                                 type->name);
    2081           0 :                         ret = -1;
    2082           0 :                         goto out;
    2083             :                 /* allocate a dummy tracer_flags */
    2084             :         }
    2085             : 
    2086           1 :         if (!type->set_flag)
    2087           0 :                 type->set_flag = &dummy_set_flag;
    2088           1 :         if (!type->flags) {
    2089             :                 /*allocate a dummy tracer_flags*/
    2090           0 :                 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
    2091           0 :                 if (!type->flags) {
    2092           0 :                         ret = -ENOMEM;
    2093           0 :                         goto out;
    2094             :                 }
    2095           0 :                 type->flags->val = 0;
    2096           0 :                 type->flags->opts = dummy_tracer_opt;
    2097             :         } else
    2098           1 :                 if (!type->flags->opts)
    2099           0 :                         type->flags->opts = dummy_tracer_opt;
    2100             : 
    2101             :         /* store the tracer for __set_tracer_option */
    2102           1 :         type->flags->trace = type;
    2103             : 
    2104           1 :         ret = run_tracer_selftest(type);
    2105           1 :         if (ret < 0)
    2106             :                 goto out;
    2107             : 
    2108           1 :         type->next = trace_types;
    2109           1 :         trace_types = type;
    2110           2 :         add_tracer_options(&global_trace, type);
    2111             : 
    2112           1 :  out:
    2113           1 :         tracing_selftest_running = false;
    2114           1 :         mutex_unlock(&trace_types_lock);
    2115             : 
    2116           1 :         if (ret || !default_bootup_tracer)
    2117           1 :                 goto out_unlock;
    2118             : 
    2119           0 :         if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
    2120           0 :                 goto out_unlock;
    2121             : 
    2122           0 :         printk(KERN_INFO "Starting tracer '%s'\n", type->name);
    2123             :         /* Do we want this tracer to start on bootup? */
    2124           0 :         tracing_set_tracer(&global_trace, type->name);
    2125           0 :         default_bootup_tracer = NULL;
    2126             : 
    2127           0 :         apply_trace_boot_options();
    2128             : 
    2129             :         /* Disable other selftests, since this tracer will break them. */
    2130           0 :         disable_tracing_selftest("running a tracer");
    2131             : 
    2132             :  out_unlock:
    2133             :         return ret;
    2134             : }
    2135             : 
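/*
 * A minimal sketch (not part of this file) of registering a tracer
 * with the function above; the tracer name and the init-time hook
 * are purely illustrative.
 */
static struct tracer example_trace __read_mostly = {
        .name           = "example",
};

static int __init example_trace_init(void)
{
        /* Fails with -1 if "example" is already registered. */
        return register_tracer(&example_trace);
}
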
    2136           0 : static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
    2137             : {
    2138           0 :         struct trace_buffer *buffer = buf->buffer;
    2139             : 
    2140           0 :         if (!buffer)
    2141             :                 return;
    2142             : 
    2143           0 :         ring_buffer_record_disable(buffer);
    2144             : 
    2145             :         /* Make sure all commits have finished */
    2146           0 :         synchronize_rcu();
    2147           0 :         ring_buffer_reset_cpu(buffer, cpu);
    2148             : 
    2149           0 :         ring_buffer_record_enable(buffer);
    2150             : }
    2151             : 
    2152           0 : void tracing_reset_online_cpus(struct array_buffer *buf)
    2153             : {
    2154           0 :         struct trace_buffer *buffer = buf->buffer;
    2155             : 
    2156           0 :         if (!buffer)
    2157             :                 return;
    2158             : 
    2159           0 :         ring_buffer_record_disable(buffer);
    2160             : 
    2161             :         /* Make sure all commits have finished */
    2162           0 :         synchronize_rcu();
    2163             : 
    2164           0 :         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
    2165             : 
    2166           0 :         ring_buffer_reset_online_cpus(buffer);
    2167             : 
    2168           0 :         ring_buffer_record_enable(buffer);
    2169             : }
    2170             : 
    2171             : /* Must have trace_types_lock held */
    2172           0 : void tracing_reset_all_online_cpus(void)
    2173             : {
    2174           0 :         struct trace_array *tr;
    2175             : 
    2176           0 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
    2177           0 :                 if (!tr->clear_trace)
    2178           0 :                         continue;
    2179           0 :                 tr->clear_trace = false;
    2180           0 :                 tracing_reset_online_cpus(&tr->array_buffer);
    2181             : #ifdef CONFIG_TRACER_MAX_TRACE
    2182             :                 tracing_reset_online_cpus(&tr->max_buffer);
    2183             : #endif
    2184             :         }
    2185           0 : }
    2186             : 
    2187             : static int *tgid_map;
    2188             : 
    2189             : #define SAVED_CMDLINES_DEFAULT 128
    2190             : #define NO_CMDLINE_MAP UINT_MAX
    2191             : static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
    2192             : struct saved_cmdlines_buffer {
    2193             :         unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
    2194             :         unsigned *map_cmdline_to_pid;
    2195             :         unsigned cmdline_num;
    2196             :         int cmdline_idx;
    2197             :         char *saved_cmdlines;
    2198             : };
    2199             : static struct saved_cmdlines_buffer *savedcmd;
    2200             : 
    2201             : /* temporarily disable recording */
    2202             : static atomic_t trace_record_taskinfo_disabled __read_mostly;
    2203             : 
    2204           0 : static inline char *get_saved_cmdlines(int idx)
    2205             : {
    2206           0 :         return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
    2207             : }
    2208             : 
    2209           0 : static inline void set_cmdline(int idx, const char *cmdline)
    2210             : {
    2211           0 :         strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
    2212           0 : }
    2213             : 
    2214           1 : static int allocate_cmdlines_buffer(unsigned int val,
    2215             :                                     struct saved_cmdlines_buffer *s)
    2216             : {
    2217           1 :         s->map_cmdline_to_pid = kmalloc_array(val,
    2218             :                                               sizeof(*s->map_cmdline_to_pid),
    2219             :                                               GFP_KERNEL);
    2220           1 :         if (!s->map_cmdline_to_pid)
    2221             :                 return -ENOMEM;
    2222             : 
    2223           1 :         s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
    2224           1 :         if (!s->saved_cmdlines) {
    2225           0 :                 kfree(s->map_cmdline_to_pid);
    2226           0 :                 return -ENOMEM;
    2227             :         }
    2228             : 
    2229           1 :         s->cmdline_idx = 0;
    2230           1 :         s->cmdline_num = val;
    2231           1 :         memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
    2232             :                sizeof(s->map_pid_to_cmdline));
    2233           1 :         memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
    2234             :                val * sizeof(*s->map_cmdline_to_pid));
    2235             : 
    2236           1 :         return 0;
    2237             : }
    2238             : 
    2239           1 : static int trace_create_savedcmd(void)
    2240             : {
    2241           1 :         int ret;
    2242             : 
    2243           1 :         savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
    2244           1 :         if (!savedcmd)
    2245             :                 return -ENOMEM;
    2246             : 
    2247           1 :         ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
    2248           1 :         if (ret < 0) {
    2249           0 :                 kfree(savedcmd);
    2250           0 :                 savedcmd = NULL;
    2251           0 :                 return -ENOMEM;
    2252             :         }
    2253             : 
    2254             :         return 0;
    2255             : }
    2256             : 
    2257           0 : int is_tracing_stopped(void)
    2258             : {
    2259           0 :         return global_trace.stop_count;
    2260             : }
    2261             : 
    2262             : /**
    2263             :  * tracing_start - quick start of the tracer
    2264             :  *
    2265             :  * If tracing is enabled but was stopped by tracing_stop,
    2266             :  * this will start the tracer back up.
    2267             :  */
    2268           0 : void tracing_start(void)
    2269             : {
    2270           0 :         struct trace_buffer *buffer;
    2271           0 :         unsigned long flags;
    2272             : 
    2273           0 :         if (tracing_disabled)
    2274             :                 return;
    2275             : 
    2276           0 :         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
    2277           0 :         if (--global_trace.stop_count) {
    2278           0 :                 if (global_trace.stop_count < 0) {
    2279             :                         /* Someone screwed up their debugging */
    2280           0 :                         WARN_ON_ONCE(1);
    2281           0 :                         global_trace.stop_count = 0;
    2282             :                 }
    2283           0 :                 goto out;
    2284             :         }
    2285             : 
    2286             :         /* Prevent the buffers from switching */
    2287           0 :         arch_spin_lock(&global_trace.max_lock);
    2288             : 
    2289           0 :         buffer = global_trace.array_buffer.buffer;
    2290           0 :         if (buffer)
    2291           0 :                 ring_buffer_record_enable(buffer);
    2292             : 
    2293             : #ifdef CONFIG_TRACER_MAX_TRACE
    2294             :         buffer = global_trace.max_buffer.buffer;
    2295             :         if (buffer)
    2296             :                 ring_buffer_record_enable(buffer);
    2297             : #endif
    2298             : 
    2299           0 :         arch_spin_unlock(&global_trace.max_lock);
    2300             : 
    2301           0 :  out:
    2302           0 :         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
    2303             : }
    2304             : 
    2305           0 : static void tracing_start_tr(struct trace_array *tr)
    2306             : {
    2307           0 :         struct trace_buffer *buffer;
    2308           0 :         unsigned long flags;
    2309             : 
    2310           0 :         if (tracing_disabled)
    2311             :                 return;
    2312             : 
    2313             :         /* If global, we need to also start the max tracer */
    2314           0 :         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
    2315           0 :                 return tracing_start();
    2316             : 
    2317           0 :         raw_spin_lock_irqsave(&tr->start_lock, flags);
    2318             : 
    2319           0 :         if (--tr->stop_count) {
    2320           0 :                 if (tr->stop_count < 0) {
    2321             :                         /* Someone screwed up their debugging */
    2322           0 :                         WARN_ON_ONCE(1);
    2323           0 :                         tr->stop_count = 0;
    2324             :                 }
    2325           0 :                 goto out;
    2326             :         }
    2327             : 
    2328           0 :         buffer = tr->array_buffer.buffer;
    2329           0 :         if (buffer)
    2330           0 :                 ring_buffer_record_enable(buffer);
    2331             : 
    2332           0 :  out:
    2333           0 :         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
    2334             : }
    2335             : 
    2336             : /**
    2337             :  * tracing_stop - quick stop of the tracer
    2338             :  *
    2339             :  * Lightweight way to stop tracing. Use in conjunction with
    2340             :  * tracing_start.
    2341             :  */
    2342           0 : void tracing_stop(void)
    2343             : {
    2344           0 :         struct trace_buffer *buffer;
    2345           0 :         unsigned long flags;
    2346             : 
    2347           0 :         raw_spin_lock_irqsave(&global_trace.start_lock, flags);
    2348           0 :         if (global_trace.stop_count++)
    2349           0 :                 goto out;
    2350             : 
    2351             :         /* Prevent the buffers from switching */
    2352           0 :         arch_spin_lock(&global_trace.max_lock);
    2353             : 
    2354           0 :         buffer = global_trace.array_buffer.buffer;
    2355           0 :         if (buffer)
    2356           0 :                 ring_buffer_record_disable(buffer);
    2357             : 
    2358             : #ifdef CONFIG_TRACER_MAX_TRACE
    2359             :         buffer = global_trace.max_buffer.buffer;
    2360             :         if (buffer)
    2361             :                 ring_buffer_record_disable(buffer);
    2362             : #endif
    2363             : 
    2364           0 :         arch_spin_unlock(&global_trace.max_lock);
    2365             : 
    2366           0 :  out:
    2367           0 :         raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
    2368           0 : }
    2369             : 
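/*
 * Illustrative pairing of tracing_stop()/tracing_start() (sketch
 * only): stop_count nests, so every stop needs a matching start
 * before recording resumes.
 */
static void example_pause_and_resume(void)
{
        tracing_stop();         /* increments global_trace.stop_count */
        /* ... inspect the buffers while recording is off ... */
        tracing_start();        /* recording resumes at count zero    */
}
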
    2370           0 : static void tracing_stop_tr(struct trace_array *tr)
    2371             : {
    2372           0 :         struct trace_buffer *buffer;
    2373           0 :         unsigned long flags;
    2374             : 
    2375             :         /* If global, we need to also stop the max tracer */
    2376           0 :         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
    2377           0 :                 return tracing_stop();
    2378             : 
    2379           0 :         raw_spin_lock_irqsave(&tr->start_lock, flags);
    2380           0 :         if (tr->stop_count++)
    2381           0 :                 goto out;
    2382             : 
    2383           0 :         buffer = tr->array_buffer.buffer;
    2384           0 :         if (buffer)
    2385           0 :                 ring_buffer_record_disable(buffer);
    2386             : 
    2387           0 :  out:
    2388           0 :         raw_spin_unlock_irqrestore(&tr->start_lock, flags);
    2389             : }
    2390             : 
    2391           0 : static int trace_save_cmdline(struct task_struct *tsk)
    2392             : {
    2393           0 :         unsigned pid, idx;
    2394             : 
    2395             :         /* treat recording of the idle task as a success */
    2396           0 :         if (!tsk->pid)
    2397             :                 return 1;
    2398             : 
    2399           0 :         if (unlikely(tsk->pid > PID_MAX_DEFAULT))
    2400             :                 return 0;
    2401             : 
    2402             :         /*
    2403             :          * It's not the end of the world if we don't get
    2404             :          * the lock: we don't want to spin, nor do we want
    2405             :          * to disable interrupts, so if the trylock fails,
    2406             :          * simply skip recording this comm until next time.
    2407             :          */
    2408           0 :         if (!arch_spin_trylock(&trace_cmdline_lock))
    2409           0 :                 return 0;
    2410             : 
    2411           0 :         idx = savedcmd->map_pid_to_cmdline[tsk->pid];
    2412           0 :         if (idx == NO_CMDLINE_MAP) {
    2413           0 :                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
    2414             : 
    2415             :                 /*
    2416             :                  * Check whether the cmdline buffer at idx has a pid
    2417             :                  * mapped. We are going to overwrite that entry so we
    2418             :                  * need to clear the map_pid_to_cmdline. Otherwise we
    2419             :                  * would read the new comm for the old pid.
    2420             :                  */
    2421           0 :                 pid = savedcmd->map_cmdline_to_pid[idx];
    2422           0 :                 if (pid != NO_CMDLINE_MAP)
    2423           0 :                         savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
    2424             : 
    2425           0 :                 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
    2426           0 :                 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
    2427             : 
    2428           0 :                 savedcmd->cmdline_idx = idx;
    2429             :         }
    2430             : 
    2431           0 :         set_cmdline(idx, tsk->comm);
    2432             : 
    2433           0 :         arch_spin_unlock(&trace_cmdline_lock);
    2434             : 
    2435           0 :         return 1;
    2436             : }
    2437             : 
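/*
 * Worked example of the eviction above (hypothetical cmdline_num
 * of 2): saving pids 1, 2 and then 3 walks idx through 1, 0, 1,
 * so pid 3 overwrites pid 1's slot and the clearing step resets
 * map_pid_to_cmdline[1] to NO_CMDLINE_MAP. A later lookup of
 * pid 1 then falls back to "<...>".
 */
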
    2438           0 : static void __trace_find_cmdline(int pid, char comm[])
    2439             : {
    2440           0 :         unsigned map;
    2441             : 
    2442           0 :         if (!pid) {
    2443           0 :                 strcpy(comm, "<idle>");
    2444           0 :                 return;
    2445             :         }
    2446             : 
    2447           0 :         if (WARN_ON_ONCE(pid < 0)) {
    2448           0 :                 strcpy(comm, "<XXX>");
    2449           0 :                 return;
    2450             :         }
    2451             : 
    2452           0 :         if (pid > PID_MAX_DEFAULT) {
    2453           0 :                 strcpy(comm, "<...>");
    2454           0 :                 return;
    2455             :         }
    2456             : 
    2457           0 :         map = savedcmd->map_pid_to_cmdline[pid];
    2458           0 :         if (map != NO_CMDLINE_MAP)
    2459           0 :                 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
    2460             :         else
    2461           0 :                 strcpy(comm, "<...>");
    2462             : }
    2463             : 
    2464           0 : void trace_find_cmdline(int pid, char comm[])
    2465             : {
    2466           0 :         preempt_disable();
    2467           0 :         arch_spin_lock(&trace_cmdline_lock);
    2468             : 
    2469           0 :         __trace_find_cmdline(pid, comm);
    2470             : 
    2471           0 :         arch_spin_unlock(&trace_cmdline_lock);
    2472           0 :         preempt_enable();
    2473           0 : }
    2474             : 
    2475           0 : int trace_find_tgid(int pid)
    2476             : {
    2477           0 :         if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
    2478             :                 return 0;
    2479             : 
    2480           0 :         return tgid_map[pid];
    2481             : }
    2482             : 
    2483           0 : static int trace_save_tgid(struct task_struct *tsk)
    2484             : {
    2485             :         /* treat recording of the idle task as a success */
    2486           0 :         if (!tsk->pid)
    2487             :                 return 1;
    2488             : 
    2489           0 :         if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
    2490             :                 return 0;
    2491             : 
    2492           0 :         tgid_map[tsk->pid] = tsk->tgid;
    2493           0 :         return 1;
    2494             : }
    2495             : 
    2496           0 : static bool tracing_record_taskinfo_skip(int flags)
    2497             : {
    2498           0 :         if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
    2499             :                 return true;
    2500           0 :         if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
    2501           0 :                 return true;
    2502           0 :         if (!__this_cpu_read(trace_taskinfo_save))
    2503           0 :                 return true;
    2504             :         return false;
    2505             : }
    2506             : 
    2507             : /**
    2508             :  * tracing_record_taskinfo - record the task info of a task
    2509             :  *
    2510             :  * @task:  task to record
    2511             :  * @flags: TRACE_RECORD_CMDLINE for recording comm
    2512             :  *         TRACE_RECORD_TGID for recording tgid
    2513             :  */
    2514           0 : void tracing_record_taskinfo(struct task_struct *task, int flags)
    2515             : {
    2516           0 :         bool done;
    2517             : 
    2518           0 :         if (tracing_record_taskinfo_skip(flags))
    2519             :                 return;
    2520             : 
    2521             :         /*
    2522             :          * Record as much task information as possible. If some fail, continue
    2523             :          * to try to record the others.
    2524             :          */
    2525           0 :         done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
    2526           0 :         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
    2527             : 
    2528             :         /* If recording any of the information failed, retry soon. */
    2529           0 :         if (!done)
    2530             :                 return;
    2531             : 
    2532           0 :         __this_cpu_write(trace_taskinfo_save, false);
    2533             : }
    2534             : 
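/*
 * Hypothetical caller sketch: both pieces of task info can be
 * recorded in one call by OR-ing the flags documented above.
 */
static void example_record_current(void)
{
        tracing_record_taskinfo(current, TRACE_RECORD_CMDLINE |
                                         TRACE_RECORD_TGID);
}
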
    2535             : /**
    2536             :  * tracing_record_taskinfo_sched_switch - record task info for sched_switch
    2537             :  *
    2538             :  * @prev: previous task during sched_switch
    2539             :  * @next: next task during sched_switch
    2540             :  * @flags: TRACE_RECORD_CMDLINE for recording comm
    2541             :  *         TRACE_RECORD_TGID for recording tgid
    2542             :  */
    2543           0 : void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
    2544             :                                           struct task_struct *next, int flags)
    2545             : {
    2546           0 :         bool done;
    2547             : 
    2548           0 :         if (tracing_record_taskinfo_skip(flags))
    2549             :                 return;
    2550             : 
    2551             :         /*
    2552             :          * Record as much task information as possible. If some fail, continue
    2553             :          * to try to record the others.
    2554             :          */
    2555           0 :         done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
    2556           0 :         done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
    2557           0 :         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
    2558           0 :         done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
    2559             : 
    2560             :         /* If recording any of the information failed, retry soon. */
    2561           0 :         if (!done)
    2562             :                 return;
    2563             : 
    2564           0 :         __this_cpu_write(trace_taskinfo_save, false);
    2565             : }
    2566             : 
    2567             : /* Helpers to record specific task information */
    2568           0 : void tracing_record_cmdline(struct task_struct *task)
    2569             : {
    2570           0 :         tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
    2571           0 : }
    2572             : 
    2573           0 : void tracing_record_tgid(struct task_struct *task)
    2574             : {
    2575           0 :         tracing_record_taskinfo(task, TRACE_RECORD_TGID);
    2576           0 : }
    2577             : 
    2578             : /*
    2579             :  * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
    2580             :  * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
    2581             :  * simplifies those functions and keeps them in sync.
    2582             :  */
    2583           0 : enum print_line_t trace_handle_return(struct trace_seq *s)
    2584             : {
    2585           0 :         return trace_seq_has_overflowed(s) ?
    2586           0 :                 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
    2587             : }
    2588             : EXPORT_SYMBOL_GPL(trace_handle_return);
    2589             : 
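/*
 * Sketch of the calling pattern described above; the output text
 * is made up for illustration.
 */
static enum print_line_t example_output(struct trace_iterator *iter)
{
        trace_seq_puts(&iter->seq, "example event\n");
        /* Maps an overflowed trace_seq to TRACE_TYPE_PARTIAL_LINE. */
        return trace_handle_return(&iter->seq);
}
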
    2590           0 : unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
    2591             : {
    2592           0 :         unsigned int trace_flags = irqs_status;
    2593           0 :         unsigned int pc;
    2594             : 
    2595           0 :         pc = preempt_count();
    2596             : 
    2597           0 :         if (pc & NMI_MASK)
    2598           0 :                 trace_flags |= TRACE_FLAG_NMI;
    2599           0 :         if (pc & HARDIRQ_MASK)
    2600           0 :                 trace_flags |= TRACE_FLAG_HARDIRQ;
    2601           0 :         if (in_serving_softirq())
    2602           0 :                 trace_flags |= TRACE_FLAG_SOFTIRQ;
    2603             : 
    2604           0 :         if (tif_need_resched())
    2605           0 :                 trace_flags |= TRACE_FLAG_NEED_RESCHED;
    2606           0 :         if (test_preempt_need_resched())
    2607           0 :                 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
    2608           0 :         return (trace_flags << 16) | (pc & 0xff);
    2609             : }
    2610             : 
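/*
 * Worked example of the packing above (helper names hypothetical):
 * the TRACE_FLAG_* bits end up in the upper 16 bits and the low
 * byte carries the preempt count snapshot.
 */
static inline unsigned int example_ctx_flags(unsigned int trace_ctx)
{
        return trace_ctx >> 16;         /* TRACE_FLAG_* bits */
}

static inline unsigned int example_ctx_preempt(unsigned int trace_ctx)
{
        return trace_ctx & 0xff;        /* preempt count */
}
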
    2611             : struct ring_buffer_event *
    2612           0 : trace_buffer_lock_reserve(struct trace_buffer *buffer,
    2613             :                           int type,
    2614             :                           unsigned long len,
    2615             :                           unsigned int trace_ctx)
    2616             : {
    2617           0 :         return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
    2618             : }
    2619             : 
    2620             : DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
    2621             : DEFINE_PER_CPU(int, trace_buffered_event_cnt);
    2622             : static int trace_buffered_event_ref;
    2623             : 
    2624             : /**
    2625             :  * trace_buffered_event_enable - enable buffering events
    2626             :  *
    2627             :  * When events are being filtered, it is quicker to write the event
    2628             :  * data into a temporary buffer if there is a good chance that it
    2629             :  * will not be committed. Discarding an event from the ring buffer
    2630             :  * is not as fast as committing one, and is much slower than
    2631             :  * copying the data and then committing it.
    2632             :  *
    2633             :  * When an event is to be filtered, allocate per-cpu buffers to
    2634             :  * write the event data into. If the event is then filtered and
    2635             :  * discarded it is simply dropped; otherwise, the entire data is
    2636             :  * committed in one shot.
    2637             :  */
    2638           0 : void trace_buffered_event_enable(void)
    2639             : {
    2640           0 :         struct ring_buffer_event *event;
    2641           0 :         struct page *page;
    2642           0 :         int cpu;
    2643             : 
    2644           0 :         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
    2645             : 
    2646           0 :         if (trace_buffered_event_ref++)
    2647             :                 return;
    2648             : 
    2649           0 :         for_each_tracing_cpu(cpu) {
    2650           0 :                 page = alloc_pages_node(cpu_to_node(cpu),
    2651             :                                         GFP_KERNEL | __GFP_NORETRY, 0);
    2652           0 :                 if (!page)
    2653           0 :                         goto failed;
    2654             : 
    2655           0 :                 event = page_address(page);
    2656           0 :                 memset(event, 0, sizeof(*event));
    2657             : 
    2658           0 :                 per_cpu(trace_buffered_event, cpu) = event;
    2659             : 
    2660           0 :                 preempt_disable();
    2661           0 :                 if (cpu == smp_processor_id() &&
    2662           0 :                     __this_cpu_read(trace_buffered_event) !=
    2663           0 :                     per_cpu(trace_buffered_event, cpu))
    2664           0 :                         WARN_ON_ONCE(1);
    2665           0 :                 preempt_enable();
    2666             :         }
    2667             : 
    2668             :         return;
    2669           0 :  failed:
    2670           0 :         trace_buffered_event_disable();
    2671             : }
    2672             : 
    2673           0 : static void enable_trace_buffered_event(void *data)
    2674             : {
    2675             :         /* Probably not needed, but do it anyway */
    2676           0 :         smp_rmb();
    2677           0 :         this_cpu_dec(trace_buffered_event_cnt);
    2678           0 : }
    2679             : 
    2680           0 : static void disable_trace_buffered_event(void *data)
    2681             : {
    2682           0 :         this_cpu_inc(trace_buffered_event_cnt);
    2683           0 : }
    2684             : 
    2685             : /**
    2686             :  * trace_buffered_event_disable - disable buffering events
    2687             :  *
    2688             :  * When a filter is removed, it is faster to not use the buffered
    2689             :  * events, and to commit directly into the ring buffer. Free up
    2690             :  * the temp buffers when there are no more users. This requires
    2691             :  * special synchronization with current events.
    2692             :  */
    2693           0 : void trace_buffered_event_disable(void)
    2694             : {
    2695           0 :         int cpu;
    2696             : 
    2697           0 :         WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
    2698             : 
    2699           0 :         if (WARN_ON_ONCE(!trace_buffered_event_ref))
    2700             :                 return;
    2701             : 
    2702           0 :         if (--trace_buffered_event_ref)
    2703             :                 return;
    2704             : 
    2705           0 :         preempt_disable();
    2706             :         /* For each CPU, set the buffer as used. */
    2707           0 :         smp_call_function_many(tracing_buffer_mask,
    2708             :                                disable_trace_buffered_event, NULL, 1);
    2709           0 :         preempt_enable();
    2710             : 
    2711             :         /* Wait for all current users to finish */
    2712           0 :         synchronize_rcu();
    2713             : 
    2714           0 :         for_each_tracing_cpu(cpu) {
    2715           0 :                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
    2716           0 :                 per_cpu(trace_buffered_event, cpu) = NULL;
    2717             :         }
    2718             :         /*
    2719             :          * Make sure trace_buffered_event is NULL before clearing
    2720             :          * trace_buffered_event_cnt.
    2721             :          */
    2722           0 :         smp_wmb();
    2723             : 
    2724           0 :         preempt_disable();
    2725             :         /* Do the work on each cpu */
    2726           0 :         smp_call_function_many(tracing_buffer_mask,
    2727             :                                enable_trace_buffered_event, NULL, 1);
    2728           0 :         preempt_enable();
    2729             : }
    2730             : 
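/*
 * Hypothetical sketch of the refcounted pairing above: both calls
 * must run under event_mutex, and every enable needs a matching
 * disable before the per-cpu pages are freed.
 */
static void example_filter_lifetime(void)
{
        mutex_lock(&event_mutex);
        trace_buffered_event_enable();  /* first user allocates pages */
        /* ... event filter installed and in use ... */
        trace_buffered_event_disable(); /* last user frees the pages  */
        mutex_unlock(&event_mutex);
}
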
    2731             : static struct trace_buffer *temp_buffer;
    2732             : 
    2733             : struct ring_buffer_event *
    2734           0 : trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
    2735             :                           struct trace_event_file *trace_file,
    2736             :                           int type, unsigned long len,
    2737             :                           unsigned int trace_ctx)
    2738             : {
    2739           0 :         struct ring_buffer_event *entry;
    2740           0 :         int val;
    2741             : 
    2742           0 :         *current_rb = trace_file->tr->array_buffer.buffer;
    2743             : 
    2744           0 :         if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
    2745           0 :              (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
    2746           0 :             (entry = this_cpu_read(trace_buffered_event))) {
    2747             :                 /* Try to use the per cpu buffer first */
    2748           0 :                 val = this_cpu_inc_return(trace_buffered_event_cnt);
    2749           0 :                 if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
    2750           0 :                         trace_event_setup(entry, type, trace_ctx);
    2751           0 :                         entry->array[0] = len;
    2752           0 :                         return entry;
    2753             :                 }
    2754           0 :                 this_cpu_dec(trace_buffered_event_cnt);
    2755             :         }
    2756             : 
    2757           0 :         entry = __trace_buffer_lock_reserve(*current_rb, type, len,
    2758             :                                             trace_ctx);
    2759             :         /*
    2760             :          * If tracing is off, but we have triggers enabled,
    2761             :          * we still need to look at the event data. Use the temp_buffer
    2762             :          * to store the trace event for the trigger to use. It is
    2763             :          * recursion safe and will not be recorded anywhere.
    2764             :          */
    2765           0 :         if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
    2766           0 :                 *current_rb = temp_buffer;
    2767           0 :                 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
    2768             :                                                     trace_ctx);
    2769             :         }
    2770             :         return entry;
    2771             : }
    2772             : EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
    2773             : 
    2774             : static DEFINE_SPINLOCK(tracepoint_iter_lock);
    2775             : static DEFINE_MUTEX(tracepoint_printk_mutex);
    2776             : 
    2777           0 : static void output_printk(struct trace_event_buffer *fbuffer)
    2778             : {
    2779           0 :         struct trace_event_call *event_call;
    2780           0 :         struct trace_event_file *file;
    2781           0 :         struct trace_event *event;
    2782           0 :         unsigned long flags;
    2783           0 :         struct trace_iterator *iter = tracepoint_print_iter;
    2784             : 
    2785             :         /* We should never get here if iter is NULL */
    2786           0 :         if (WARN_ON_ONCE(!iter))
    2787             :                 return;
    2788             : 
    2789           0 :         event_call = fbuffer->trace_file->event_call;
    2790           0 :         if (!event_call || !event_call->event.funcs ||
    2791           0 :             !event_call->event.funcs->trace)
    2792             :                 return;
    2793             : 
    2794           0 :         file = fbuffer->trace_file;
    2795           0 :         if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
    2796           0 :             (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
    2797           0 :              !filter_match_preds(file->filter, fbuffer->entry)))
    2798           0 :                 return;
    2799             : 
    2800           0 :         event = &fbuffer->trace_file->event_call->event;
    2801             : 
    2802           0 :         spin_lock_irqsave(&tracepoint_iter_lock, flags);
    2803           0 :         trace_seq_init(&iter->seq);
    2804           0 :         iter->ent = fbuffer->entry;
    2805           0 :         event_call->event.funcs->trace(iter, 0, event);
    2806           0 :         trace_seq_putc(&iter->seq, 0);
    2807           0 :         printk("%s", iter->seq.buffer);
    2808             : 
    2809           0 :         spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
    2810             : }
    2811             : 
    2812           0 : int tracepoint_printk_sysctl(struct ctl_table *table, int write,
    2813             :                              void *buffer, size_t *lenp,
    2814             :                              loff_t *ppos)
    2815             : {
    2816           0 :         int save_tracepoint_printk;
    2817           0 :         int ret;
    2818             : 
    2819           0 :         mutex_lock(&tracepoint_printk_mutex);
    2820           0 :         save_tracepoint_printk = tracepoint_printk;
    2821             : 
    2822           0 :         ret = proc_dointvec(table, write, buffer, lenp, ppos);
    2823             : 
    2824             :         /*
    2825             :          * This will force exiting early, as tracepoint_printk
    2826             :          * is always zero when tracepoint_print_iter is not allocated.
    2827             :          */
    2828           0 :         if (!tracepoint_print_iter)
    2829           0 :                 tracepoint_printk = 0;
    2830             : 
    2831           0 :         if (save_tracepoint_printk == tracepoint_printk)
    2832           0 :                 goto out;
    2833             : 
    2834           0 :         if (tracepoint_printk)
    2835           0 :                 static_key_enable(&tracepoint_printk_key.key);
    2836             :         else
    2837           0 :                 static_key_disable(&tracepoint_printk_key.key);
    2838             : 
    2839           0 :  out:
    2840           0 :         mutex_unlock(&tracepoint_printk_mutex);
    2841             : 
    2842           0 :         return ret;
    2843             : }
    2844             : 
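/*
 * Sketch of how a handler like the one above is typically wired
 * into a sysctl table (the table here is illustrative; the real
 * entry lives elsewhere):
 */
static struct ctl_table example_trace_sysctl[] = {
        {
                .procname       = "tracepoint_printk",
                .data           = &tracepoint_printk,
                .maxlen         = sizeof(tracepoint_printk),
                .mode           = 0644,
                .proc_handler   = tracepoint_printk_sysctl,
        },
        { }
};
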
    2845           0 : void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
    2846             : {
    2847           0 :         if (static_key_false(&tracepoint_printk_key.key))
    2848           0 :                 output_printk(fbuffer);
    2849             : 
    2850           0 :         if (static_branch_unlikely(&trace_event_exports_enabled))
    2851           0 :                 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
    2852           0 :         event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
    2853             :                                     fbuffer->event, fbuffer->entry,
    2854             :                                     fbuffer->trace_ctx, fbuffer->regs);
    2855           0 : }
    2856             : EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
    2857             : 
    2858             : /*
    2859             :  * Skip 3:
    2860             :  *
    2861             :  *   trace_buffer_unlock_commit_regs()
    2862             :  *   trace_event_buffer_commit()
    2863             :  *   trace_event_raw_event_xxx()
    2864             :  */
    2865             : # define STACK_SKIP 3
    2866             : 
    2867           0 : void trace_buffer_unlock_commit_regs(struct trace_array *tr,
    2868             :                                      struct trace_buffer *buffer,
    2869             :                                      struct ring_buffer_event *event,
    2870             :                                      unsigned int trace_ctx,
    2871             :                                      struct pt_regs *regs)
    2872             : {
    2873           0 :         __buffer_unlock_commit(buffer, event);
    2874             : 
    2875             :         /*
    2876             :          * If regs is not set, then skip the STACK_SKIP wrapper functions.
    2877             :          * Note, we can still get here via blktrace, the wakeup tracer
    2878             :          * and mmiotrace, but it's OK if they lose a function or
    2879             :          * two. Those frames are not that meaningful.
    2880             :          */
    2881           0 :         ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
    2882           0 :         ftrace_trace_userstack(tr, buffer, trace_ctx);
    2883           0 : }
    2884             : 
    2885             : /*
    2886             :  * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
    2887             :  */
    2888             : void
    2889           0 : trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
    2890             :                                    struct ring_buffer_event *event)
    2891             : {
    2892           0 :         __buffer_unlock_commit(buffer, event);
    2893           0 : }
    2894             : 
    2895             : void
    2896           0 : trace_function(struct trace_array *tr, unsigned long ip,
    2897             :                unsigned long parent_ip, unsigned int trace_ctx)
    2898             : {
    2899           0 :         struct trace_event_call *call = &event_function;
    2900           0 :         struct trace_buffer *buffer = tr->array_buffer.buffer;
    2901           0 :         struct ring_buffer_event *event;
    2902           0 :         struct ftrace_entry *entry;
    2903             : 
    2904           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
    2905             :                                             trace_ctx);
    2906           0 :         if (!event)
    2907             :                 return;
    2908           0 :         entry   = ring_buffer_event_data(event);
    2909           0 :         entry->ip                    = ip;
    2910           0 :         entry->parent_ip             = parent_ip;
    2911             : 
    2912           0 :         if (!call_filter_check_discard(call, entry, buffer, event)) {
    2913           0 :                 if (static_branch_unlikely(&trace_function_exports_enabled))
    2914           0 :                         ftrace_exports(event, TRACE_EXPORT_FUNCTION);
    2915           0 :                 __buffer_unlock_commit(buffer, event);
    2916             :         }
    2917             : }
    2918             : 
    2919             : #ifdef CONFIG_STACKTRACE
    2920             : 
    2921             : /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
    2922             : #define FTRACE_KSTACK_NESTING   4
    2923             : 
    2924             : #define FTRACE_KSTACK_ENTRIES   (PAGE_SIZE / FTRACE_KSTACK_NESTING)
    2925             : 
    2926             : struct ftrace_stack {
    2927             :         unsigned long           calls[FTRACE_KSTACK_ENTRIES];
    2928             : };
    2929             : 
    2930             : 
    2931             : struct ftrace_stacks {
    2932             :         struct ftrace_stack     stacks[FTRACE_KSTACK_NESTING];
    2933             : };
    2934             : 
    2935             : static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
    2936             : static DEFINE_PER_CPU(int, ftrace_stack_reserve);
    2937             : 
    2938           0 : static void __ftrace_trace_stack(struct trace_buffer *buffer,
    2939             :                                  unsigned int trace_ctx,
    2940             :                                  int skip, struct pt_regs *regs)
    2941             : {
    2942           0 :         struct trace_event_call *call = &event_kernel_stack;
    2943           0 :         struct ring_buffer_event *event;
    2944           0 :         unsigned int size, nr_entries;
    2945           0 :         struct ftrace_stack *fstack;
    2946           0 :         struct stack_entry *entry;
    2947           0 :         int stackidx;
    2948             : 
    2949             :         /*
    2950             :          * Add one, for this function and the call to stack_trace_save().
    2951             :          * If regs is set, then these functions will not be in the way.
    2952             :          */
    2953             : #ifndef CONFIG_UNWINDER_ORC
    2954             :         if (!regs)
    2955             :                 skip++;
    2956             : #endif
    2957             : 
    2958           0 :         preempt_disable_notrace();
    2959             : 
    2960           0 :         stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
    2961             : 
    2962             :         /* This should never happen. If it does, yell once and skip */
    2963           0 :         if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
    2964           0 :                 goto out;
    2965             : 
    2966             :         /*
    2967             :          * The above __this_cpu_inc_return() is 'atomic' cpu local. An
    2968             :          * interrupt will either see the value pre-increment or
    2969             :          * post-increment. If the interrupt happens pre-increment it will have
    2970             :          * restored the counter when it returns.  We just need a barrier to
    2971             :          * keep gcc from moving things around.
    2972             :          */
    2973           0 :         barrier();
    2974             : 
    2975           0 :         fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
    2976           0 :         size = ARRAY_SIZE(fstack->calls);
    2977             : 
    2978           0 :         if (regs) {
    2979           0 :                 nr_entries = stack_trace_save_regs(regs, fstack->calls,
    2980             :                                                    size, skip);
    2981             :         } else {
    2982           0 :                 nr_entries = stack_trace_save(fstack->calls, size, skip);
    2983             :         }
    2984             : 
    2985           0 :         size = nr_entries * sizeof(unsigned long);
    2986           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
    2987             :                                             sizeof(*entry) + size, trace_ctx);
    2988           0 :         if (!event)
    2989           0 :                 goto out;
    2990           0 :         entry = ring_buffer_event_data(event);
    2991             : 
    2992           0 :         memcpy(&entry->caller, fstack->calls, size);
    2993           0 :         entry->size = nr_entries;
    2994             : 
    2995           0 :         if (!call_filter_check_discard(call, entry, buffer, event))
    2996           0 :                 __buffer_unlock_commit(buffer, event);
    2997             : 
    2998           0 :  out:
    2999             :         /* Again, don't let gcc optimize things here */
    3000           0 :         barrier();
    3001           0 :         __this_cpu_dec(ftrace_stack_reserve);
    3002           0 :         preempt_enable_notrace();
    3003             : 
    3004           0 : }
    3005             : 
    3006           0 : static inline void ftrace_trace_stack(struct trace_array *tr,
    3007             :                                       struct trace_buffer *buffer,
    3008             :                                       unsigned int trace_ctx,
    3009             :                                       int skip, struct pt_regs *regs)
    3010             : {
    3011           0 :         if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
    3012             :                 return;
    3013             : 
    3014           0 :         __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
    3015             : }
    3016             : 
    3017           0 : void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
    3018             :                    int skip)
    3019             : {
    3020           0 :         struct trace_buffer *buffer = tr->array_buffer.buffer;
    3021             : 
    3022           0 :         if (rcu_is_watching()) {
    3023           0 :                 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
    3024           0 :                 return;
    3025             :         }
    3026             : 
    3027             :         /*
    3028             :          * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
    3029             :          * but if the above rcu_is_watching() failed, then the NMI
    3030             :          * triggered someplace critical, and rcu_irq_enter() should
    3031             :          * not be called from NMI.
    3032             :          */
    3033           0 :         if (unlikely(in_nmi()))
    3034             :                 return;
    3035             : 
    3036           0 :         rcu_irq_enter_irqson();
    3037           0 :         __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
    3038           0 :         rcu_irq_exit_irqson();
    3039             : }
    3040             : 
    3041             : /**
    3042             :  * trace_dump_stack - record a stack back trace in the trace buffer
    3043             :  * @skip: Number of functions to skip (helper handlers)
    3044             :  */
    3045           0 : void trace_dump_stack(int skip)
    3046             : {
    3047           0 :         if (tracing_disabled || tracing_selftest_running)
    3048             :                 return;
    3049             : 
    3050             : #ifndef CONFIG_UNWINDER_ORC
    3051             :         /* Skip 1 to skip this function. */
    3052             :         skip++;
    3053             : #endif
    3054           0 :         __ftrace_trace_stack(global_trace.array_buffer.buffer,
    3055             :                              tracing_gen_ctx(), skip, NULL);
    3056             : }
    3057             : EXPORT_SYMBOL_GPL(trace_dump_stack);
    3058             : 
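/*
 * Hypothetical call site for the helper above: dump the current
 * kernel stack into the trace buffer from a debug path, skipping
 * no extra frames.
 */
static void example_debug_hook(void)
{
        trace_dump_stack(0);
}
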
    3059             : #ifdef CONFIG_USER_STACKTRACE_SUPPORT
    3060             : static DEFINE_PER_CPU(int, user_stack_count);
    3061             : 
    3062             : static void
    3063           0 : ftrace_trace_userstack(struct trace_array *tr,
    3064             :                        struct trace_buffer *buffer, unsigned int trace_ctx)
    3065             : {
    3066           0 :         struct trace_event_call *call = &event_user_stack;
    3067           0 :         struct ring_buffer_event *event;
    3068           0 :         struct userstack_entry *entry;
    3069             : 
    3070           0 :         if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
    3071             :                 return;
    3072             : 
    3073             :         /*
    3074             :          * NMIs cannot handle page faults, even with fixups.
    3075             :          * Saving the user stack can (and often does) fault.
    3076             :          */
    3077           0 :         if (unlikely(in_nmi()))
    3078             :                 return;
    3079             : 
    3080             :         /*
    3081             :          * prevent recursion, since the user stack tracing may
    3082             :          * trigger other kernel events.
    3083             :          */
    3084           0 :         preempt_disable();
    3085           0 :         if (__this_cpu_read(user_stack_count))
    3086           0 :                 goto out;
    3087             : 
    3088           0 :         __this_cpu_inc(user_stack_count);
    3089             : 
    3090           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
    3091             :                                             sizeof(*entry), trace_ctx);
    3092           0 :         if (!event)
    3093           0 :                 goto out_drop_count;
    3094           0 :         entry   = ring_buffer_event_data(event);
    3095             : 
    3096           0 :         entry->tgid          = current->tgid;
    3097           0 :         memset(&entry->caller, 0, sizeof(entry->caller));
    3098             : 
    3099           0 :         stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
    3100           0 :         if (!call_filter_check_discard(call, entry, buffer, event))
    3101           0 :                 __buffer_unlock_commit(buffer, event);
    3102             : 
    3103           0 :  out_drop_count:
    3104           0 :         __this_cpu_dec(user_stack_count);
    3105           0 :  out:
    3106           0 :         preempt_enable();
    3107             : }
    3108             : #else /* CONFIG_USER_STACKTRACE_SUPPORT */
    3109             : static void ftrace_trace_userstack(struct trace_array *tr,
    3110             :                                    struct trace_buffer *buffer,
    3111             :                                    unsigned int trace_ctx)
    3112             : {
    3113             : }
    3114             : #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
    3115             : 
    3116             : #endif /* CONFIG_STACKTRACE */
    3117             : 
    3118             : /* created for use with alloc_percpu */
    3119             : struct trace_buffer_struct {
    3120             :         int nesting;
    3121             :         char buffer[4][TRACE_BUF_SIZE];
    3122             : };
    3123             : 
    3124             : static struct trace_buffer_struct *trace_percpu_buffer;
    3125             : 
    3126             : /*
    3127             :  * This allows for lockless recording.  If we're nested too deeply
    3128             :  * (more than four levels: normal, softirq, irq, NMI), this returns NULL.
    3129             :  */
    3130           0 : static char *get_trace_buf(void)
    3131             : {
    3132           0 :         struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
    3133             : 
    3134           0 :         if (!buffer || buffer->nesting >= 4)
    3135             :                 return NULL;
    3136             : 
    3137           0 :         buffer->nesting++;
    3138             : 
    3139             :         /* Interrupts must see nesting incremented before we use the buffer */
    3140           0 :         barrier();
    3141           0 :         return &buffer->buffer[buffer->nesting - 1][0];
    3142             : }
    3143             : 
    3144           0 : static void put_trace_buf(void)
    3145             : {
    3146             :         /* Don't let the decrement of nesting leak before this */
    3147           0 :         barrier();
    3148           0 :         this_cpu_dec(trace_percpu_buffer->nesting);
    3149           0 : }
    3150             : 
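/*
 * Illustrative use of the pair above (sketch): the buffer must be
 * released in the same context that reserved it, and a NULL return
 * means all four nesting levels are already in use.
 */
static void example_use_trace_buf(void)
{
        char *tbuffer = get_trace_buf();

        if (!tbuffer)
                return;                 /* nested too deeply */
        /* ... format at most TRACE_BUF_SIZE bytes into tbuffer ... */
        put_trace_buf();
}
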
    3151           0 : static int alloc_percpu_trace_buffer(void)
    3152             : {
    3153           0 :         struct trace_buffer_struct *buffers;
    3154             : 
    3155           0 :         if (trace_percpu_buffer)
    3156             :                 return 0;
    3157             : 
    3158           0 :         buffers = alloc_percpu(struct trace_buffer_struct);
    3159           0 :         if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
    3160             :                 return -ENOMEM;
    3161             : 
    3162           0 :         trace_percpu_buffer = buffers;
    3163           0 :         return 0;
    3164             : }
    3165             : 
    3166             : static int buffers_allocated;
    3167             : 
    3168           0 : void trace_printk_init_buffers(void)
    3169             : {
    3170           0 :         if (buffers_allocated)
    3171             :                 return;
    3172             : 
    3173           0 :         if (alloc_percpu_trace_buffer())
    3174             :                 return;
    3175             : 
    3176             :         /* trace_printk() is for debug use only. Don't use it in production. */
    3177             : 
    3178           0 :         pr_warn("\n");
    3179           0 :         pr_warn("**********************************************************\n");
    3180           0 :         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
    3181           0 :         pr_warn("**                                                      **\n");
    3182           0 :         pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
    3183           0 :         pr_warn("**                                                      **\n");
    3184           0 :         pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
    3185           0 :         pr_warn("** unsafe for production use.                           **\n");
    3186           0 :         pr_warn("**                                                      **\n");
    3187           0 :         pr_warn("** If you see this message and you are not debugging    **\n");
    3188           0 :         pr_warn("** the kernel, report this immediately to your vendor!  **\n");
    3189           0 :         pr_warn("**                                                      **\n");
    3190           0 :         pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
    3191           0 :         pr_warn("**********************************************************\n");
    3192             : 
    3193             :         /* Expand the buffers to their set size */
    3194           0 :         tracing_update_buffers();
    3195             : 
    3196           0 :         buffers_allocated = 1;
    3197             : 
    3198             :         /*
    3199             :          * trace_printk_init_buffers() can be called by modules.
    3200             :          * If the global_trace.buffer is already allocated at this
    3201             :          * point, then this was called by module code, and we need
    3202             :          * to start cmdline recording directly here.
    3203             :          */
    3204           0 :         if (global_trace.array_buffer.buffer)
    3205           0 :                 tracing_start_cmdline_record();
    3206             : }
    3207             : EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
    3208             : 
    3209           1 : void trace_printk_start_comm(void)
    3210             : {
    3211             :         /* Start tracing comms if trace printk is set */
    3212           1 :         if (!buffers_allocated)
    3213             :                 return;
    3214           0 :         tracing_start_cmdline_record();
    3215             : }
    3216             : 
    3217           0 : static void trace_printk_start_stop_comm(int enabled)
    3218             : {
    3219           0 :         if (!buffers_allocated)
    3220             :                 return;
    3221             : 
    3222           0 :         if (enabled)
    3223           0 :                 tracing_start_cmdline_record();
    3224             :         else
    3225           0 :                 tracing_stop_cmdline_record();
    3226             : }
    3227             : 
    3228             : /**
    3229             :  * trace_vbprintk - write binary msg to tracing buffer
    3230             :  * @ip:    The address of the caller
    3231             :  * @fmt:   The string format to write to the buffer
    3232             :  * @args:  Arguments for @fmt
    3233             :  */
    3234           0 : int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
    3235             : {
    3236           0 :         struct trace_event_call *call = &event_bprint;
    3237           0 :         struct ring_buffer_event *event;
    3238           0 :         struct trace_buffer *buffer;
    3239           0 :         struct trace_array *tr = &global_trace;
    3240           0 :         struct bprint_entry *entry;
    3241           0 :         unsigned int trace_ctx;
    3242           0 :         char *tbuffer;
    3243           0 :         int len = 0, size;
    3244             : 
    3245           0 :         if (unlikely(tracing_selftest_running || tracing_disabled))
    3246             :                 return 0;
    3247             : 
    3248             :         /* Don't pollute graph traces with trace_vprintk internals */
    3249           0 :         pause_graph_tracing();
    3250             : 
    3251           0 :         trace_ctx = tracing_gen_ctx();
    3252           0 :         preempt_disable_notrace();
    3253             : 
    3254           0 :         tbuffer = get_trace_buf();
    3255           0 :         if (!tbuffer) {
    3256           0 :                 len = 0;
    3257           0 :                 goto out_nobuffer;
    3258             :         }
    3259             : 
    3260           0 :         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
    3261             : 
    3262           0 :         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
    3263           0 :                 goto out_put;
    3264             : 
    3265           0 :         size = sizeof(*entry) + sizeof(u32) * len;
    3266           0 :         buffer = tr->array_buffer.buffer;
    3267           0 :         ring_buffer_nest_start(buffer);
    3268           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
    3269             :                                             trace_ctx);
    3270           0 :         if (!event)
    3271           0 :                 goto out;
    3272           0 :         entry = ring_buffer_event_data(event);
    3273           0 :         entry->ip                    = ip;
    3274           0 :         entry->fmt                   = fmt;
    3275             : 
    3276           0 :         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
    3277           0 :         if (!call_filter_check_discard(call, entry, buffer, event)) {
    3278           0 :                 __buffer_unlock_commit(buffer, event);
    3279           0 :                 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
    3280             :         }
    3281             : 
    3282           0 : out:
    3283           0 :         ring_buffer_nest_end(buffer);
    3284           0 : out_put:
    3285           0 :         put_trace_buf();
    3286             : 
    3287           0 : out_nobuffer:
    3288           0 :         preempt_enable_notrace();
    3289           0 :         unpause_graph_tracing();
    3290             : 
    3291           0 :         return len;
    3292             : }
    3293             : EXPORT_SYMBOL_GPL(trace_vbprintk);
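                     : 
                     : /*
                     :  * Note that trace_vbprintk() records the @fmt pointer itself in the
                     :  * ring buffer entry, so @fmt must outlive the trace; trace_printk()
                     :  * guarantees this by only passing static format strings. A minimal
                     :  * sketch of a printf-style wrapper over this path (hypothetical
                     :  * helper, for illustration only):
                     :  */
                     : #if 0   /* example only, not part of the kernel */
                     : static __printf(2, 3) int example_bprintk(unsigned long ip, const char *fmt, ...)
                     : {
                     :         va_list ap;
                     :         int ret;
                     : 
                     :         va_start(ap, fmt);
                     :         ret = trace_vbprintk(ip, fmt, ap);
                     :         va_end(ap);
                     :         return ret;
                     : }
                     : #endif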
    3294             : 
    3295             : __printf(3, 0)
    3296             : static int
    3297           0 : __trace_array_vprintk(struct trace_buffer *buffer,
    3298             :                       unsigned long ip, const char *fmt, va_list args)
    3299             : {
    3300           0 :         struct trace_event_call *call = &event_print;
    3301           0 :         struct ring_buffer_event *event;
    3302           0 :         int len = 0, size;
    3303           0 :         struct print_entry *entry;
    3304           0 :         unsigned int trace_ctx;
    3305           0 :         char *tbuffer;
    3306             : 
    3307           0 :         if (tracing_disabled || tracing_selftest_running)
    3308             :                 return 0;
    3309             : 
    3310             :         /* Don't pollute graph traces with trace_vprintk internals */
    3311           0 :         pause_graph_tracing();
    3312             : 
    3313           0 :         trace_ctx = tracing_gen_ctx();
    3314           0 :         preempt_disable_notrace();
    3315             : 
    3316             : 
    3317           0 :         tbuffer = get_trace_buf();
    3318           0 :         if (!tbuffer) {
    3319           0 :                 len = 0;
    3320           0 :                 goto out_nobuffer;
    3321             :         }
    3322             : 
    3323           0 :         len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
    3324             : 
    3325           0 :         size = sizeof(*entry) + len + 1;
    3326           0 :         ring_buffer_nest_start(buffer);
    3327           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
    3328             :                                             trace_ctx);
    3329           0 :         if (!event)
    3330           0 :                 goto out;
    3331           0 :         entry = ring_buffer_event_data(event);
    3332           0 :         entry->ip = ip;
    3333             : 
    3334           0 :         memcpy(&entry->buf, tbuffer, len + 1);
    3335           0 :         if (!call_filter_check_discard(call, entry, buffer, event)) {
    3336           0 :                 __buffer_unlock_commit(buffer, event);
    3337           0 :                 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
    3338             :         }
    3339             : 
    3340           0 : out:
    3341           0 :         ring_buffer_nest_end(buffer);
    3342           0 :         put_trace_buf();
    3343             : 
    3344           0 : out_nobuffer:
    3345           0 :         preempt_enable_notrace();
    3346           0 :         unpause_graph_tracing();
    3347             : 
    3348           0 :         return len;
    3349             : }
    3350             : 
    3351             : __printf(3, 0)
    3352           0 : int trace_array_vprintk(struct trace_array *tr,
    3353             :                         unsigned long ip, const char *fmt, va_list args)
    3354             : {
    3355           0 :         return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
    3356             : }
    3357             : 
    3358             : /**
    3359             :  * trace_array_printk - Print a message to a specific instance
    3360             :  * @tr: The instance trace_array descriptor
    3361             :  * @ip: The instruction pointer that this is called from.
    3362             :  * @fmt: The format to print (printf format)
    3363             :  *
    3364             :  * If a subsystem sets up its own instance, it may printk strings
    3365             :  * into its tracing instance buffer using this function. Note, this
    3366             :  * function will not write into the top level buffer (use
    3367             :  * trace_printk() for that), as the top level buffer should only
    3368             :  * contain events that can be individually disabled. trace_printk()
    3369             :  * is only used for debugging a kernel, and should never be
    3370             :  * incorporated into normal use.
    3371             :  *
    3372             :  * trace_array_printk() can be used, as it will not add noise to the
    3373             :  * top level tracing buffer.
    3374             :  *
    3375             :  * Note, trace_array_init_printk() must be called on @tr before this
    3376             :  * can be used.
    3377             :  */
    3378             : __printf(3, 0)
    3379           0 : int trace_array_printk(struct trace_array *tr,
    3380             :                        unsigned long ip, const char *fmt, ...)
    3381             : {
    3382           0 :         int ret;
    3383           0 :         va_list ap;
    3384             : 
    3385           0 :         if (!tr)
    3386             :                 return -ENOENT;
    3387             : 
    3388             :         /* This is only allowed for created instances */
    3389           0 :         if (tr == &global_trace)
    3390             :                 return 0;
    3391             : 
    3392           0 :         if (!(tr->trace_flags & TRACE_ITER_PRINTK))
    3393             :                 return 0;
    3394             : 
    3395           0 :         va_start(ap, fmt);
    3396           0 :         ret = trace_array_vprintk(tr, ip, fmt, ap);
    3397           0 :         va_end(ap);
    3398           0 :         return ret;
    3399             : }
    3400             : EXPORT_SYMBOL_GPL(trace_array_printk);
    3401             : 
    3402             : /**
    3403             :  * trace_array_init_printk - Initialize buffers for trace_array_printk()
    3404             :  * @tr: The trace array to initialize the buffers for
    3405             :  *
    3406             :  * As trace_array_printk() only writes into instances, such calls are
    3407             :  * OK to have in the kernel (unlike trace_printk()). This needs to be
    3408             :  * called before trace_array_printk() can be used on a trace_array.
    3409             :  */
    3410           0 : int trace_array_init_printk(struct trace_array *tr)
    3411             : {
    3412           0 :         if (!tr)
    3413             :                 return -ENOENT;
    3414             : 
    3415             :         /* This is only allowed for created instances */
    3416           0 :         if (tr == &global_trace)
    3417             :                 return -EINVAL;
    3418             : 
    3419           0 :         return alloc_percpu_trace_buffer();
    3420             : }
    3421             : EXPORT_SYMBOL_GPL(trace_array_init_printk);
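                     : 
                     : /*
                     :  * A minimal usage sketch (hypothetical module code; the instance
                     :  * name "example" is made up): look up or create an instance,
                     :  * initialize its percpu buffers, then print into it.
                     :  */
                     : #if 0   /* example only, not part of the kernel */
                     : static int example_instance_printk(void)
                     : {
                     :         struct trace_array *tr;
                     :         int ret;
                     : 
                     :         tr = trace_array_get_by_name("example");
                     :         if (!tr)
                     :                 return -ENOMEM;
                     : 
                     :         ret = trace_array_init_printk(tr);
                     :         if (!ret)
                     :                 trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "example");
                     : 
                     :         trace_array_put(tr);
                     :         return ret;
                     : }
                     : #endif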
    3422             : 
    3423             : __printf(3, 4)
    3424           0 : int trace_array_printk_buf(struct trace_buffer *buffer,
    3425             :                            unsigned long ip, const char *fmt, ...)
    3426             : {
    3427           0 :         int ret;
    3428           0 :         va_list ap;
    3429             : 
    3430           0 :         if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
    3431             :                 return 0;
    3432             : 
    3433           0 :         va_start(ap, fmt);
    3434           0 :         ret = __trace_array_vprintk(buffer, ip, fmt, ap);
    3435           0 :         va_end(ap);
    3436           0 :         return ret;
    3437             : }
    3438             : 
    3439             : __printf(2, 0)
    3440           0 : int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
    3441             : {
    3442           0 :         return trace_array_vprintk(&global_trace, ip, fmt, args);
    3443             : }
    3444             : EXPORT_SYMBOL_GPL(trace_vprintk);
    3445             : 
    3446           0 : static void trace_iterator_increment(struct trace_iterator *iter)
    3447             : {
    3448           0 :         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
    3449             : 
    3450           0 :         iter->idx++;
    3451           0 :         if (buf_iter)
    3452           0 :                 ring_buffer_iter_advance(buf_iter);
    3453           0 : }
    3454             : 
    3455             : static struct trace_entry *
    3456           0 : peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
    3457             :                 unsigned long *lost_events)
    3458             : {
    3459           0 :         struct ring_buffer_event *event;
    3460           0 :         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
    3461             : 
    3462           0 :         if (buf_iter) {
    3463           0 :                 event = ring_buffer_iter_peek(buf_iter, ts);
    3464           0 :                 if (lost_events)
    3465           0 :                         *lost_events = ring_buffer_iter_dropped(buf_iter) ?
    3466           0 :                                 (unsigned long)-1 : 0;
    3467             :         } else {
    3468           0 :                 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
    3469             :                                          lost_events);
    3470             :         }
    3471             : 
    3472           0 :         if (event) {
    3473           0 :                 iter->ent_size = ring_buffer_event_length(event);
    3474           0 :                 return ring_buffer_event_data(event);
    3475             :         }
    3476           0 :         iter->ent_size = 0;
    3477           0 :         return NULL;
    3478             : }
    3479             : 
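                     : /*
                     :  * Find the next entry across all per-CPU buffers by peeking at each
                     :  * CPU's next entry and picking the one with the smallest timestamp:
                     :  * an O(#CPUs) min-scan merge of the per-CPU event streams.
                     :  */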
    3480             : static struct trace_entry *
    3481           0 : __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
    3482             :                   unsigned long *missing_events, u64 *ent_ts)
    3483             : {
    3484           0 :         struct trace_buffer *buffer = iter->array_buffer->buffer;
    3485           0 :         struct trace_entry *ent, *next = NULL;
    3486           0 :         unsigned long lost_events = 0, next_lost = 0;
    3487           0 :         int cpu_file = iter->cpu_file;
    3488           0 :         u64 next_ts = 0, ts;
    3489           0 :         int next_cpu = -1;
    3490           0 :         int next_size = 0;
    3491           0 :         int cpu;
    3492             : 
    3493             :         /*
    3494             :          * If we are in a per_cpu trace file, don't bother iterating over
    3495             :          * all CPUs; peek at that one CPU directly.
    3496             :          */
    3497           0 :         if (cpu_file > RING_BUFFER_ALL_CPUS) {
    3498           0 :                 if (ring_buffer_empty_cpu(buffer, cpu_file))
    3499             :                         return NULL;
    3500           0 :                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
    3501           0 :                 if (ent_cpu)
    3502           0 :                         *ent_cpu = cpu_file;
    3503             : 
    3504           0 :                 return ent;
    3505             :         }
    3506             : 
    3507           0 :         for_each_tracing_cpu(cpu) {
    3508             : 
    3509           0 :                 if (ring_buffer_empty_cpu(buffer, cpu))
    3510           0 :                         continue;
    3511             : 
    3512           0 :                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
    3513             : 
    3514             :                 /*
    3515             :                  * Pick the entry with the smallest timestamp:
    3516             :                  */
    3517           0 :                 if (ent && (!next || ts < next_ts)) {
    3518           0 :                         next = ent;
    3519           0 :                         next_cpu = cpu;
    3520           0 :                         next_ts = ts;
    3521           0 :                         next_lost = lost_events;
    3522           0 :                         next_size = iter->ent_size;
    3523             :                 }
    3524             :         }
    3525             : 
    3526           0 :         iter->ent_size = next_size;
    3527             : 
    3528           0 :         if (ent_cpu)
    3529           0 :                 *ent_cpu = next_cpu;
    3530             : 
    3531           0 :         if (ent_ts)
    3532           0 :                 *ent_ts = next_ts;
    3533             : 
    3534           0 :         if (missing_events)
    3535           0 :                 *missing_events = next_lost;
    3536             : 
    3537             :         return next;
    3538             : }
    3539             : 
    3540             : #define STATIC_FMT_BUF_SIZE     128
    3541             : static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
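                     : 
                     : /*
                     :  * iter->fmt normally points at a kmalloc()ed buffer that can be
                     :  * grown; when it is static_fmt_buf (used when allocation is unsafe,
                     :  * as in the ftrace_dump() path), trace_iter_expand_format() refuses
                     :  * to grow it and callers fall back to the original format string.
                     :  */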
    3542             : 
    3543           0 : static char *trace_iter_expand_format(struct trace_iterator *iter)
    3544             : {
    3545           0 :         char *tmp;
    3546             : 
    3547           0 :         if (iter->fmt == static_fmt_buf)
    3548             :                 return NULL;
    3549             : 
    3550           0 :         tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
    3551             :                        GFP_KERNEL);
    3552           0 :         if (tmp) {
    3553           0 :                 iter->fmt_size += STATIC_FMT_BUF_SIZE;
    3554           0 :                 iter->fmt = tmp;
    3555             :         }
    3556             : 
    3557             :         return tmp;
    3558             : }
    3559             : 
    3560           0 : const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
    3561             : {
    3562           0 :         const char *p, *new_fmt;
    3563           0 :         char *q;
    3564             : 
    3565           0 :         if (WARN_ON_ONCE(!fmt))
    3566             :                 return fmt;
    3567             : 
    3568           0 :         if (iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
    3569             :                 return fmt;
    3570             : 
    3571           0 :         p = fmt;
    3572           0 :         new_fmt = q = iter->fmt;
    3573           0 :         while (*p) {
    3574           0 :                 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
    3575           0 :                         if (!trace_iter_expand_format(iter))
    3576             :                                 return fmt;
    3577             : 
    3578           0 :                         q += iter->fmt - new_fmt;
    3579           0 :                         new_fmt = iter->fmt;
    3580             :                 }
    3581             : 
    3582           0 :                 *q++ = *p++;
    3583             : 
    3584             :                 /* Pass "%%" through and replace a bare %p with %px */
    3585           0 :                 if (p[-1] == '%') {
    3586           0 :                         if (p[0] == '%') {
    3587           0 :                                 *q++ = *p++;
    3588           0 :                         } else if (p[0] == 'p' && !isalnum(p[1])) {
    3589           0 :                                 *q++ = *p++;
    3590           0 :                                 *q++ = 'x';
    3591             :                         }
    3592             :                 }
    3593             :         }
    3594           0 :         *q = '\0';
    3595             : 
    3596           0 :         return new_fmt;
    3597             : }
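                     : 
                     : /*
                     :  * For example, when TRACE_ITER_HASH_PTR is cleared:
                     :  *
                     :  *      "ptr=%p id=%d done 100%%"  ->  "ptr=%px id=%d done 100%%"
                     :  *
                     :  * An escaped "%%" passes through untouched, and pointer extensions
                     :  * such as "%pS" are left alone because the character after 'p' is
                     :  * alphanumeric.
                     :  */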
    3598             : 
    3599             : #define STATIC_TEMP_BUF_SIZE    128
    3600             : static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
    3601             : 
    3602             : /* Find the next real entry, without updating the iterator itself */
    3603           0 : struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
    3604             :                                           int *ent_cpu, u64 *ent_ts)
    3605             : {
    3606             :         /* __find_next_entry will reset ent_size */
    3607           0 :         int ent_size = iter->ent_size;
    3608           0 :         struct trace_entry *entry;
    3609             : 
    3610             :         /*
    3611             :          * If called from ftrace_dump(), then the iter->temp buffer
    3612             :          * will be the static_temp_buf and not created from kmalloc.
    3613             :          * If the entry size is greater than the buffer, we cannot
    3614             :          * save it. Just return NULL in that case. This is only
    3615             :          * used to add markers when two consecutive events' time
    3616             :          * stamps have a large delta. See trace_print_lat_context().
    3617             :          */
    3618           0 :         if (iter->temp == static_temp_buf &&
    3619             :             STATIC_TEMP_BUF_SIZE < ent_size)
    3620             :                 return NULL;
    3621             : 
    3622             :         /*
    3623             :          * __find_next_entry() may call peek_next_entry(), which may call
    3624             :          * ring_buffer_peek(), which can make the contents of iter->ent
    3625             :          * undefined. We need to copy iter->ent now.
    3626             :          */
    3627           0 :         if (iter->ent && iter->ent != iter->temp) {
    3628           0 :                 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
    3629           0 :                     !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
    3630           0 :                         void *temp;
    3631           0 :                         temp = kmalloc(iter->ent_size, GFP_KERNEL);
    3632           0 :                         if (!temp)
    3633             :                                 return NULL;
    3634           0 :                         kfree(iter->temp);
    3635           0 :                         iter->temp = temp;
    3636           0 :                         iter->temp_size = iter->ent_size;
    3637             :                 }
    3638           0 :                 memcpy(iter->temp, iter->ent, iter->ent_size);
    3639           0 :                 iter->ent = iter->temp;
    3640             :         }
    3641           0 :         entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
    3642             :         /* Put back the original ent_size */
    3643           0 :         iter->ent_size = ent_size;
    3644             : 
    3645           0 :         return entry;
    3646             : }
    3647             : 
    3648             : /* Find the next real entry, and increment the iterator to the next entry */
    3649           0 : void *trace_find_next_entry_inc(struct trace_iterator *iter)
    3650             : {
    3651           0 :         iter->ent = __find_next_entry(iter, &iter->cpu,
    3652             :                                       &iter->lost_events, &iter->ts);
    3653             : 
    3654           0 :         if (iter->ent)
    3655           0 :                 trace_iterator_increment(iter);
    3656             : 
    3657           0 :         return iter->ent ? iter : NULL;
    3658             : }
    3659             : 
    3660           0 : static void trace_consume(struct trace_iterator *iter)
    3661             : {
    3662           0 :         ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
    3663             :                             &iter->lost_events);
    3664           0 : }
    3665             : 
    3666           0 : static void *s_next(struct seq_file *m, void *v, loff_t *pos)
    3667             : {
    3668           0 :         struct trace_iterator *iter = m->private;
    3669           0 :         int i = (int)*pos;
    3670           0 :         void *ent;
    3671             : 
    3672           0 :         WARN_ON_ONCE(iter->leftover);
    3673             : 
    3674           0 :         (*pos)++;
    3675             : 
    3676             :         /* can't go backwards */
    3677           0 :         if (iter->idx > i)
    3678             :                 return NULL;
    3679             : 
    3680           0 :         if (iter->idx < 0)
    3681           0 :                 ent = trace_find_next_entry_inc(iter);
    3682             :         else
    3683             :                 ent = iter;
    3684             : 
    3685           0 :         while (ent && iter->idx < i)
    3686           0 :                 ent = trace_find_next_entry_inc(iter);
    3687             : 
    3688           0 :         iter->pos = *pos;
    3689             : 
    3690           0 :         return ent;
    3691             : }
    3692             : 
    3693           0 : void tracing_iter_reset(struct trace_iterator *iter, int cpu)
    3694             : {
    3695           0 :         struct ring_buffer_iter *buf_iter;
    3696           0 :         unsigned long entries = 0;
    3697           0 :         u64 ts;
    3698             : 
    3699           0 :         per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
    3700             : 
    3701           0 :         buf_iter = trace_buffer_iter(iter, cpu);
    3702           0 :         if (!buf_iter)
    3703           0 :                 return;
    3704             : 
    3705           0 :         ring_buffer_iter_reset(buf_iter);
    3706             : 
    3707             :         /*
    3708             :          * With the max latency tracers, it is possible that a reset
    3709             :          * never took place on a CPU. This is evident when the
    3710             :          * timestamp is before the start of the buffer.
    3711             :          */
    3712           0 :         while (ring_buffer_iter_peek(buf_iter, &ts)) {
    3713           0 :                 if (ts >= iter->array_buffer->time_start)
    3714             :                         break;
    3715           0 :                 entries++;
    3716           0 :                 ring_buffer_iter_advance(buf_iter);
    3717             :         }
    3718             : 
    3719           0 :         per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
    3720             : }
    3721             : 
    3722             : /*
    3723             :  * The current tracer is copied to avoid taking a global lock
    3724             :  * everywhere.
    3725             :  */
    3726           0 : static void *s_start(struct seq_file *m, loff_t *pos)
    3727             : {
    3728           0 :         struct trace_iterator *iter = m->private;
    3729           0 :         struct trace_array *tr = iter->tr;
    3730           0 :         int cpu_file = iter->cpu_file;
    3731           0 :         void *p = NULL;
    3732           0 :         loff_t l = 0;
    3733           0 :         int cpu;
    3734             : 
    3735             :         /*
    3736             :          * Copy the tracer to avoid using a global lock all around.
    3737             :          * iter->trace is a copy of current_trace, so the name pointer
    3738             :          * may be compared instead of using strcmp(), as iter->trace->name
    3739             :          * will point to the same string as current_trace->name.
    3740             :          */
    3741           0 :         mutex_lock(&trace_types_lock);
    3742           0 :         if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
    3743           0 :                 *iter->trace = *tr->current_trace;
    3744           0 :         mutex_unlock(&trace_types_lock);
    3745             : 
    3746             : #ifdef CONFIG_TRACER_MAX_TRACE
    3747             :         if (iter->snapshot && iter->trace->use_max_tr)
    3748             :                 return ERR_PTR(-EBUSY);
    3749             : #endif
    3750             : 
    3751           0 :         if (!iter->snapshot)
    3752           0 :                 atomic_inc(&trace_record_taskinfo_disabled);
    3753             : 
    3754           0 :         if (*pos != iter->pos) {
    3755           0 :                 iter->ent = NULL;
    3756           0 :                 iter->cpu = 0;
    3757           0 :                 iter->idx = -1;
    3758             : 
    3759           0 :                 if (cpu_file == RING_BUFFER_ALL_CPUS) {
    3760           0 :                         for_each_tracing_cpu(cpu)
    3761           0 :                                 tracing_iter_reset(iter, cpu);
    3762             :                 } else
    3763           0 :                         tracing_iter_reset(iter, cpu_file);
    3764             : 
    3765           0 :                 iter->leftover = 0;
    3766           0 :                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
    3767           0 :                         ;
    3768             : 
    3769             :         } else {
    3770             :                 /*
    3771             :                  * If we overflowed the seq_file before, then we want
    3772             :                  * to reuse the trace_seq buffer.
    3773             :                  */
    3774           0 :                 if (iter->leftover)
    3775             :                         p = iter;
    3776             :                 else {
    3777           0 :                         l = *pos - 1;
    3778           0 :                         p = s_next(m, p, &l);
    3779             :                 }
    3780             :         }
    3781             : 
    3782           0 :         trace_event_read_lock();
    3783           0 :         trace_access_lock(cpu_file);
    3784           0 :         return p;
    3785             : }
    3786             : 
    3787           0 : static void s_stop(struct seq_file *m, void *p)
    3788             : {
    3789           0 :         struct trace_iterator *iter = m->private;
    3790             : 
    3791             : #ifdef CONFIG_TRACER_MAX_TRACE
    3792             :         if (iter->snapshot && iter->trace->use_max_tr)
    3793             :                 return;
    3794             : #endif
    3795             : 
    3796           0 :         if (!iter->snapshot)
    3797           0 :                 atomic_dec(&trace_record_taskinfo_disabled);
    3798             : 
    3799           0 :         trace_access_unlock(iter->cpu_file);
    3800           0 :         trace_event_read_unlock();
    3801           0 : }
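                     : 
                     : /*
                     :  * s_start(), s_next() and s_stop() are the seq_file hooks for
                     :  * reading the trace file; later in this file they are collected,
                     :  * together with a show callback, into a table roughly like:
                     :  *
                     :  *      static const struct seq_operations tracer_seq_ops = {
                     :  *              .start  = s_start,
                     :  *              .next   = s_next,
                     :  *              .stop   = s_stop,
                     :  *              .show   = s_show,
                     :  *      };
                     :  */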
    3802             : 
    3803             : static void
    3804           0 : get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
    3805             :                       unsigned long *entries, int cpu)
    3806             : {
    3807           0 :         unsigned long count;
    3808             : 
    3809           0 :         count = ring_buffer_entries_cpu(buf->buffer, cpu);
    3810             :         /*
    3811             :          * If this buffer has skipped entries, then we hold all
    3812             :          * entries for the trace and we need to ignore the
    3813             :          * ones before the time stamp.
    3814             :          */
    3815           0 :         if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
    3816           0 :                 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
    3817             :                 /* total is the same as the entries */
    3818           0 :                 *total = count;
    3819             :         } else
    3820           0 :                 *total = count +
    3821           0 :                         ring_buffer_overrun_cpu(buf->buffer, cpu);
    3822           0 :         *entries = count;
    3823           0 : }
    3824             : 
    3825             : static void
    3826           0 : get_total_entries(struct array_buffer *buf,
    3827             :                   unsigned long *total, unsigned long *entries)
    3828             : {
    3829           0 :         unsigned long t, e;
    3830           0 :         int cpu;
    3831             : 
    3832           0 :         *total = 0;
    3833           0 :         *entries = 0;
    3834             : 
    3835           0 :         for_each_tracing_cpu(cpu) {
    3836           0 :                 get_total_entries_cpu(buf, &t, &e, cpu);
    3837           0 :                 *total += t;
    3838           0 :                 *entries += e;
    3839             :         }
    3840           0 : }
    3841             : 
    3842           0 : unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
    3843             : {
    3844           0 :         unsigned long total, entries;
    3845             : 
    3846           0 :         if (!tr)
    3847           0 :                 tr = &global_trace;
    3848             : 
    3849           0 :         get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
    3850             : 
    3851           0 :         return entries;
    3852             : }
    3853             : 
    3854           0 : unsigned long trace_total_entries(struct trace_array *tr)
    3855             : {
    3856           0 :         unsigned long total, entries;
    3857             : 
    3858           0 :         if (!tr)
    3859           0 :                 tr = &global_trace;
    3860             : 
    3861           0 :         get_total_entries(&tr->array_buffer, &total, &entries);
    3862             : 
    3863           0 :         return entries;
    3864             : }
    3865             : 
    3866           0 : static void print_lat_help_header(struct seq_file *m)
    3867             : {
    3868           0 :         seq_puts(m, "#                    _------=> CPU#            \n"
    3869             :                     "#                   / _-----=> irqs-off        \n"
    3870             :                     "#                  | / _----=> need-resched    \n"
    3871             :                     "#                  || / _---=> hardirq/softirq \n"
    3872             :                     "#                  ||| / _--=> preempt-depth   \n"
    3873             :                     "#                  |||| /     delay            \n"
    3874             :                     "#  cmd     pid     ||||| time  |   caller      \n"
    3875             :                     "#     \\   /        |||||  \\    |   /         \n");
    3876           0 : }
    3877             : 
    3878           0 : static void print_event_info(struct array_buffer *buf, struct seq_file *m)
    3879             : {
    3880           0 :         unsigned long total;
    3881           0 :         unsigned long entries;
    3882             : 
    3883           0 :         get_total_entries(buf, &total, &entries);
    3884           0 :         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
    3885             :                    entries, total, num_online_cpus());
    3886           0 :         seq_puts(m, "#\n");
    3887           0 : }
    3888             : 
    3889           0 : static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
    3890             :                                    unsigned int flags)
    3891             : {
    3892           0 :         bool tgid = flags & TRACE_ITER_RECORD_TGID;
    3893             : 
    3894           0 :         print_event_info(buf, m);
    3895             : 
    3896           0 :         seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
    3897           0 :         seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
    3898           0 : }
    3899             : 
    3900           0 : static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
    3901             :                                        unsigned int flags)
    3902             : {
    3903           0 :         bool tgid = flags & TRACE_ITER_RECORD_TGID;
    3904           0 :         const char *space = "            ";
    3905           0 :         int prec = tgid ? 12 : 2;
    3906             : 
    3907           0 :         print_event_info(buf, m);
    3908             : 
    3909           0 :         seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
    3910           0 :         seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
    3911           0 :         seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
    3912           0 :         seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
    3913           0 :         seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
    3914           0 :         seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
    3915           0 :         seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "       |    ");
    3916           0 : }
    3917             : 
    3918             : void
    3919           0 : print_trace_header(struct seq_file *m, struct trace_iterator *iter)
    3920             : {
    3921           0 :         unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
    3922           0 :         struct array_buffer *buf = iter->array_buffer;
    3923           0 :         struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
    3924           0 :         struct tracer *type = iter->trace;
    3925           0 :         unsigned long entries;
    3926           0 :         unsigned long total;
    3927           0 :         const char *name;
    3928             : 
    3929           0 :         name = type->name;
    3930             : 
    3931           0 :         get_total_entries(buf, &total, &entries);
    3932             : 
    3933           0 :         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
    3934             :                    name, UTS_RELEASE);
    3935           0 :         seq_puts(m, "# -----------------------------------"
    3936             :                  "---------------------------------\n");
    3937           0 :         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
    3938             :                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
    3939             :                    nsecs_to_usecs(data->saved_latency),
    3940             :                    entries,
    3941             :                    total,
    3942             :                    buf->cpu,
    3943             : #if defined(CONFIG_PREEMPT_NONE)
    3944             :                    "server",
    3945             : #elif defined(CONFIG_PREEMPT_VOLUNTARY)
    3946             :                    "desktop",
    3947             : #elif defined(CONFIG_PREEMPT)
    3948             :                    "preempt",
    3949             : #elif defined(CONFIG_PREEMPT_RT)
    3950             :                    "preempt_rt",
    3951             : #else
    3952             :                    "unknown",
    3953             : #endif
    3954             :                    /* These are reserved for later use */
    3955             :                    0, 0, 0, 0);
    3956             : #ifdef CONFIG_SMP
    3957           0 :         seq_printf(m, " #P:%d)\n", num_online_cpus());
    3958             : #else
    3959             :         seq_puts(m, ")\n");
    3960             : #endif
    3961           0 :         seq_puts(m, "#    -----------------\n");
    3962           0 :         seq_printf(m, "#    | task: %.16s-%d "
    3963             :                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
    3964           0 :                    data->comm, data->pid,
    3965             :                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
    3966             :                    data->policy, data->rt_priority);
    3967           0 :         seq_puts(m, "#    -----------------\n");
    3968             : 
    3969           0 :         if (data->critical_start) {
    3970           0 :                 seq_puts(m, "#  => started at: ");
    3971           0 :                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
    3972           0 :                 trace_print_seq(m, &iter->seq);
    3973           0 :                 seq_puts(m, "\n#  => ended at:   ");
    3974           0 :                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
    3975           0 :                 trace_print_seq(m, &iter->seq);
    3976           0 :                 seq_puts(m, "\n#\n");
    3977             :         }
    3978             : 
    3979           0 :         seq_puts(m, "#\n");
    3980           0 : }
    3981             : 
    3982           0 : static void test_cpu_buff_start(struct trace_iterator *iter)
    3983             : {
    3984           0 :         struct trace_seq *s = &iter->seq;
    3985           0 :         struct trace_array *tr = iter->tr;
    3986             : 
    3987           0 :         if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
    3988             :                 return;
    3989             : 
    3990           0 :         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
    3991             :                 return;
    3992             : 
    3993           0 :         if (cpumask_available(iter->started) &&
    3994           0 :             cpumask_test_cpu(iter->cpu, iter->started))
    3995             :                 return;
    3996             : 
    3997           0 :         if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
    3998             :                 return;
    3999             : 
    4000           0 :         if (cpumask_available(iter->started))
    4001           0 :                 cpumask_set_cpu(iter->cpu, iter->started);
    4002             : 
    4003             :         /* Don't print the "CPU buffer started" message for the first entry of the trace */
    4004           0 :         if (iter->idx > 1)
    4005           0 :                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
    4006             :                                 iter->cpu);
    4007             : }
    4008             : 
    4009           0 : static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
    4010             : {
    4011           0 :         struct trace_array *tr = iter->tr;
    4012           0 :         struct trace_seq *s = &iter->seq;
    4013           0 :         unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
    4014           0 :         struct trace_entry *entry;
    4015           0 :         struct trace_event *event;
    4016             : 
    4017           0 :         entry = iter->ent;
    4018             : 
    4019           0 :         test_cpu_buff_start(iter);
    4020             : 
    4021           0 :         event = ftrace_find_event(entry->type);
    4022             : 
    4023           0 :         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
    4024           0 :                 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
    4025           0 :                         trace_print_lat_context(iter);
    4026             :                 else
    4027           0 :                         trace_print_context(iter);
    4028             :         }
    4029             : 
    4030           0 :         if (trace_seq_has_overflowed(s))
    4031             :                 return TRACE_TYPE_PARTIAL_LINE;
    4032             : 
    4033           0 :         if (event)
    4034           0 :                 return event->funcs->trace(iter, sym_flags, event);
    4035             : 
    4036           0 :         trace_seq_printf(s, "Unknown type %d\n", entry->type);
    4037             : 
    4038           0 :         return trace_handle_return(s);
    4039             : }
    4040             : 
    4041           0 : static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
    4042             : {
    4043           0 :         struct trace_array *tr = iter->tr;
    4044           0 :         struct trace_seq *s = &iter->seq;
    4045           0 :         struct trace_entry *entry;
    4046           0 :         struct trace_event *event;
    4047             : 
    4048           0 :         entry = iter->ent;
    4049             : 
    4050           0 :         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
    4051           0 :                 trace_seq_printf(s, "%d %d %llu ",
    4052             :                                  entry->pid, iter->cpu, iter->ts);
    4053             : 
    4054           0 :         if (trace_seq_has_overflowed(s))
    4055             :                 return TRACE_TYPE_PARTIAL_LINE;
    4056             : 
    4057           0 :         event = ftrace_find_event(entry->type);
    4058           0 :         if (event)
    4059           0 :                 return event->funcs->raw(iter, 0, event);
    4060             : 
    4061           0 :         trace_seq_printf(s, "%d ?\n", entry->type);
    4062             : 
    4063           0 :         return trace_handle_return(s);
    4064             : }
    4065             : 
    4066           0 : static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
    4067             : {
    4068           0 :         struct trace_array *tr = iter->tr;
    4069           0 :         struct trace_seq *s = &iter->seq;
    4070           0 :         unsigned char newline = '\n';
    4071           0 :         struct trace_entry *entry;
    4072           0 :         struct trace_event *event;
    4073             : 
    4074           0 :         entry = iter->ent;
    4075             : 
    4076           0 :         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
    4077           0 :                 SEQ_PUT_HEX_FIELD(s, entry->pid);
    4078           0 :                 SEQ_PUT_HEX_FIELD(s, iter->cpu);
    4079           0 :                 SEQ_PUT_HEX_FIELD(s, iter->ts);
    4080           0 :                 if (trace_seq_has_overflowed(s))
    4081             :                         return TRACE_TYPE_PARTIAL_LINE;
    4082             :         }
    4083             : 
    4084           0 :         event = ftrace_find_event(entry->type);
    4085           0 :         if (event) {
    4086           0 :                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
    4087           0 :                 if (ret != TRACE_TYPE_HANDLED)
    4088             :                         return ret;
    4089             :         }
    4090             : 
    4091           0 :         SEQ_PUT_FIELD(s, newline);
    4092             : 
    4093           0 :         return trace_handle_return(s);
    4094             : }
    4095             : 
    4096           0 : static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
    4097             : {
    4098           0 :         struct trace_array *tr = iter->tr;
    4099           0 :         struct trace_seq *s = &iter->seq;
    4100           0 :         struct trace_entry *entry;
    4101           0 :         struct trace_event *event;
    4102             : 
    4103           0 :         entry = iter->ent;
    4104             : 
    4105           0 :         if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
    4106           0 :                 SEQ_PUT_FIELD(s, entry->pid);
    4107           0 :                 SEQ_PUT_FIELD(s, iter->cpu);
    4108           0 :                 SEQ_PUT_FIELD(s, iter->ts);
    4109           0 :                 if (trace_seq_has_overflowed(s))
    4110             :                         return TRACE_TYPE_PARTIAL_LINE;
    4111             :         }
    4112             : 
    4113           0 :         event = ftrace_find_event(entry->type);
    4114           0 :         return event ? event->funcs->binary(iter, 0, event) :
    4115             :                 TRACE_TYPE_HANDLED;
    4116             : }
    4117             : 
    4118           0 : int trace_empty(struct trace_iterator *iter)
    4119             : {
    4120           0 :         struct ring_buffer_iter *buf_iter;
    4121           0 :         int cpu;
    4122             : 
    4123             :         /* If we are looking at one CPU buffer, only check that one */
    4124           0 :         if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
    4125           0 :                 cpu = iter->cpu_file;
    4126           0 :                 buf_iter = trace_buffer_iter(iter, cpu);
    4127           0 :                 if (buf_iter) {
    4128           0 :                         if (!ring_buffer_iter_empty(buf_iter))
    4129             :                                 return 0;
    4130             :                 } else {
    4131           0 :                         if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
    4132             :                                 return 0;
    4133             :                 }
    4134           0 :                 return 1;
    4135             :         }
    4136             : 
    4137           0 :         for_each_tracing_cpu(cpu) {
    4138           0 :                 buf_iter = trace_buffer_iter(iter, cpu);
    4139           0 :                 if (buf_iter) {
    4140           0 :                         if (!ring_buffer_iter_empty(buf_iter))
    4141             :                                 return 0;
    4142             :                 } else {
    4143           0 :                         if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
    4144             :                                 return 0;
    4145             :                 }
    4146             :         }
    4147             : 
    4148             :         return 1;
    4149             : }
    4150             : 
    4151             : /*  Called with trace_event_read_lock() held. */
    4152           0 : enum print_line_t print_trace_line(struct trace_iterator *iter)
    4153             : {
    4154           0 :         struct trace_array *tr = iter->tr;
    4155           0 :         unsigned long trace_flags = tr->trace_flags;
    4156           0 :         enum print_line_t ret;
    4157             : 
    4158           0 :         if (iter->lost_events) {
    4159           0 :                 if (iter->lost_events == (unsigned long)-1)
    4160           0 :                         trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
    4161             :                                          iter->cpu);
    4162             :                 else
    4163           0 :                         trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
    4164             :                                          iter->cpu, iter->lost_events);
    4165           0 :                 if (trace_seq_has_overflowed(&iter->seq))
    4166             :                         return TRACE_TYPE_PARTIAL_LINE;
    4167             :         }
    4168             : 
    4169           0 :         if (iter->trace && iter->trace->print_line) {
    4170           0 :                 ret = iter->trace->print_line(iter);
    4171           0 :                 if (ret != TRACE_TYPE_UNHANDLED)
    4172             :                         return ret;
    4173             :         }
    4174             : 
    4175           0 :         if (iter->ent->type == TRACE_BPUTS &&
    4176           0 :                         trace_flags & TRACE_ITER_PRINTK &&
    4177             :                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
    4178           0 :                 return trace_print_bputs_msg_only(iter);
    4179             : 
    4180           0 :         if (iter->ent->type == TRACE_BPRINT &&
    4181           0 :                         trace_flags & TRACE_ITER_PRINTK &&
    4182             :                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
    4183           0 :                 return trace_print_bprintk_msg_only(iter);
    4184             : 
    4185           0 :         if (iter->ent->type == TRACE_PRINT &&
    4186           0 :                         trace_flags & TRACE_ITER_PRINTK &&
    4187             :                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
    4188           0 :                 return trace_print_printk_msg_only(iter);
    4189             : 
    4190           0 :         if (trace_flags & TRACE_ITER_BIN)
    4191           0 :                 return print_bin_fmt(iter);
    4192             : 
    4193           0 :         if (trace_flags & TRACE_ITER_HEX)
    4194           0 :                 return print_hex_fmt(iter);
    4195             : 
    4196           0 :         if (trace_flags & TRACE_ITER_RAW)
    4197           0 :                 return print_raw_fmt(iter);
    4198             : 
    4199           0 :         return print_trace_fmt(iter);
    4200             : }
    4201             : 
    4202           0 : void trace_latency_header(struct seq_file *m)
    4203             : {
    4204           0 :         struct trace_iterator *iter = m->private;
    4205           0 :         struct trace_array *tr = iter->tr;
    4206             : 
    4207             :         /* print nothing if the buffers are empty */
    4208           0 :         if (trace_empty(iter))
    4209             :                 return;
    4210             : 
    4211           0 :         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
    4212           0 :                 print_trace_header(m, iter);
    4213             : 
    4214           0 :         if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
    4215           0 :                 print_lat_help_header(m);
    4216             : }
    4217             : 
    4218           0 : void trace_default_header(struct seq_file *m)
    4219             : {
    4220           0 :         struct trace_iterator *iter = m->private;
    4221           0 :         struct trace_array *tr = iter->tr;
    4222           0 :         unsigned long trace_flags = tr->trace_flags;
    4223             : 
    4224           0 :         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
    4225             :                 return;
    4226             : 
    4227           0 :         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
    4228             :                 /* print nothing if the buffers are empty */
    4229           0 :                 if (trace_empty(iter))
    4230             :                         return;
    4231           0 :                 print_trace_header(m, iter);
    4232           0 :                 if (!(trace_flags & TRACE_ITER_VERBOSE))
    4233           0 :                         print_lat_help_header(m);
    4234             :         } else {
    4235           0 :                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
    4236           0 :                         if (trace_flags & TRACE_ITER_IRQ_INFO)
    4237           0 :                                 print_func_help_header_irq(iter->array_buffer,
    4238             :                                                            m, trace_flags);
    4239             :                         else
    4240           0 :                                 print_func_help_header(iter->array_buffer, m,
    4241             :                                                        trace_flags);
    4242             :                 }
    4243             :         }
    4244             : }
    4245             : 
    4246           0 : static void test_ftrace_alive(struct seq_file *m)
    4247             : {
    4248           0 :         if (!ftrace_is_dead())
    4249           0 :                 return;
    4250             :         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
    4251             :                     "#          MAY BE MISSING FUNCTION EVENTS\n");
    4252             : }
    4253             : 
    4254             : #ifdef CONFIG_TRACER_MAX_TRACE
    4255             : static void show_snapshot_main_help(struct seq_file *m)
    4256             : {
    4257             :         seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
    4258             :                     "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
    4259             :                     "#                      Takes a snapshot of the main buffer.\n"
    4260             :                     "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
     4261             :                     "#                      (Doesn't have to be '2', works with any number that\n"
    4262             :                     "#                       is not a '0' or '1')\n");
    4263             : }
    4264             : 
    4265             : static void show_snapshot_percpu_help(struct seq_file *m)
    4266             : {
    4267             :         seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
    4268             : #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
    4269             :         seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
    4270             :                     "#                      Takes a snapshot of the main buffer for this cpu.\n");
    4271             : #else
    4272             :         seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
    4273             :                     "#                     Must use main snapshot file to allocate.\n");
    4274             : #endif
    4275             :         seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
     4276             :                     "#                      (Doesn't have to be '2', works with any number that\n"
    4277             :                     "#                       is not a '0' or '1')\n");
    4278             : }
    4279             : 
    4280             : static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
    4281             : {
    4282             :         if (iter->tr->allocated_snapshot)
    4283             :                 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
    4284             :         else
    4285             :                 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
    4286             : 
    4287             :         seq_puts(m, "# Snapshot commands:\n");
    4288             :         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
    4289             :                 show_snapshot_main_help(m);
    4290             :         else
    4291             :                 show_snapshot_percpu_help(m);
    4292             : }
    4293             : #else
    4294             : /* Should never be called */
    4295             : static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
    4296             : #endif
    4297             : 
    4298           0 : static int s_show(struct seq_file *m, void *v)
    4299             : {
    4300           0 :         struct trace_iterator *iter = v;
    4301           0 :         int ret;
    4302             : 
    4303           0 :         if (iter->ent == NULL) {
    4304           0 :                 if (iter->tr) {
    4305           0 :                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
    4306           0 :                         seq_puts(m, "#\n");
    4307           0 :                         test_ftrace_alive(m);
    4308             :                 }
    4309           0 :                 if (iter->snapshot && trace_empty(iter))
    4310           0 :                         print_snapshot_help(m, iter);
    4311           0 :                 else if (iter->trace && iter->trace->print_header)
    4312           0 :                         iter->trace->print_header(m);
    4313             :                 else
    4314           0 :                         trace_default_header(m);
    4315             : 
    4316           0 :         } else if (iter->leftover) {
    4317             :                 /*
    4318             :                  * If we filled the seq_file buffer earlier, we
    4319             :                  * want to just show it now.
    4320             :                  */
    4321           0 :                 ret = trace_print_seq(m, &iter->seq);
    4322             : 
     4323             :                 /* ret should be zero this time, but you never know */
    4324           0 :                 iter->leftover = ret;
    4325             : 
    4326             :         } else {
    4327           0 :                 print_trace_line(iter);
    4328           0 :                 ret = trace_print_seq(m, &iter->seq);
    4329             :                 /*
    4330             :                  * If we overflow the seq_file buffer, then it will
     4331             :                  * ask us for this data again on the next call.
     4332             :                  * Save the result so that call can use it.
    4333             :                  *  ret is 0 if seq_file write succeeded.
    4334             :                  *        -1 otherwise.
    4335             :                  */
    4336           0 :                 iter->leftover = ret;
    4337             :         }
    4338             : 
    4339           0 :         return 0;
    4340             : }
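                      : 
                      : /*
                      :  * A sketch of the retry protocol used above, assuming the usual
                      :  * seq_file semantics: when trace_print_seq() cannot copy all of
                      :  * iter->seq into the seq_file buffer it returns -1 and leaves the
                      :  * text in iter->seq; seq_file then grows its buffer and calls
                      :  * ->show() again, and the iter->leftover branch flushes the saved
                      :  * line instead of formatting a new one.
                      :  */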
    4341             : 
    4342             : /*
     4343             :  * Should be used after trace_array_get(); trace_types_lock
    4344             :  * ensures that i_cdev was already initialized.
    4345             :  */
    4346           0 : static inline int tracing_get_cpu(struct inode *inode)
    4347             : {
    4348           0 :         if (inode->i_cdev) /* See trace_create_cpu_file() */
    4349           0 :                 return (long)inode->i_cdev - 1;
    4350             :         return RING_BUFFER_ALL_CPUS;
    4351             : }
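                      : 
                      : /*
                      :  * A minimal sketch of the encoding decoded above (assumed shape of
                      :  * trace_create_cpu_file(), which the comment references): the
                      :  * per-cpu file is expected to store cpu + 1 in i_cdev, so that a
                      :  * NULL i_cdev can stand for "all CPUs":
                      :  *
                      :  *     inode->i_cdev = (void *)(long)(cpu + 1);
                      :  */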
    4352             : 
    4353             : static const struct seq_operations tracer_seq_ops = {
    4354             :         .start          = s_start,
    4355             :         .next           = s_next,
    4356             :         .stop           = s_stop,
    4357             :         .show           = s_show,
    4358             : };
    4359             : 
    4360             : static struct trace_iterator *
    4361           0 : __tracing_open(struct inode *inode, struct file *file, bool snapshot)
    4362             : {
    4363           0 :         struct trace_array *tr = inode->i_private;
    4364           0 :         struct trace_iterator *iter;
    4365           0 :         int cpu;
    4366             : 
    4367           0 :         if (tracing_disabled)
    4368           0 :                 return ERR_PTR(-ENODEV);
    4369             : 
    4370           0 :         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
    4371           0 :         if (!iter)
    4372           0 :                 return ERR_PTR(-ENOMEM);
    4373             : 
    4374           0 :         iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
    4375             :                                     GFP_KERNEL);
    4376           0 :         if (!iter->buffer_iter)
    4377           0 :                 goto release;
    4378             : 
    4379             :         /*
    4380             :          * trace_find_next_entry() may need to save off iter->ent.
     4381             :          * It will place it into the iter->temp buffer. As most
     4382             :          * events are smaller than 128 bytes, allocate a buffer of that size.
     4383             :          * If one is bigger, trace_find_next_entry() will
     4384             :          * allocate a new buffer to fit the bigger iter->ent.
    4385             :          * It's not critical if it fails to get allocated here.
    4386             :          */
    4387           0 :         iter->temp = kmalloc(128, GFP_KERNEL);
    4388           0 :         if (iter->temp)
    4389           0 :                 iter->temp_size = 128;
    4390             : 
    4391             :         /*
    4392             :          * trace_event_printf() may need to modify given format
    4393             :          * string to replace %p with %px so that it shows real address
     4394             :          * instead of a hash value. However, that is only needed for
     4395             :          * event tracing; other tracers may not need it. Defer the
     4396             :          * allocation until it is needed.
    4397             :          */
    4398           0 :         iter->fmt = NULL;
    4399           0 :         iter->fmt_size = 0;
    4400             : 
    4401             :         /*
    4402             :          * We make a copy of the current tracer to avoid concurrent
    4403             :          * changes on it while we are reading.
    4404             :          */
    4405           0 :         mutex_lock(&trace_types_lock);
    4406           0 :         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
    4407           0 :         if (!iter->trace)
    4408           0 :                 goto fail;
    4409             : 
    4410           0 :         *iter->trace = *tr->current_trace;
    4411             : 
    4412           0 :         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
    4413             :                 goto fail;
    4414             : 
    4415           0 :         iter->tr = tr;
    4416             : 
    4417             : #ifdef CONFIG_TRACER_MAX_TRACE
    4418             :         /* Currently only the top directory has a snapshot */
    4419             :         if (tr->current_trace->print_max || snapshot)
    4420             :                 iter->array_buffer = &tr->max_buffer;
    4421             :         else
    4422             : #endif
    4423           0 :                 iter->array_buffer = &tr->array_buffer;
    4424           0 :         iter->snapshot = snapshot;
    4425           0 :         iter->pos = -1;
    4426           0 :         iter->cpu_file = tracing_get_cpu(inode);
    4427           0 :         mutex_init(&iter->mutex);
    4428             : 
    4429             :         /* Notify the tracer early; before we stop tracing. */
    4430           0 :         if (iter->trace->open)
    4431           0 :                 iter->trace->open(iter);
    4432             : 
    4433             :         /* Annotate start of buffers if we had overruns */
    4434           0 :         if (ring_buffer_overruns(iter->array_buffer->buffer))
    4435           0 :                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
    4436             : 
    4437             :         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
    4438           0 :         if (trace_clocks[tr->clock_id].in_ns)
    4439           0 :                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
    4440             : 
    4441             :         /*
    4442             :          * If pause-on-trace is enabled, then stop the trace while
    4443             :          * dumping, unless this is the "snapshot" file
    4444             :          */
    4445           0 :         if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
    4446           0 :                 tracing_stop_tr(tr);
    4447             : 
    4448           0 :         if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
    4449           0 :                 for_each_tracing_cpu(cpu) {
    4450           0 :                         iter->buffer_iter[cpu] =
    4451           0 :                                 ring_buffer_read_prepare(iter->array_buffer->buffer,
    4452             :                                                          cpu, GFP_KERNEL);
    4453             :                 }
    4454           0 :                 ring_buffer_read_prepare_sync();
    4455           0 :                 for_each_tracing_cpu(cpu) {
    4456           0 :                         ring_buffer_read_start(iter->buffer_iter[cpu]);
    4457           0 :                         tracing_iter_reset(iter, cpu);
    4458             :                 }
    4459             :         } else {
    4460           0 :                 cpu = iter->cpu_file;
    4461           0 :                 iter->buffer_iter[cpu] =
    4462           0 :                         ring_buffer_read_prepare(iter->array_buffer->buffer,
    4463             :                                                  cpu, GFP_KERNEL);
    4464           0 :                 ring_buffer_read_prepare_sync();
    4465           0 :                 ring_buffer_read_start(iter->buffer_iter[cpu]);
    4466           0 :                 tracing_iter_reset(iter, cpu);
    4467             :         }
    4468             : 
    4469           0 :         mutex_unlock(&trace_types_lock);
    4470             : 
    4471           0 :         return iter;
    4472             : 
    4473           0 :  fail:
    4474           0 :         mutex_unlock(&trace_types_lock);
    4475           0 :         kfree(iter->trace);
    4476           0 :         kfree(iter->temp);
    4477           0 :         kfree(iter->buffer_iter);
    4478           0 : release:
    4479           0 :         seq_release_private(inode, file);
    4480           0 :         return ERR_PTR(-ENOMEM);
    4481             : }
    4482             : 
    4483           0 : int tracing_open_generic(struct inode *inode, struct file *filp)
    4484             : {
    4485           0 :         int ret;
    4486             : 
    4487           0 :         ret = tracing_check_open_get_tr(NULL);
    4488           0 :         if (ret)
    4489             :                 return ret;
    4490             : 
    4491           0 :         filp->private_data = inode->i_private;
    4492           0 :         return 0;
    4493             : }
    4494             : 
    4495           0 : bool tracing_is_disabled(void)
    4496             : {
     4497           0 :         return (tracing_disabled) ? true : false;
    4498             : }
    4499             : 
    4500             : /*
    4501             :  * Open and update trace_array ref count.
    4502             :  * Must have the current trace_array passed to it.
    4503             :  */
    4504           0 : int tracing_open_generic_tr(struct inode *inode, struct file *filp)
    4505             : {
    4506           0 :         struct trace_array *tr = inode->i_private;
    4507           0 :         int ret;
    4508             : 
    4509           0 :         ret = tracing_check_open_get_tr(tr);
    4510           0 :         if (ret)
    4511             :                 return ret;
    4512             : 
    4513           0 :         filp->private_data = inode->i_private;
    4514             : 
    4515           0 :         return 0;
    4516             : }
    4517             : 
    4518           0 : static int tracing_release(struct inode *inode, struct file *file)
    4519             : {
    4520           0 :         struct trace_array *tr = inode->i_private;
    4521           0 :         struct seq_file *m = file->private_data;
    4522           0 :         struct trace_iterator *iter;
    4523           0 :         int cpu;
    4524             : 
    4525           0 :         if (!(file->f_mode & FMODE_READ)) {
    4526           0 :                 trace_array_put(tr);
    4527           0 :                 return 0;
    4528             :         }
    4529             : 
    4530             :         /* Writes do not use seq_file */
    4531           0 :         iter = m->private;
    4532           0 :         mutex_lock(&trace_types_lock);
    4533             : 
    4534           0 :         for_each_tracing_cpu(cpu) {
    4535           0 :                 if (iter->buffer_iter[cpu])
    4536           0 :                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
    4537             :         }
    4538             : 
    4539           0 :         if (iter->trace && iter->trace->close)
    4540           0 :                 iter->trace->close(iter);
    4541             : 
    4542           0 :         if (!iter->snapshot && tr->stop_count)
    4543             :                 /* reenable tracing if it was previously enabled */
    4544           0 :                 tracing_start_tr(tr);
    4545             : 
    4546           0 :         __trace_array_put(tr);
    4547             : 
    4548           0 :         mutex_unlock(&trace_types_lock);
    4549             : 
    4550           0 :         mutex_destroy(&iter->mutex);
    4551           0 :         free_cpumask_var(iter->started);
    4552           0 :         kfree(iter->fmt);
    4553           0 :         kfree(iter->temp);
    4554           0 :         kfree(iter->trace);
    4555           0 :         kfree(iter->buffer_iter);
    4556           0 :         seq_release_private(inode, file);
    4557             : 
    4558           0 :         return 0;
    4559             : }
    4560             : 
    4561           0 : static int tracing_release_generic_tr(struct inode *inode, struct file *file)
    4562             : {
    4563           0 :         struct trace_array *tr = inode->i_private;
    4564             : 
    4565           0 :         trace_array_put(tr);
    4566           0 :         return 0;
    4567             : }
    4568             : 
    4569           0 : static int tracing_single_release_tr(struct inode *inode, struct file *file)
    4570             : {
    4571           0 :         struct trace_array *tr = inode->i_private;
    4572             : 
    4573           0 :         trace_array_put(tr);
    4574             : 
    4575           0 :         return single_release(inode, file);
    4576             : }
    4577             : 
    4578           0 : static int tracing_open(struct inode *inode, struct file *file)
    4579             : {
    4580           0 :         struct trace_array *tr = inode->i_private;
    4581           0 :         struct trace_iterator *iter;
    4582           0 :         int ret;
    4583             : 
    4584           0 :         ret = tracing_check_open_get_tr(tr);
    4585           0 :         if (ret)
    4586             :                 return ret;
    4587             : 
    4588             :         /* If this file was open for write, then erase contents */
    4589           0 :         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
    4590           0 :                 int cpu = tracing_get_cpu(inode);
    4591           0 :                 struct array_buffer *trace_buf = &tr->array_buffer;
    4592             : 
    4593             : #ifdef CONFIG_TRACER_MAX_TRACE
    4594             :                 if (tr->current_trace->print_max)
    4595             :                         trace_buf = &tr->max_buffer;
    4596             : #endif
    4597             : 
    4598           0 :                 if (cpu == RING_BUFFER_ALL_CPUS)
    4599           0 :                         tracing_reset_online_cpus(trace_buf);
    4600             :                 else
    4601           0 :                         tracing_reset_cpu(trace_buf, cpu);
    4602             :         }
    4603             : 
    4604           0 :         if (file->f_mode & FMODE_READ) {
    4605           0 :                 iter = __tracing_open(inode, file, false);
    4606           0 :                 if (IS_ERR(iter))
    4607           0 :                         ret = PTR_ERR(iter);
    4608           0 :                 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
    4609           0 :                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
    4610             :         }
    4611             : 
    4612           0 :         if (ret < 0)
    4613           0 :                 trace_array_put(tr);
    4614             : 
    4615             :         return ret;
    4616             : }
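                      : 
                      : /*
                      :  * Example of the O_TRUNC path above, in the style of the mini-HOWTO
                      :  * later in this file (a write-only, truncating open clears the
                      :  * buffer before anything is written):
                      :  *
                      :  *     # echo > trace
                      :  */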
    4617             : 
    4618             : /*
    4619             :  * Some tracers are not suitable for instance buffers.
    4620             :  * A tracer is always available for the global array (toplevel)
    4621             :  * or if it explicitly states that it is.
    4622             :  */
    4623             : static bool
    4624           1 : trace_ok_for_array(struct tracer *t, struct trace_array *tr)
    4625             : {
    4626           0 :         return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
    4627             : }
    4628             : 
    4629             : /* Find the next tracer that this trace array may use */
    4630             : static struct tracer *
    4631           0 : get_tracer_for_array(struct trace_array *tr, struct tracer *t)
    4632             : {
    4633           0 :         while (t && !trace_ok_for_array(t, tr))
    4634           0 :                 t = t->next;
    4635             : 
    4636           0 :         return t;
    4637             : }
    4638             : 
    4639             : static void *
    4640           0 : t_next(struct seq_file *m, void *v, loff_t *pos)
    4641             : {
    4642           0 :         struct trace_array *tr = m->private;
    4643           0 :         struct tracer *t = v;
    4644             : 
    4645           0 :         (*pos)++;
    4646             : 
    4647           0 :         if (t)
    4648           0 :                 t = get_tracer_for_array(tr, t->next);
    4649             : 
    4650           0 :         return t;
    4651             : }
    4652             : 
    4653           0 : static void *t_start(struct seq_file *m, loff_t *pos)
    4654             : {
    4655           0 :         struct trace_array *tr = m->private;
    4656           0 :         struct tracer *t;
    4657           0 :         loff_t l = 0;
    4658             : 
    4659           0 :         mutex_lock(&trace_types_lock);
    4660             : 
    4661           0 :         t = get_tracer_for_array(tr, trace_types);
    4662           0 :         for (; t && l < *pos; t = t_next(m, t, &l))
    4663           0 :                         ;
    4664             : 
    4665           0 :         return t;
    4666             : }
    4667             : 
    4668           0 : static void t_stop(struct seq_file *m, void *p)
    4669             : {
    4670           0 :         mutex_unlock(&trace_types_lock);
    4671           0 : }
    4672             : 
    4673           0 : static int t_show(struct seq_file *m, void *v)
    4674             : {
    4675           0 :         struct tracer *t = v;
    4676             : 
    4677           0 :         if (!t)
    4678             :                 return 0;
    4679             : 
    4680           0 :         seq_puts(m, t->name);
    4681           0 :         if (t->next)
    4682           0 :                 seq_putc(m, ' ');
    4683             :         else
    4684           0 :                 seq_putc(m, '\n');
    4685             : 
    4686             :         return 0;
    4687             : }
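                      : 
                      : /*
                      :  * The iteration above renders available_tracers as one
                      :  * space-separated line, e.g. (the exact set depends on the kernel
                      :  * configuration):
                      :  *
                      :  *     # cat available_tracers
                      :  *     function_graph function nop
                      :  */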
    4688             : 
    4689             : static const struct seq_operations show_traces_seq_ops = {
    4690             :         .start          = t_start,
    4691             :         .next           = t_next,
    4692             :         .stop           = t_stop,
    4693             :         .show           = t_show,
    4694             : };
    4695             : 
    4696           0 : static int show_traces_open(struct inode *inode, struct file *file)
    4697             : {
    4698           0 :         struct trace_array *tr = inode->i_private;
    4699           0 :         struct seq_file *m;
    4700           0 :         int ret;
    4701             : 
    4702           0 :         ret = tracing_check_open_get_tr(tr);
    4703           0 :         if (ret)
    4704             :                 return ret;
    4705             : 
    4706           0 :         ret = seq_open(file, &show_traces_seq_ops);
    4707           0 :         if (ret) {
    4708           0 :                 trace_array_put(tr);
    4709           0 :                 return ret;
    4710             :         }
    4711             : 
    4712           0 :         m = file->private_data;
    4713           0 :         m->private = tr;
    4714             : 
    4715           0 :         return 0;
    4716             : }
    4717             : 
    4718           0 : static int show_traces_release(struct inode *inode, struct file *file)
    4719             : {
    4720           0 :         struct trace_array *tr = inode->i_private;
    4721             : 
    4722           0 :         trace_array_put(tr);
    4723           0 :         return seq_release(inode, file);
    4724             : }
    4725             : 
    4726             : static ssize_t
    4727           0 : tracing_write_stub(struct file *filp, const char __user *ubuf,
    4728             :                    size_t count, loff_t *ppos)
    4729             : {
    4730           0 :         return count;
    4731             : }
    4732             : 
    4733           0 : loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
    4734             : {
    4735           0 :         int ret;
    4736             : 
    4737           0 :         if (file->f_mode & FMODE_READ)
    4738           0 :                 ret = seq_lseek(file, offset, whence);
    4739             :         else
    4740           0 :                 file->f_pos = ret = 0;
    4741             : 
    4742           0 :         return ret;
    4743             : }
    4744             : 
    4745             : static const struct file_operations tracing_fops = {
    4746             :         .open           = tracing_open,
    4747             :         .read           = seq_read,
    4748             :         .write          = tracing_write_stub,
    4749             :         .llseek         = tracing_lseek,
    4750             :         .release        = tracing_release,
    4751             : };
    4752             : 
    4753             : static const struct file_operations show_traces_fops = {
    4754             :         .open           = show_traces_open,
    4755             :         .read           = seq_read,
    4756             :         .llseek         = seq_lseek,
    4757             :         .release        = show_traces_release,
    4758             : };
    4759             : 
    4760             : static ssize_t
    4761           0 : tracing_cpumask_read(struct file *filp, char __user *ubuf,
    4762             :                      size_t count, loff_t *ppos)
    4763             : {
    4764           0 :         struct trace_array *tr = file_inode(filp)->i_private;
    4765           0 :         char *mask_str;
    4766           0 :         int len;
    4767             : 
    4768           0 :         len = snprintf(NULL, 0, "%*pb\n",
    4769           0 :                        cpumask_pr_args(tr->tracing_cpumask)) + 1;
    4770           0 :         mask_str = kmalloc(len, GFP_KERNEL);
    4771           0 :         if (!mask_str)
    4772             :                 return -ENOMEM;
    4773             : 
    4774           0 :         len = snprintf(mask_str, len, "%*pb\n",
    4775             :                        cpumask_pr_args(tr->tracing_cpumask));
    4776           0 :         if (len >= count) {
    4777           0 :                 count = -EINVAL;
    4778           0 :                 goto out_err;
    4779             :         }
    4780           0 :         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
    4781             : 
    4782           0 : out_err:
    4783           0 :         kfree(mask_str);
    4784             : 
    4785           0 :         return count;
    4786             : }
    4787             : 
    4788           0 : int tracing_set_cpumask(struct trace_array *tr,
    4789             :                         cpumask_var_t tracing_cpumask_new)
    4790             : {
    4791           0 :         int cpu;
    4792             : 
    4793           0 :         if (!tr)
    4794             :                 return -EINVAL;
    4795             : 
    4796           0 :         local_irq_disable();
    4797           0 :         arch_spin_lock(&tr->max_lock);
    4798           0 :         for_each_tracing_cpu(cpu) {
    4799             :                 /*
    4800             :                  * Increase/decrease the disabled counter if we are
    4801             :                  * about to flip a bit in the cpumask:
    4802             :                  */
    4803           0 :                 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
    4804           0 :                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
    4805           0 :                         atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
    4806           0 :                         ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
    4807             :                 }
    4808           0 :                 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
    4809           0 :                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
    4810           0 :                         atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
    4811           0 :                         ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
    4812             :                 }
    4813             :         }
    4814           0 :         arch_spin_unlock(&tr->max_lock);
    4815           0 :         local_irq_enable();
    4816             : 
    4817           0 :         cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
    4818             : 
    4819           0 :         return 0;
    4820             : }
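                      : 
                      : /*
                      :  * Example of the tracefs interface built on this helper (the mask
                      :  * is a hex cpumask, parsed by cpumask_parse_user() below):
                      :  *
                      :  *     # echo 3 > tracing_cpumask        (trace only CPUs 0 and 1)
                      :  */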
    4821             : 
    4822             : static ssize_t
    4823           0 : tracing_cpumask_write(struct file *filp, const char __user *ubuf,
    4824             :                       size_t count, loff_t *ppos)
    4825             : {
    4826           0 :         struct trace_array *tr = file_inode(filp)->i_private;
    4827           0 :         cpumask_var_t tracing_cpumask_new;
    4828           0 :         int err;
    4829             : 
    4830           0 :         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
    4831             :                 return -ENOMEM;
    4832             : 
    4833           0 :         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
    4834           0 :         if (err)
    4835           0 :                 goto err_free;
    4836             : 
    4837           0 :         err = tracing_set_cpumask(tr, tracing_cpumask_new);
    4838           0 :         if (err)
    4839           0 :                 goto err_free;
    4840             : 
    4841           0 :         free_cpumask_var(tracing_cpumask_new);
    4842             : 
    4843           0 :         return count;
    4844             : 
    4845           0 : err_free:
    4846           0 :         free_cpumask_var(tracing_cpumask_new);
    4847             : 
    4848           0 :         return err;
    4849             : }
    4850             : 
    4851             : static const struct file_operations tracing_cpumask_fops = {
    4852             :         .open           = tracing_open_generic_tr,
    4853             :         .read           = tracing_cpumask_read,
    4854             :         .write          = tracing_cpumask_write,
    4855             :         .release        = tracing_release_generic_tr,
    4856             :         .llseek         = generic_file_llseek,
    4857             : };
    4858             : 
    4859           0 : static int tracing_trace_options_show(struct seq_file *m, void *v)
    4860             : {
    4861           0 :         struct tracer_opt *trace_opts;
    4862           0 :         struct trace_array *tr = m->private;
    4863           0 :         u32 tracer_flags;
    4864           0 :         int i;
    4865             : 
    4866           0 :         mutex_lock(&trace_types_lock);
    4867           0 :         tracer_flags = tr->current_trace->flags->val;
    4868           0 :         trace_opts = tr->current_trace->flags->opts;
    4869             : 
    4870           0 :         for (i = 0; trace_options[i]; i++) {
    4871           0 :                 if (tr->trace_flags & (1 << i))
    4872           0 :                         seq_printf(m, "%s\n", trace_options[i]);
    4873             :                 else
    4874           0 :                         seq_printf(m, "no%s\n", trace_options[i]);
    4875             :         }
    4876             : 
    4877           0 :         for (i = 0; trace_opts[i].name; i++) {
    4878           0 :                 if (tracer_flags & trace_opts[i].bit)
    4879           0 :                         seq_printf(m, "%s\n", trace_opts[i].name);
    4880             :                 else
    4881           0 :                         seq_printf(m, "no%s\n", trace_opts[i].name);
    4882             :         }
    4883           0 :         mutex_unlock(&trace_types_lock);
    4884             : 
    4885           0 :         return 0;
    4886             : }
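                      : 
                      : /*
                      :  * Sample of the listing produced above: one flag per line, with a
                      :  * "no" prefix when the flag is clear. The exact names depend on the
                      :  * configuration and the current tracer, e.g.:
                      :  *
                      :  *     print-parent
                      :  *     nosym-offset
                      :  */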
    4887             : 
    4888           0 : static int __set_tracer_option(struct trace_array *tr,
    4889             :                                struct tracer_flags *tracer_flags,
    4890             :                                struct tracer_opt *opts, int neg)
    4891             : {
    4892           0 :         struct tracer *trace = tracer_flags->trace;
    4893           0 :         int ret;
    4894             : 
    4895           0 :         ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
    4896           0 :         if (ret)
    4897             :                 return ret;
    4898             : 
    4899           0 :         if (neg)
    4900           0 :                 tracer_flags->val &= ~opts->bit;
    4901             :         else
    4902           0 :                 tracer_flags->val |= opts->bit;
    4903             :         return 0;
    4904             : }
    4905             : 
    4906             : /* Try to assign a tracer specific option */
    4907           0 : static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
    4908             : {
    4909           0 :         struct tracer *trace = tr->current_trace;
    4910           0 :         struct tracer_flags *tracer_flags = trace->flags;
    4911           0 :         struct tracer_opt *opts = NULL;
    4912           0 :         int i;
    4913             : 
    4914           0 :         for (i = 0; tracer_flags->opts[i].name; i++) {
    4915           0 :                 opts = &tracer_flags->opts[i];
    4916             : 
    4917           0 :                 if (strcmp(cmp, opts->name) == 0)
    4918           0 :                         return __set_tracer_option(tr, trace->flags, opts, neg);
    4919             :         }
    4920             : 
    4921             :         return -EINVAL;
    4922             : }
    4923             : 
    4924             : /* Some tracers require overwrite to stay enabled */
    4925           0 : int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
    4926             : {
    4927           0 :         if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
    4928           0 :                 return -1;
    4929             : 
    4930             :         return 0;
    4931             : }
    4932             : 
    4933           0 : int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
    4934             : {
    4935           0 :         if ((mask == TRACE_ITER_RECORD_TGID) ||
    4936           0 :             (mask == TRACE_ITER_RECORD_CMD))
    4937           0 :                 lockdep_assert_held(&event_mutex);
    4938             : 
    4939             :         /* do nothing if flag is already set */
    4940           0 :         if (!!(tr->trace_flags & mask) == !!enabled)
    4941             :                 return 0;
    4942             : 
    4943             :         /* Give the tracer a chance to approve the change */
    4944           0 :         if (tr->current_trace->flag_changed)
    4945           0 :                 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
    4946             :                         return -EINVAL;
    4947             : 
    4948           0 :         if (enabled)
    4949           0 :                 tr->trace_flags |= mask;
    4950             :         else
    4951           0 :                 tr->trace_flags &= ~mask;
    4952             : 
    4953           0 :         if (mask == TRACE_ITER_RECORD_CMD)
    4954           0 :                 trace_event_enable_cmd_record(enabled);
    4955             : 
    4956           0 :         if (mask == TRACE_ITER_RECORD_TGID) {
    4957           0 :                 if (!tgid_map)
    4958           0 :                         tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
    4959             :                                            sizeof(*tgid_map),
    4960             :                                            GFP_KERNEL);
    4961           0 :                 if (!tgid_map) {
    4962           0 :                         tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
    4963           0 :                         return -ENOMEM;
    4964             :                 }
    4965             : 
    4966           0 :                 trace_event_enable_tgid_record(enabled);
    4967             :         }
    4968             : 
    4969           0 :         if (mask == TRACE_ITER_EVENT_FORK)
    4970           0 :                 trace_event_follow_fork(tr, enabled);
    4971             : 
    4972           0 :         if (mask == TRACE_ITER_FUNC_FORK)
    4973           0 :                 ftrace_pid_follow_fork(tr, enabled);
    4974             : 
    4975           0 :         if (mask == TRACE_ITER_OVERWRITE) {
    4976           0 :                 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
    4977             : #ifdef CONFIG_TRACER_MAX_TRACE
    4978             :                 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
    4979             : #endif
    4980             :         }
    4981             : 
    4982           0 :         if (mask == TRACE_ITER_PRINTK) {
    4983           0 :                 trace_printk_start_stop_comm(enabled);
    4984           0 :                 trace_printk_control(enabled);
    4985             :         }
    4986             : 
    4987             :         return 0;
    4988             : }
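                      : 
                      : /*
                      :  * Example of a change that funnels through this function, assuming
                      :  * the usual tracefs option file for TRACE_ITER_RECORD_TGID:
                      :  *
                      :  *     # echo 1 > options/record-tgid
                      :  */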
    4989             : 
    4990           0 : int trace_set_options(struct trace_array *tr, char *option)
    4991             : {
    4992           0 :         char *cmp;
    4993           0 :         int neg = 0;
    4994           0 :         int ret;
    4995           0 :         size_t orig_len = strlen(option);
    4996           0 :         int len;
    4997             : 
    4998           0 :         cmp = strstrip(option);
    4999             : 
    5000           0 :         len = str_has_prefix(cmp, "no");
    5001           0 :         if (len)
    5002           0 :                 neg = 1;
    5003             : 
    5004           0 :         cmp += len;
    5005             : 
    5006           0 :         mutex_lock(&event_mutex);
    5007           0 :         mutex_lock(&trace_types_lock);
    5008             : 
    5009           0 :         ret = match_string(trace_options, -1, cmp);
    5010             :         /* If no option could be set, test the specific tracer options */
    5011           0 :         if (ret < 0)
    5012           0 :                 ret = set_tracer_option(tr, cmp, neg);
    5013             :         else
    5014           0 :                 ret = set_tracer_flag(tr, 1 << ret, !neg);
    5015             : 
    5016           0 :         mutex_unlock(&trace_types_lock);
    5017           0 :         mutex_unlock(&event_mutex);
    5018             : 
    5019             :         /*
    5020             :          * If the first trailing whitespace is replaced with '\0' by strstrip,
    5021             :          * turn it back into a space.
    5022             :          */
    5023           0 :         if (orig_len > strlen(option))
    5024           0 :                 option[strlen(option)] = ' ';
    5025             : 
    5026           0 :         return ret;
    5027             : }
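                      : 
                      : /*
                      :  * Example of the syntax accepted above (a "no" prefix clears the
                      :  * flag; names come from trace_options[] or the tracer's own opts):
                      :  *
                      :  *     trace_set_options(tr, "overwrite");     set the flag
                      :  *     trace_set_options(tr, "nooverwrite");   clear it again
                      :  */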
    5028             : 
    5029           1 : static void __init apply_trace_boot_options(void)
    5030             : {
    5031           1 :         char *buf = trace_boot_options_buf;
    5032           2 :         char *option;
    5033             : 
    5034           2 :         while (true) {
    5035           2 :                 option = strsep(&buf, ",");
    5036             : 
    5037           2 :                 if (!option)
    5038             :                         break;
    5039             : 
    5040           1 :                 if (*option)
    5041           0 :                         trace_set_options(&global_trace, option);
    5042             : 
    5043             :                 /* Put back the comma to allow this to be called again */
    5044           1 :                 if (buf)
    5045           0 :                         *(buf - 1) = ',';
    5046             :         }
    5047           1 : }
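                      : 
                      : /*
                      :  * Example boot command line consumed above, assuming the standard
                      :  * "trace_options=" kernel parameter that fills
                      :  * trace_boot_options_buf (options are comma-separated):
                      :  *
                      :  *     trace_options=sym-offset,noprint-parent
                      :  */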
    5048             : 
    5049             : static ssize_t
    5050           0 : tracing_trace_options_write(struct file *filp, const char __user *ubuf,
    5051             :                         size_t cnt, loff_t *ppos)
    5052             : {
    5053           0 :         struct seq_file *m = filp->private_data;
    5054           0 :         struct trace_array *tr = m->private;
    5055           0 :         char buf[64];
    5056           0 :         int ret;
    5057             : 
    5058           0 :         if (cnt >= sizeof(buf))
    5059             :                 return -EINVAL;
    5060             : 
    5061           0 :         if (copy_from_user(buf, ubuf, cnt))
    5062             :                 return -EFAULT;
    5063             : 
    5064           0 :         buf[cnt] = 0;
    5065             : 
    5066           0 :         ret = trace_set_options(tr, buf);
    5067           0 :         if (ret < 0)
    5068           0 :                 return ret;
    5069             : 
    5070           0 :         *ppos += cnt;
    5071             : 
    5072           0 :         return cnt;
    5073             : }
    5074             : 
    5075           0 : static int tracing_trace_options_open(struct inode *inode, struct file *file)
    5076             : {
    5077           0 :         struct trace_array *tr = inode->i_private;
    5078           0 :         int ret;
    5079             : 
    5080           0 :         ret = tracing_check_open_get_tr(tr);
    5081           0 :         if (ret)
    5082             :                 return ret;
    5083             : 
    5084           0 :         ret = single_open(file, tracing_trace_options_show, inode->i_private);
    5085           0 :         if (ret < 0)
    5086           0 :                 trace_array_put(tr);
    5087             : 
    5088             :         return ret;
    5089             : }
    5090             : 
    5091             : static const struct file_operations tracing_iter_fops = {
    5092             :         .open           = tracing_trace_options_open,
    5093             :         .read           = seq_read,
    5094             :         .llseek         = seq_lseek,
    5095             :         .release        = tracing_single_release_tr,
    5096             :         .write          = tracing_trace_options_write,
    5097             : };
    5098             : 
    5099             : static const char readme_msg[] =
    5100             :         "tracing mini-HOWTO:\n\n"
    5101             :         "# echo 0 > tracing_on : quick way to disable tracing\n"
    5102             :         "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
    5103             :         " Important files:\n"
    5104             :         "  trace\t\t\t- The static contents of the buffer\n"
     5105             :         "\t\t\t  To clear the buffer, write into this file: echo > trace\n"
    5106             :         "  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
    5107             :         "  current_tracer\t- function and latency tracers\n"
    5108             :         "  available_tracers\t- list of configured tracers for current_tracer\n"
    5109             :         "  error_log\t- error log for failed commands (that support it)\n"
    5110             :         "  buffer_size_kb\t- view and modify size of per cpu buffer\n"
    5111             :         "  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
     5112             :         "  trace_clock\t\t- change the clock used to order events\n"
    5113             :         "       local:   Per cpu clock but may not be synced across CPUs\n"
    5114             :         "      global:   Synced across CPUs but slows tracing down.\n"
    5115             :         "     counter:   Not a clock, but just an increment\n"
    5116             :         "      uptime:   Jiffy counter from time of boot\n"
    5117             :         "        perf:   Same clock that perf events use\n"
    5118             : #ifdef CONFIG_X86_64
    5119             :         "     x86-tsc:   TSC cycle counter\n"
    5120             : #endif
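                      : /*
                      :  * Example use of the clocks listed above, in the same style as the
                      :  * HOWTO examples further down (assumed tracefs usage):
                      :  *
                      :  *     # echo global > trace_clock
                      :  */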
     5121             :         "\n  timestamp_mode\t- view the mode used to timestamp events\n"
    5122             :         "       delta:   Delta difference against a buffer-wide timestamp\n"
    5123             :         "    absolute:   Absolute (standalone) timestamp\n"
     5124             :         "\n  trace_marker\t\t- Writes to this file get written into the kernel buffer\n"
     5125             :         "\n  trace_marker_raw\t\t- Writes to this file get written as binary data into the kernel buffer\n"
    5126             :         "  tracing_cpumask\t- Limit which CPUs to trace\n"
    5127             :         "  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
    5128             :         "\t\t\t  Remove sub-buffer with rmdir\n"
    5129             :         "  trace_options\t\t- Set format or modify how tracing happens\n"
    5130             :         "\t\t\t  Disable an option by prefixing 'no' to the\n"
    5131             :         "\t\t\t  option name\n"
    5132             :         "  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
    5133             : #ifdef CONFIG_DYNAMIC_FTRACE
    5134             :         "\n  available_filter_functions - list of functions that can be filtered on\n"
    5135             :         "  set_ftrace_filter\t- echo function name in here to only trace these\n"
    5136             :         "\t\t\t  functions\n"
    5137             :         "\t     accepts: func_full_name or glob-matching-pattern\n"
    5138             :         "\t     modules: Can select a group via module\n"
    5139             :         "\t      Format: :mod:<module-name>\n"
    5140             :         "\t     example: echo :mod:ext3 > set_ftrace_filter\n"
    5141             :         "\t    triggers: a command to perform when function is hit\n"
    5142             :         "\t      Format: <function>:<trigger>[:count]\n"
    5143             :         "\t     trigger: traceon, traceoff\n"
    5144             :         "\t\t      enable_event:<system>:<event>\n"
    5145             :         "\t\t      disable_event:<system>:<event>\n"
    5146             : #ifdef CONFIG_STACKTRACE
    5147             :         "\t\t      stacktrace\n"
    5148             : #endif
    5149             : #ifdef CONFIG_TRACER_SNAPSHOT
    5150             :         "\t\t      snapshot\n"
    5151             : #endif
    5152             :         "\t\t      dump\n"
    5153             :         "\t\t      cpudump\n"
    5154             :         "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
    5155             :         "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
    5156             :         "\t     The first one will disable tracing every time do_fault is hit\n"
    5157             :         "\t     The second will disable tracing at most 3 times when do_trap is hit\n"
     5158             :         "\t       The first time do_trap is hit and it disables tracing, the\n"
    5159             :         "\t       counter will decrement to 2. If tracing is already disabled,\n"
    5160             :         "\t       the counter will not decrement. It only decrements when the\n"
     5161             :         "\t       trigger actually did its work\n"
    5162             :         "\t     To remove trigger without count:\n"
     5163             :         "\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
    5164             :         "\t     To remove trigger with a count:\n"
     5165             :         "\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
    5166             :         "  set_ftrace_notrace\t- echo function name in here to never trace.\n"
    5167             :         "\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
    5168             :         "\t    modules: Can select a group via module command :mod:\n"
    5169             :         "\t    Does not accept triggers\n"
    5170             : #endif /* CONFIG_DYNAMIC_FTRACE */
    5171             : #ifdef CONFIG_FUNCTION_TRACER
    5172             :         "  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
    5173             :         "\t\t    (function)\n"
    5174             :         "  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
    5175             :         "\t\t    (function)\n"
    5176             : #endif
    5177             : #ifdef CONFIG_FUNCTION_GRAPH_TRACER
    5178             :         "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
    5179             :         "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
    5180             :         "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
    5181             : #endif
    5182             : #ifdef CONFIG_TRACER_SNAPSHOT
    5183             :         "\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
    5184             :         "\t\t\t  snapshot buffer. Read the contents for more\n"
    5185             :         "\t\t\t  information\n"
    5186             : #endif
    5187             : #ifdef CONFIG_STACK_TRACER
    5188             :         "  stack_trace\t\t- Shows the max stack trace when active\n"
    5189             :         "  stack_max_size\t- Shows current max stack size that was traced\n"
    5190             :         "\t\t\t  Write into this file to reset the max size (trigger a\n"
    5191             :         "\t\t\t  new trace)\n"
    5192             : #ifdef CONFIG_DYNAMIC_FTRACE
    5193             :         "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
    5194             :         "\t\t\t  traces\n"
    5195             : #endif
    5196             : #endif /* CONFIG_STACK_TRACER */
    5197             : #ifdef CONFIG_DYNAMIC_EVENTS
    5198             :         "  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
    5199             :         "\t\t\t  Write into this file to define/undefine new trace events.\n"
    5200             : #endif
    5201             : #ifdef CONFIG_KPROBE_EVENTS
    5202             :         "  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
    5203             :         "\t\t\t  Write into this file to define/undefine new trace events.\n"
    5204             : #endif
    5205             : #ifdef CONFIG_UPROBE_EVENTS
    5206             :         "  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
    5207             :         "\t\t\t  Write into this file to define/undefine new trace events.\n"
    5208             : #endif
    5209             : #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
    5210             :         "\t  accepts: event-definitions (one definition per line)\n"
    5211             :         "\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
    5212             :         "\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
    5213             : #ifdef CONFIG_HIST_TRIGGERS
    5214             :         "\t           s:[synthetic/]<event> <field> [<field>]\n"
    5215             : #endif
    5216             :         "\t           -:[<group>/]<event>\n"
    5217             : #ifdef CONFIG_KPROBE_EVENTS
    5218             :         "\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
    5219             :   "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
    5220             : #endif
    5221             : #ifdef CONFIG_UPROBE_EVENTS
    5222             :   "   place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
    5223             : #endif
    5224             :         "\t     args: <name>=fetcharg[:type]\n"
    5225             :         "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
    5226             : #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
    5227             :         "\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
    5228             : #else
    5229             :         "\t           $stack<index>, $stack, $retval, $comm,\n"
    5230             : #endif
    5231             :         "\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
    5232             :         "\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
    5233             :         "\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
    5234             :         "\t           <type>\\[<array-size>\\]\n"
    5235             : #ifdef CONFIG_HIST_TRIGGERS
    5236             :         "\t    field: <stype> <name>;\n"
    5237             :         "\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
    5238             :         "\t           [unsigned] char/int/long\n"
    5239             : #endif
    5240             : #endif
    5241             :         "  events/\t\t- Directory containing all trace event subsystems:\n"
    5242             :         "      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
    5243             :         "  events/<system>/\t- Directory containing all trace events for <system>:\n"
    5244             :         "      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
    5245             :         "\t\t\t  events\n"
    5246             :         "      filter\t\t- If set, only events passing filter are traced\n"
    5247             :         "  events/<system>/<event>/\t- Directory containing control files for\n"
    5248             :         "\t\t\t  <event>:\n"
    5249             :         "      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
    5250             :         "      filter\t\t- If set, only events passing filter are traced\n"
    5251             :         "      trigger\t\t- If set, a command to perform when event is hit\n"
    5252             :         "\t    Format: <trigger>[:count][if <filter>]\n"
    5253             :         "\t   trigger: traceon, traceoff\n"
    5254             :         "\t            enable_event:<system>:<event>\n"
    5255             :         "\t            disable_event:<system>:<event>\n"
    5256             : #ifdef CONFIG_HIST_TRIGGERS
    5257             :         "\t            enable_hist:<system>:<event>\n"
    5258             :         "\t            disable_hist:<system>:<event>\n"
    5259             : #endif
    5260             : #ifdef CONFIG_STACKTRACE
    5261             :         "\t\t    stacktrace\n"
    5262             : #endif
    5263             : #ifdef CONFIG_TRACER_SNAPSHOT
    5264             :         "\t\t    snapshot\n"
    5265             : #endif
    5266             : #ifdef CONFIG_HIST_TRIGGERS
    5267             :         "\t\t    hist (see below)\n"
    5268             : #endif
    5269             :         "\t   example: echo traceoff > events/block/block_unplug/trigger\n"
    5270             :         "\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
    5271             :         "\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
    5272             :         "\t                  events/block/block_unplug/trigger\n"
    5273             :         "\t   The first disables tracing every time block_unplug is hit.\n"
    5274             :         "\t   The second disables tracing the first 3 times block_unplug is hit.\n"
    5275             :         "\t   The third enables the kmalloc event the first 3 times block_unplug\n"
    5276             :         "\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
    5277             :         "\t   Like function triggers, the counter is only decremented if it\n"
    5278             :         "\t    enabled or disabled tracing.\n"
    5279             :         "\t   To remove a trigger without a count:\n"
    5280             :         "\t     echo '!<trigger>' > <system>/<event>/trigger\n"
    5281             :         "\t   To remove a trigger with a count:\n"
    5282             :         "\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
    5283             :         "\t   Filters can be ignored when removing a trigger.\n"
    5284             : #ifdef CONFIG_HIST_TRIGGERS
    5285             :         "      hist trigger\t- If set, event hits are aggregated into a hash table\n"
    5286             :         "\t    Format: hist:keys=<field1[,field2,...]>\n"
    5287             :         "\t            [:values=<field1[,field2,...]>]\n"
    5288             :         "\t            [:sort=<field1[,field2,...]>]\n"
    5289             :         "\t            [:size=#entries]\n"
    5290             :         "\t            [:pause][:continue][:clear]\n"
    5291             :         "\t            [:name=histname1]\n"
    5292             :         "\t            [:<handler>.<action>]\n"
    5293             :         "\t            [if <filter>]\n\n"
    5294             :         "\t    When a matching event is hit, an entry is added to a hash\n"
    5295             :         "\t    table using the key(s) and value(s) named, and the value of a\n"
    5296             :         "\t    sum called 'hitcount' is incremented.  Keys and values\n"
    5297             :         "\t    correspond to fields in the event's format description.  Keys\n"
    5298             :         "\t    can be any field, or the special string 'stacktrace'.\n"
    5299             :         "\t    Compound keys consisting of up to two fields can be specified\n"
    5300             :         "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
    5301             :         "\t    fields.  Sort keys consisting of up to two fields can be\n"
    5302             :         "\t    specified using the 'sort' keyword.  The sort direction can\n"
    5303             :         "\t    be modified by appending '.descending' or '.ascending' to a\n"
    5304             :         "\t    sort field.  The 'size' parameter can be used to specify more\n"
    5305             :         "\t    or fewer than the default 2048 entries for the hashtable size.\n"
    5306             :         "\t    If a hist trigger is given a name using the 'name' parameter,\n"
    5307             :         "\t    its histogram data will be shared with other triggers of the\n"
    5308             :         "\t    same name, and trigger hits will update this common data.\n\n"
    5309             :         "\t    Reading the 'hist' file for the event will dump the hash\n"
    5310             :         "\t    table in its entirety to stdout.  If there are multiple hist\n"
    5311             :         "\t    triggers attached to an event, there will be a table for each\n"
    5312             :         "\t    trigger in the output.  The table displayed for a named\n"
    5313             :         "\t    trigger will be the same as any other instance having the\n"
    5314             :         "\t    same name.  The default format used to display a given field\n"
    5315             :         "\t    can be modified by appending any of the following modifiers\n"
    5316             :         "\t    to the field name, as applicable:\n\n"
    5317             :         "\t            .hex        display a number as a hex value\n"
    5318             :         "\t            .sym        display an address as a symbol\n"
    5319             :         "\t            .sym-offset display an address as a symbol and offset\n"
    5320             :         "\t            .execname   display a common_pid as a program name\n"
    5321             :         "\t            .syscall    display a syscall id as a syscall name\n"
    5322             :         "\t            .log2       display log2 value rather than raw number\n"
    5323             :         "\t            .usecs      display a common_timestamp in microseconds\n\n"
    5324             :         "\t    The 'pause' parameter can be used to pause an existing hist\n"
    5325             :         "\t    trigger or to start a hist trigger but not log any events\n"
    5326             :         "\t    until told to do so.  'continue' can be used to start or\n"
    5327             :         "\t    restart a paused hist trigger.\n\n"
    5328             :         "\t    The 'clear' parameter will clear the contents of a running\n"
    5329             :         "\t    hist trigger and leave its current paused/active state\n"
    5330             :         "\t    unchanged.\n\n"
    5331             :         "\t    The enable_hist and disable_hist triggers can be used to\n"
    5332             :         "\t    have one event conditionally start and stop another event's\n"
    5333             :         "\t    already-attached hist trigger.  The syntax is analogous to\n"
    5334             :         "\t    the enable_event and disable_event triggers.\n\n"
    5335             :         "\t    Hist trigger handlers and actions are executed whenever a\n"
    5336             :         "\t    histogram entry is added or updated.  They take the form:\n\n"
    5337             :         "\t        <handler>.<action>\n\n"
    5338             :         "\t    The available handlers are:\n\n"
    5339             :         "\t        onmatch(matching.event)  - invoke on addition or update\n"
    5340             :         "\t        onmax(var)               - invoke if var exceeds current max\n"
    5341             :         "\t        onchange(var)            - invoke action if var changes\n\n"
    5342             :         "\t    The available actions are:\n\n"
    5343             :         "\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
    5344             :         "\t        save(field,...)                      - save current event fields\n"
    5345             : #ifdef CONFIG_TRACER_SNAPSHOT
    5346             :         "\t        snapshot()                           - snapshot the trace buffer\n\n"
    5347             : #endif
    5348             : #ifdef CONFIG_SYNTH_EVENTS
    5349             :         "  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
    5350             :         "\t  Write into this file to define/undefine new synthetic events.\n"
    5351             :         "\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
    5352             : #endif
    5353             : #endif
    5354             : ;
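                     : 
                     : /*
                     :  * Worked hist example (editor's sketch assembled from the format text
                     :  * above; the kmem:kmalloc event and its bytes_req field are assumed to
                     :  * exist on the running kernel):
                     :  *
                     :  *   echo 'hist:keys=common_pid.execname:values=bytes_req' \
                     :  *        > events/kmem/kmalloc/trigger
                     :  *
                     :  * aggregates kmalloc hits per task name; reading events/kmem/kmalloc/hist
                     :  * then dumps the resulting table.
                     :  */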
    5355             : 
    5356             : static ssize_t
    5357           0 : tracing_readme_read(struct file *filp, char __user *ubuf,
    5358             :                        size_t cnt, loff_t *ppos)
    5359             : {
    5360           0 :         return simple_read_from_buffer(ubuf, cnt, ppos,
    5361             :                                         readme_msg, strlen(readme_msg));
    5362             : }
    5363             : 
    5364             : static const struct file_operations tracing_readme_fops = {
    5365             :         .open           = tracing_open_generic,
    5366             :         .read           = tracing_readme_read,
    5367             :         .llseek         = generic_file_llseek,
    5368             : };
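                     : 
                     : /*
                     :  * The readme file above is the canonical read-only pattern: a static
                     :  * buffer exposed through simple_read_from_buffer() plus generic
                     :  * helpers. A minimal self-contained sketch of the same shape; the
                     :  * my_* names are hypothetical, and simple_open() from libfs is assumed
                     :  * as the open helper:
                     :  */
                     : #include <linux/fs.h>
                     : #include <linux/string.h>
                     : 
                     : static const char my_msg[] = "hello from a read-only file\n";
                     : 
                     : static ssize_t my_read(struct file *filp, char __user *ubuf,
                     :                        size_t cnt, loff_t *ppos)
                     : {
                     :         /* copy at most cnt bytes of my_msg from *ppos, advancing *ppos */
                     :         return simple_read_from_buffer(ubuf, cnt, ppos,
                     :                                        my_msg, strlen(my_msg));
                     : }
                     : 
                     : static const struct file_operations my_fops = {
                     :         .open           = simple_open,
                     :         .read           = my_read,
                     :         .llseek         = generic_file_llseek,
                     : };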
    5369             : 
    5370           0 : static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
    5371             : {
    5372           0 :         int *ptr = v;
    5373             : 
    5374           0 :         if (*pos || m->count)
    5375           0 :                 ptr++;
    5376             : 
    5377           0 :         (*pos)++;
    5378             : 
    5379           0 :         for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
    5380           0 :                 if (trace_find_tgid(*ptr))
    5381           0 :                         return ptr;
    5382             :         }
    5383             : 
    5384             :         return NULL;
    5385             : }
    5386             : 
    5387           0 : static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
    5388             : {
    5389           0 :         void *v;
    5390           0 :         loff_t l = 0;
    5391             : 
    5392           0 :         if (!tgid_map)
    5393             :                 return NULL;
    5394             : 
    5395             :         v = &tgid_map[0];
    5396           0 :         while (l <= *pos) {
    5397           0 :                 v = saved_tgids_next(m, v, &l);
    5398           0 :                 if (!v)
    5399             :                         return NULL;
    5400             :         }
    5401             : 
    5402             :         return v;
    5403             : }
    5404             : 
    5405           0 : static void saved_tgids_stop(struct seq_file *m, void *v)
    5406             : {
    5407           0 : }
    5408             : 
    5409           0 : static int saved_tgids_show(struct seq_file *m, void *v)
    5410             : {
    5411           0 :         int pid = (int *)v - tgid_map;
    5412             : 
    5413           0 :         seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
    5414           0 :         return 0;
    5415             : }
    5416             : 
    5417             : static const struct seq_operations tracing_saved_tgids_seq_ops = {
    5418             :         .start          = saved_tgids_start,
    5419             :         .stop           = saved_tgids_stop,
    5420             :         .next           = saved_tgids_next,
    5421             :         .show           = saved_tgids_show,
    5422             : };
    5423             : 
    5424           0 : static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
    5425             : {
    5426           0 :         int ret;
    5427             : 
    5428           0 :         ret = tracing_check_open_get_tr(NULL);
    5429           0 :         if (ret)
    5430             :                 return ret;
    5431             : 
    5432           0 :         return seq_open(filp, &tracing_saved_tgids_seq_ops);
    5433             : }
    5434             : 
    5435             : 
    5436             : static const struct file_operations tracing_saved_tgids_fops = {
    5437             :         .open           = tracing_saved_tgids_open,
    5438             :         .read           = seq_read,
    5439             :         .llseek         = seq_lseek,
    5440             :         .release        = seq_release,
    5441             : };
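                     : 
                     : /*
                     :  * saved_tgids above is the standard seq_file quartet. A minimal
                     :  * stand-alone sketch of the same shape (demo_* names are
                     :  * hypothetical): start()/next() return the element for *pos, or NULL
                     :  * once the array is exhausted.
                     :  */
                     : #include <linux/kernel.h>
                     : #include <linux/seq_file.h>
                     : 
                     : static int demo_vals[] = { 1, 2, 3 };
                     : 
                     : static void *demo_start(struct seq_file *m, loff_t *pos)
                     : {
                     :         return *pos < (loff_t)ARRAY_SIZE(demo_vals) ? &demo_vals[*pos] : NULL;
                     : }
                     : 
                     : static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
                     : {
                     :         (*pos)++;
                     :         return demo_start(m, pos);
                     : }
                     : 
                     : static void demo_stop(struct seq_file *m, void *v)
                     : {
                     :         /* nothing to unlock in this sketch */
                     : }
                     : 
                     : static int demo_show(struct seq_file *m, void *v)
                     : {
                     :         seq_printf(m, "%d\n", *(int *)v);
                     :         return 0;
                     : }
                     : 
                     : static const struct seq_operations demo_seq_ops = {
                     :         .start          = demo_start,
                     :         .next           = demo_next,
                     :         .stop           = demo_stop,
                     :         .show           = demo_show,
                     : };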
    5442             : 
    5443           0 : static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
    5444             : {
    5445           0 :         unsigned int *ptr = v;
    5446             : 
    5447           0 :         if (*pos || m->count)
    5448           0 :                 ptr++;
    5449             : 
    5450           0 :         (*pos)++;
    5451             : 
    5452           0 :         for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
    5453           0 :              ptr++) {
    5454           0 :                 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
    5455           0 :                         continue;
    5456             : 
    5457             :                 return ptr;
    5458             :         }
    5459             : 
    5460             :         return NULL;
    5461             : }
    5462             : 
    5463           0 : static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
    5464             : {
    5465           0 :         void *v;
    5466           0 :         loff_t l = 0;
    5467             : 
    5468           0 :         preempt_disable();
    5469           0 :         arch_spin_lock(&trace_cmdline_lock);
    5470             : 
    5471           0 :         v = &savedcmd->map_cmdline_to_pid[0];
    5472           0 :         while (l <= *pos) {
    5473           0 :                 v = saved_cmdlines_next(m, v, &l);
    5474           0 :                 if (!v)
    5475             :                         return NULL;
    5476             :         }
    5477             : 
    5478             :         return v;
    5479             : }
    5480             : 
    5481           0 : static void saved_cmdlines_stop(struct seq_file *m, void *v)
    5482             : {
    5483           0 :         arch_spin_unlock(&trace_cmdline_lock);
    5484           0 :         preempt_enable();
    5485           0 : }
    5486             : 
    5487           0 : static int saved_cmdlines_show(struct seq_file *m, void *v)
    5488             : {
    5489           0 :         char buf[TASK_COMM_LEN];
    5490           0 :         unsigned int *pid = v;
    5491             : 
    5492           0 :         __trace_find_cmdline(*pid, buf);
    5493           0 :         seq_printf(m, "%d %s\n", *pid, buf);
    5494           0 :         return 0;
    5495             : }
    5496             : 
    5497             : static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
    5498             :         .start          = saved_cmdlines_start,
    5499             :         .next           = saved_cmdlines_next,
    5500             :         .stop           = saved_cmdlines_stop,
    5501             :         .show           = saved_cmdlines_show,
    5502             : };
    5503             : 
    5504           0 : static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
    5505             : {
    5506           0 :         int ret;
    5507             : 
    5508           0 :         ret = tracing_check_open_get_tr(NULL);
    5509           0 :         if (ret)
    5510             :                 return ret;
    5511             : 
    5512           0 :         return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
    5513             : }
    5514             : 
    5515             : static const struct file_operations tracing_saved_cmdlines_fops = {
    5516             :         .open           = tracing_saved_cmdlines_open,
    5517             :         .read           = seq_read,
    5518             :         .llseek         = seq_lseek,
    5519             :         .release        = seq_release,
    5520             : };
    5521             : 
    5522             : static ssize_t
    5523           0 : tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
    5524             :                                  size_t cnt, loff_t *ppos)
    5525             : {
    5526           0 :         char buf[64];
    5527           0 :         int r;
    5528             : 
    5529           0 :         arch_spin_lock(&trace_cmdline_lock);
    5530           0 :         r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
    5531           0 :         arch_spin_unlock(&trace_cmdline_lock);
    5532             : 
    5533           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    5534             : }
    5535             : 
    5536           0 : static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
    5537             : {
    5538           0 :         kfree(s->saved_cmdlines);
    5539           0 :         kfree(s->map_cmdline_to_pid);
    5540           0 :         kfree(s);
    5541           0 : }
    5542             : 
    5543           0 : static int tracing_resize_saved_cmdlines(unsigned int val)
    5544             : {
    5545           0 :         struct saved_cmdlines_buffer *s, *savedcmd_temp;
    5546             : 
    5547           0 :         s = kmalloc(sizeof(*s), GFP_KERNEL);
    5548           0 :         if (!s)
    5549             :                 return -ENOMEM;
    5550             : 
    5551           0 :         if (allocate_cmdlines_buffer(val, s) < 0) {
    5552           0 :                 kfree(s);
    5553           0 :                 return -ENOMEM;
    5554             :         }
    5555             : 
    5556           0 :         arch_spin_lock(&trace_cmdline_lock);
    5557           0 :         savedcmd_temp = savedcmd;
    5558           0 :         savedcmd = s;
    5559           0 :         arch_spin_unlock(&trace_cmdline_lock);
    5560           0 :         free_saved_cmdlines_buffer(savedcmd_temp);
    5561             : 
    5562           0 :         return 0;
    5563             : }
    5564             : 
    5565             : static ssize_t
    5566           0 : tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
    5567             :                                   size_t cnt, loff_t *ppos)
    5568             : {
    5569           0 :         unsigned long val;
    5570           0 :         int ret;
    5571             : 
    5572           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    5573           0 :         if (ret)
    5574           0 :                 return ret;
    5575             : 
    5576             :         /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
    5577           0 :         if (!val || val > PID_MAX_DEFAULT)
    5578             :                 return -EINVAL;
    5579             : 
    5580           0 :         ret = tracing_resize_saved_cmdlines((unsigned int)val);
    5581           0 :         if (ret < 0)
    5582           0 :                 return ret;
    5583             : 
    5584           0 :         *ppos += cnt;
    5585             : 
    5586           0 :         return cnt;
    5587             : }
    5588             : 
    5589             : static const struct file_operations tracing_saved_cmdlines_size_fops = {
    5590             :         .open           = tracing_open_generic,
    5591             :         .read           = tracing_saved_cmdlines_size_read,
    5592             :         .write          = tracing_saved_cmdlines_size_write,
    5593             : };
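                     : 
                     : /*
                     :  * tracing_resize_saved_cmdlines() above is the classic
                     :  * replace-under-lock pattern: allocate the new buffer with no lock
                     :  * held, swap the pointer inside the critical section, free the old
                     :  * copy afterwards. A generic sketch (demo_* names hypothetical; a
                     :  * plain spinlock stands in for the arch_spinlock used above):
                     :  */
                     : #include <linux/slab.h>
                     : #include <linux/spinlock.h>
                     : 
                     : static DEFINE_SPINLOCK(demo_lock);
                     : static int *demo_buf;
                     : 
                     : static int demo_resize(unsigned int nr)
                     : {
                     :         int *new_buf, *old_buf;
                     : 
                     :         new_buf = kcalloc(nr, sizeof(*new_buf), GFP_KERNEL); /* may sleep */
                     :         if (!new_buf)
                     :                 return -ENOMEM;
                     : 
                     :         spin_lock(&demo_lock);  /* readers see old or new, never a mix */
                     :         old_buf = demo_buf;
                     :         demo_buf = new_buf;
                     :         spin_unlock(&demo_lock);
                     : 
                     :         kfree(old_buf);         /* no longer reachable, safe to free */
                     :         return 0;
                     : }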
    5594             : 
    5595             : #ifdef CONFIG_TRACE_EVAL_MAP_FILE
    5596             : static union trace_eval_map_item *
    5597             : update_eval_map(union trace_eval_map_item *ptr)
    5598             : {
    5599             :         if (!ptr->map.eval_string) {
    5600             :                 if (ptr->tail.next) {
    5601             :                         ptr = ptr->tail.next;
    5602             :                         /* Set ptr to the next real item (skip head) */
    5603             :                         ptr++;
    5604             :                 } else
    5605             :                         return NULL;
    5606             :         }
    5607             :         return ptr;
    5608             : }
    5609             : 
    5610             : static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
    5611             : {
    5612             :         union trace_eval_map_item *ptr = v;
    5613             : 
    5614             :         /*
    5615             :          * Paranoid! If ptr points to end, we don't want to increment past it.
    5616             :          * This really should never happen.
    5617             :          */
    5618             :         (*pos)++;
    5619             :         ptr = update_eval_map(ptr);
    5620             :         if (WARN_ON_ONCE(!ptr))
    5621             :                 return NULL;
    5622             : 
    5623             :         ptr++;
    5624             :         ptr = update_eval_map(ptr);
    5625             : 
    5626             :         return ptr;
    5627             : }
    5628             : 
    5629             : static void *eval_map_start(struct seq_file *m, loff_t *pos)
    5630             : {
    5631             :         union trace_eval_map_item *v;
    5632             :         loff_t l = 0;
    5633             : 
    5634             :         mutex_lock(&trace_eval_mutex);
    5635             : 
    5636             :         v = trace_eval_maps;
    5637             :         if (v)
    5638             :                 v++;
    5639             : 
    5640             :         while (v && l < *pos) {
    5641             :                 v = eval_map_next(m, v, &l);
    5642             :         }
    5643             : 
    5644             :         return v;
    5645             : }
    5646             : 
    5647             : static void eval_map_stop(struct seq_file *m, void *v)
    5648             : {
    5649             :         mutex_unlock(&trace_eval_mutex);
    5650             : }
    5651             : 
    5652             : static int eval_map_show(struct seq_file *m, void *v)
    5653             : {
    5654             :         union trace_eval_map_item *ptr = v;
    5655             : 
    5656             :         seq_printf(m, "%s %ld (%s)\n",
    5657             :                    ptr->map.eval_string, ptr->map.eval_value,
    5658             :                    ptr->map.system);
    5659             : 
    5660             :         return 0;
    5661             : }
    5662             : 
    5663             : static const struct seq_operations tracing_eval_map_seq_ops = {
    5664             :         .start          = eval_map_start,
    5665             :         .next           = eval_map_next,
    5666             :         .stop           = eval_map_stop,
    5667             :         .show           = eval_map_show,
    5668             : };
    5669             : 
    5670             : static int tracing_eval_map_open(struct inode *inode, struct file *filp)
    5671             : {
    5672             :         int ret;
    5673             : 
    5674             :         ret = tracing_check_open_get_tr(NULL);
    5675             :         if (ret)
    5676             :                 return ret;
    5677             : 
    5678             :         return seq_open(filp, &tracing_eval_map_seq_ops);
    5679             : }
    5680             : 
    5681             : static const struct file_operations tracing_eval_map_fops = {
    5682             :         .open           = tracing_eval_map_open,
    5683             :         .read           = seq_read,
    5684             :         .llseek         = seq_lseek,
    5685             :         .release        = seq_release,
    5686             : };
    5687             : 
    5688             : static inline union trace_eval_map_item *
    5689             : trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
    5690             : {
    5691             :         /* Return tail of array given the head */
    5692             :         return ptr + ptr->head.length + 1;
    5693             : }
    5694             : 
    5695             : static void
    5696             : trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
    5697             :                            int len)
    5698             : {
    5699             :         struct trace_eval_map **stop;
    5700             :         struct trace_eval_map **map;
    5701             :         union trace_eval_map_item *map_array;
    5702             :         union trace_eval_map_item *ptr;
    5703             : 
    5704             :         stop = start + len;
    5705             : 
    5706             :         /*
    5707             :          * The trace_eval_maps contains the map plus a head and tail item,
    5708             :          * where the head holds the module and length of array, and the
    5709             :          * tail holds a pointer to the next list.
    5710             :          */
    5711             :         map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
    5712             :         if (!map_array) {
    5713             :                 pr_warn("Unable to allocate trace eval mapping\n");
    5714             :                 return;
    5715             :         }
    5716             : 
    5717             :         mutex_lock(&trace_eval_mutex);
    5718             : 
    5719             :         if (!trace_eval_maps)
    5720             :                 trace_eval_maps = map_array;
    5721             :         else {
    5722             :                 ptr = trace_eval_maps;
    5723             :                 for (;;) {
    5724             :                         ptr = trace_eval_jmp_to_tail(ptr);
    5725             :                         if (!ptr->tail.next)
    5726             :                                 break;
    5727             :                         ptr = ptr->tail.next;
    5728             : 
    5729             :                 }
    5730             :                 ptr->tail.next = map_array;
    5731             :         }
    5732             :         map_array->head.mod = mod;
    5733             :         map_array->head.length = len;
    5734             :         map_array++;
    5735             : 
    5736             :         for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
    5737             :                 map_array->map = **map;
    5738             :                 map_array++;
    5739             :         }
    5740             :         memset(map_array, 0, sizeof(*map_array));
    5741             : 
    5742             :         mutex_unlock(&trace_eval_mutex);
    5743             : }
    5744             : 
    5745             : static void trace_create_eval_file(struct dentry *d_tracer)
    5746             : {
    5747             :         trace_create_file("eval_map", 0444, d_tracer,
    5748             :                           NULL, &tracing_eval_map_fops);
    5749             : }
    5750             : 
    5751             : #else /* CONFIG_TRACE_EVAL_MAP_FILE */
    5752           1 : static inline void trace_create_eval_file(struct dentry *d_tracer) { }
    5753           1 : static inline void trace_insert_eval_map_file(struct module *mod,
    5754           1 :                               struct trace_eval_map **start, int len) { }
    5755             : #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
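                     : 
                     : /*
                     :  * Shape of each block built by trace_insert_eval_map_file() above,
                     :  * for len maps (editor's illustration of the code as written):
                     :  *
                     :  *   map_array[0]        head: { mod, length = len }
                     :  *   map_array[1..len]   the copied struct trace_eval_map entries
                     :  *   map_array[len + 1]  tail: { next } (zeroed here, patched when the
                     :  *                       next module's block is chained on)
                     :  *
                     :  * Hence trace_eval_jmp_to_tail() returning ptr + ptr->head.length + 1,
                     :  * and the seq_file iterator skipping one slot past every head.
                     :  */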
    5756             : 
    5757           1 : static void trace_insert_eval_map(struct module *mod,
    5758             :                                   struct trace_eval_map **start, int len)
    5759             : {
    5760           1 :         struct trace_eval_map **map;
    5761             : 
    5762           1 :         if (len <= 0)
    5763             :                 return;
    5764             : 
    5765           1 :         map = start;
    5766             : 
    5767           1 :         trace_event_eval_update(map, len);
    5768             : 
    5769           1 :         trace_insert_eval_map_file(mod, start, len);
    5770             : }
    5771             : 
    5772             : static ssize_t
    5773           0 : tracing_set_trace_read(struct file *filp, char __user *ubuf,
    5774             :                        size_t cnt, loff_t *ppos)
    5775             : {
    5776           0 :         struct trace_array *tr = filp->private_data;
    5777           0 :         char buf[MAX_TRACER_SIZE+2];
    5778           0 :         int r;
    5779             : 
    5780           0 :         mutex_lock(&trace_types_lock);
    5781           0 :         r = sprintf(buf, "%s\n", tr->current_trace->name);
    5782           0 :         mutex_unlock(&trace_types_lock);
    5783             : 
    5784           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    5785             : }
    5786             : 
    5787           0 : int tracer_init(struct tracer *t, struct trace_array *tr)
    5788             : {
    5789           0 :         tracing_reset_online_cpus(&tr->array_buffer);
    5790           0 :         return t->init(tr);
    5791             : }
    5792             : 
    5793           1 : static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
    5794             : {
    5795           1 :         int cpu;
    5796             : 
    5797           5 :         for_each_tracing_cpu(cpu)
    5798           4 :                 per_cpu_ptr(buf->data, cpu)->entries = val;
    5799           1 : }
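                     : 
                     : /*
                     :  * set_buffer_entries() pokes one field in every CPU's instance of the
                     :  * per-cpu buffer data (for_each_tracing_cpu is a tracing-local cpumask
                     :  * iterator). The same idiom with generic helpers; the demo_* names are
                     :  * hypothetical:
                     :  */
                     : #include <linux/init.h>
                     : #include <linux/percpu.h>
                     : 
                     : struct demo_pcpu {
                     :         unsigned long entries;
                     : };
                     : static struct demo_pcpu __percpu *demo_data;
                     : 
                     : static int __init demo_pcpu_init(void)
                     : {
                     :         demo_data = alloc_percpu(struct demo_pcpu);
                     :         return demo_data ? 0 : -ENOMEM;
                     : }
                     : 
                     : static void demo_set_entries(unsigned long val)
                     : {
                     :         int cpu;
                     : 
                     :         for_each_possible_cpu(cpu)
                     :                 per_cpu_ptr(demo_data, cpu)->entries = val;
                     : }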
    5800             : 
    5801             : #ifdef CONFIG_TRACER_MAX_TRACE
    5802             : /* resize @trace_buf to match the per-cpu entry counts recorded in @size_buf */
    5803             : static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
    5804             :                                         struct array_buffer *size_buf, int cpu_id)
    5805             : {
    5806             :         int cpu, ret = 0;
    5807             : 
    5808             :         if (cpu_id == RING_BUFFER_ALL_CPUS) {
    5809             :                 for_each_tracing_cpu(cpu) {
    5810             :                         ret = ring_buffer_resize(trace_buf->buffer,
    5811             :                                  per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
    5812             :                         if (ret < 0)
    5813             :                                 break;
    5814             :                         per_cpu_ptr(trace_buf->data, cpu)->entries =
    5815             :                                 per_cpu_ptr(size_buf->data, cpu)->entries;
    5816             :                 }
    5817             :         } else {
    5818             :                 ret = ring_buffer_resize(trace_buf->buffer,
    5819             :                                  per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
    5820             :                 if (ret == 0)
    5821             :                         per_cpu_ptr(trace_buf->data, cpu_id)->entries =
    5822             :                                 per_cpu_ptr(size_buf->data, cpu_id)->entries;
    5823             :         }
    5824             : 
    5825             :         return ret;
    5826             : }
    5827             : #endif /* CONFIG_TRACER_MAX_TRACE */
    5828             : 
    5829           0 : static int __tracing_resize_ring_buffer(struct trace_array *tr,
    5830             :                                         unsigned long size, int cpu)
    5831             : {
    5832           0 :         int ret;
    5833             : 
    5834             :         /*
    5835             :          * If kernel or user changes the size of the ring buffer
    5836             :          * we use the size that was given, and we can forget about
    5837             :          * expanding it later.
    5838             :          */
    5839           0 :         ring_buffer_expanded = true;
    5840             : 
    5841             :         /* May be called before buffers are initialized */
    5842           0 :         if (!tr->array_buffer.buffer)
    5843             :                 return 0;
    5844             : 
    5845           0 :         ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
    5846           0 :         if (ret < 0)
    5847             :                 return ret;
    5848             : 
    5849             : #ifdef CONFIG_TRACER_MAX_TRACE
    5850             :         if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
    5851             :             !tr->current_trace->use_max_tr)
    5852             :                 goto out;
    5853             : 
    5854             :         ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
    5855             :         if (ret < 0) {
    5856             :                 int r = resize_buffer_duplicate_size(&tr->array_buffer,
    5857             :                                                      &tr->array_buffer, cpu);
    5858             :                 if (r < 0) {
    5859             :                         /*
    5860             :                          * AARGH! We are left with different
    5861             :                          * size max buffer!!!!
    5862             :                          * The max buffer is our "snapshot" buffer.
    5863             :                          * When a tracer needs a snapshot (one of the
    5864             :                          * latency tracers), it swaps the max buffer
    5865             :                          * with the saved snapshot. We succeeded in
    5866             :                          * updating the size of the main buffer, but failed to
    5867             :                          * update the size of the max buffer. But when we tried
    5868             :                          * to reset the main buffer to the original size, we
    5869             :                          * failed there too. This is very unlikely to
    5870             :                          * happen, but if it does, warn and kill all
    5871             :                          * tracing.
    5872             :                          */
    5873             :                         WARN_ON(1);
    5874             :                         tracing_disabled = 1;
    5875             :                 }
    5876             :                 return ret;
    5877             :         }
    5878             : 
    5879             :         if (cpu == RING_BUFFER_ALL_CPUS)
    5880             :                 set_buffer_entries(&tr->max_buffer, size);
    5881             :         else
    5882             :                 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
    5883             : 
    5884             :  out:
    5885             : #endif /* CONFIG_TRACER_MAX_TRACE */
    5886             : 
    5887           0 :         if (cpu == RING_BUFFER_ALL_CPUS)
    5888           0 :                 set_buffer_entries(&tr->array_buffer, size);
    5889             :         else
    5890           0 :                 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
    5891             : 
    5892             :         return ret;
    5893             : }
    5894             : 
    5895           0 : ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
    5896             :                                   unsigned long size, int cpu_id)
    5897             : {
    5898           0 :         int ret = size;
    5899             : 
    5900           0 :         mutex_lock(&trace_types_lock);
    5901             : 
    5902           0 :         if (cpu_id != RING_BUFFER_ALL_CPUS) {
    5903             :                 /* make sure this cpu is enabled in the mask */
    5904           0 :                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
    5905           0 :                         ret = -EINVAL;
    5906           0 :                         goto out;
    5907             :                 }
    5908             :         }
    5909             : 
    5910           0 :         ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
    5911           0 :         if (ret < 0)
    5912           0 :                 ret = -ENOMEM;
    5913             : 
    5914           0 : out:
    5915           0 :         mutex_unlock(&trace_types_lock);
    5916             : 
    5917           0 :         return ret;
    5918             : }
    5919             : 
    5920             : 
    5921             : /**
    5922             :  * tracing_update_buffers - used by tracing facility to expand ring buffers
    5923             :  *
    5924             :  * To save memory on systems where tracing is configured in but never
    5925             :  * used, the ring buffers start out at a minimum size. Once a user
    5926             :  * starts to use the tracing facility, they need to grow to their
    5927             :  * default size.
    5928             :  *
    5929             :  * This function is to be called when a tracer is about to be used.
    5930             :  */
    5931           0 : int tracing_update_buffers(void)
    5932             : {
    5933           0 :         int ret = 0;
    5934             : 
    5935           0 :         mutex_lock(&trace_types_lock);
    5936           0 :         if (!ring_buffer_expanded)
    5937           0 :                 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
    5938             :                                                 RING_BUFFER_ALL_CPUS);
    5939           0 :         mutex_unlock(&trace_types_lock);
    5940             : 
    5941           0 :         return ret;
    5942             : }
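                     : 
                     : /*
                     :  * Typical call site (sketch): anything about to turn tracing on does
                     :  *
                     :  *      ret = tracing_update_buffers();
                     :  *      if (ret < 0)
                     :  *              return ret;
                     :  *
                     :  * so the first real user pays the allocation cost, while idle systems
                     :  * keep the boot-time minimum.
                     :  */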
    5943             : 
    5944             : struct trace_option_dentry;
    5945             : 
    5946             : static void
    5947             : create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
    5948             : 
    5949             : /*
    5950             :  * Used to clear out the tracer before deletion of an instance.
    5951             :  * Must have trace_types_lock held.
    5952             :  */
    5953           0 : static void tracing_set_nop(struct trace_array *tr)
    5954             : {
    5955           0 :         if (tr->current_trace == &nop_trace)
    5956             :                 return;
    5957             : 
    5958           0 :         tr->current_trace->enabled--;
    5959             : 
    5960           0 :         if (tr->current_trace->reset)
    5961           0 :                 tr->current_trace->reset(tr);
    5962             : 
    5963           0 :         tr->current_trace = &nop_trace;
    5964             : }
    5965             : 
    5966           2 : static void add_tracer_options(struct trace_array *tr, struct tracer *t)
    5967             : {
    5968             :         /* Only enable if the directory has been created already. */
    5969           1 :         if (!tr->dir)
    5970             :                 return;
    5971             : 
    5972           1 :         create_trace_option_files(tr, t);
    5973             : }
    5974             : 
    5975           0 : int tracing_set_tracer(struct trace_array *tr, const char *buf)
    5976             : {
    5977           0 :         struct tracer *t;
    5978             : #ifdef CONFIG_TRACER_MAX_TRACE
    5979             :         bool had_max_tr;
    5980             : #endif
    5981           0 :         int ret = 0;
    5982             : 
    5983           0 :         mutex_lock(&trace_types_lock);
    5984             : 
    5985           0 :         if (!ring_buffer_expanded) {
    5986           0 :                 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
    5987             :                                                 RING_BUFFER_ALL_CPUS);
    5988           0 :                 if (ret < 0)
    5989           0 :                         goto out;
    5990             :                 ret = 0;
    5991             :         }
    5992             : 
    5993           0 :         for (t = trace_types; t; t = t->next) {
    5994           0 :                 if (strcmp(t->name, buf) == 0)
    5995             :                         break;
    5996             :         }
    5997           0 :         if (!t) {
    5998           0 :                 ret = -EINVAL;
    5999           0 :                 goto out;
    6000             :         }
    6001           0 :         if (t == tr->current_trace)
    6002           0 :                 goto out;
    6003             : 
    6004             : #ifdef CONFIG_TRACER_SNAPSHOT
    6005             :         if (t->use_max_tr) {
    6006             :                 arch_spin_lock(&tr->max_lock);
    6007             :                 if (tr->cond_snapshot)
    6008             :                         ret = -EBUSY;
    6009             :                 arch_spin_unlock(&tr->max_lock);
    6010             :                 if (ret)
    6011             :                         goto out;
    6012             :         }
    6013             : #endif
    6014             :         /* Some tracers won't work when enabled from the kernel command line */
    6015           0 :         if (system_state < SYSTEM_RUNNING && t->noboot) {
    6016           0 :                 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
    6017             :                         t->name);
    6018           0 :                 goto out;
    6019             :         }
    6020             : 
    6021             :         /* Some tracers are only allowed for the top level buffer */
    6022           0 :         if (!trace_ok_for_array(t, tr)) {
    6023           0 :                 ret = -EINVAL;
    6024           0 :                 goto out;
    6025             :         }
    6026             : 
    6027             :         /* If trace pipe files are being read, we can't change the tracer */
    6028           0 :         if (tr->trace_ref) {
    6029           0 :                 ret = -EBUSY;
    6030           0 :                 goto out;
    6031             :         }
    6032             : 
    6033           0 :         trace_branch_disable();
    6034             : 
    6035           0 :         tr->current_trace->enabled--;
    6036             : 
    6037           0 :         if (tr->current_trace->reset)
    6038           0 :                 tr->current_trace->reset(tr);
    6039             : 
    6040             :         /* Current trace needs to be nop_trace before synchronize_rcu */
    6041           0 :         tr->current_trace = &nop_trace;
    6042             : 
    6043             : #ifdef CONFIG_TRACER_MAX_TRACE
    6044             :         had_max_tr = tr->allocated_snapshot;
    6045             : 
    6046             :         if (had_max_tr && !t->use_max_tr) {
    6047             :                 /*
    6048             :                  * We need to make sure that the update_max_tr sees that
    6049             :                  * current_trace changed to nop_trace to keep it from
    6050             :                  * swapping the buffers after we resize it.
    6051             :                  * update_max_tr() is called with interrupts disabled,
    6052             :                  * so a synchronize_rcu() is sufficient.
    6053             :                  */
    6054             :                 synchronize_rcu();
    6055             :                 free_snapshot(tr);
    6056             :         }
    6057             : #endif
    6058             : 
    6059             : #ifdef CONFIG_TRACER_MAX_TRACE
    6060             :         if (t->use_max_tr && !had_max_tr) {
    6061             :                 ret = tracing_alloc_snapshot_instance(tr);
    6062             :                 if (ret < 0)
    6063             :                         goto out;
    6064             :         }
    6065             : #endif
    6066             : 
    6067           0 :         if (t->init) {
    6068           0 :                 ret = tracer_init(t, tr);
    6069           0 :                 if (ret)
    6070           0 :                         goto out;
    6071             :         }
    6072             : 
    6073           0 :         tr->current_trace = t;
    6074           0 :         tr->current_trace->enabled++;
    6075           0 :         trace_branch_enable(tr);
    6076           0 :  out:
    6077           0 :         mutex_unlock(&trace_types_lock);
    6078             : 
    6079           0 :         return ret;
    6080             : }
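                     : 
                     : /*
                     :  * What tracing_set_tracer() selects between (editor's sketch): a
                     :  * tracer is a struct tracer carrying a name plus optional init/reset
                     :  * callbacks, registered once with register_tracer() from
                     :  * kernel/trace/trace.h. The demo_* names are hypothetical:
                     :  */
                     : static int demo_tracer_init(struct trace_array *tr)
                     : {
                     :         /* reached via tracer_init() after the buffer has been reset */
                     :         return 0;
                     : }
                     : 
                     : static void demo_tracer_reset(struct trace_array *tr)
                     : {
                     :         /* undo demo_tracer_init(); called before switching away */
                     : }
                     : 
                     : static struct tracer demo_tracer __read_mostly = {
                     :         .name           = "demo",
                     :         .init           = demo_tracer_init,
                     :         .reset          = demo_tracer_reset,
                     : };
                     : 
                     : /* at boot: register_tracer(&demo_tracer); then: echo demo > current_tracer */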
    6081             : 
    6082             : static ssize_t
    6083           0 : tracing_set_trace_write(struct file *filp, const char __user *ubuf,
    6084             :                         size_t cnt, loff_t *ppos)
    6085             : {
    6086           0 :         struct trace_array *tr = filp->private_data;
    6087           0 :         char buf[MAX_TRACER_SIZE+1];
    6088           0 :         int i;
    6089           0 :         size_t ret;
    6090           0 :         int err;
    6091             : 
    6092           0 :         ret = cnt;
    6093             : 
    6094           0 :         if (cnt > MAX_TRACER_SIZE)
    6095             :                 cnt = MAX_TRACER_SIZE;
    6096             : 
    6097           0 :         if (copy_from_user(buf, ubuf, cnt))
    6098             :                 return -EFAULT;
    6099             : 
    6100           0 :         buf[cnt] = 0;
    6101             : 
    6102             :         /* strip trailing whitespace. */
    6103           0 :         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
    6104           0 :                 buf[i] = 0;
    6105             : 
    6106           0 :         err = tracing_set_tracer(tr, buf);
    6107           0 :         if (err)
    6108           0 :                 return err;
    6109             : 
    6110           0 :         *ppos += ret;
    6111             : 
    6112           0 :         return ret;
    6113             : }
    6114             : 
    6115             : static ssize_t
    6116           0 : tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
    6117             :                    size_t cnt, loff_t *ppos)
    6118             : {
    6119           0 :         char buf[64];
    6120           0 :         int r;
    6121             : 
    6122           0 :         r = snprintf(buf, sizeof(buf), "%ld\n",
    6123           0 :                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
    6124           0 :         if (r > sizeof(buf))
    6125           0 :                 r = sizeof(buf);
    6126           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    6127             : }
    6128             : 
    6129             : static ssize_t
    6130           0 : tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
    6131             :                     size_t cnt, loff_t *ppos)
    6132             : {
    6133           0 :         unsigned long val;
    6134           0 :         int ret;
    6135             : 
    6136           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    6137           0 :         if (ret)
    6138           0 :                 return ret;
    6139             : 
    6140           0 :         *ptr = val * 1000;
    6141             : 
    6142           0 :         return cnt;
    6143             : }
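                     : 
                     : /*
                     :  * Units for the pair above: the files are read and written in
                     :  * microseconds while the backing variable holds nanoseconds, so
                     :  * "echo 50 > tracing_thresh" stores 50 * 1000 = 50000ns.
                     :  */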
    6144             : 
    6145             : static ssize_t
    6146           0 : tracing_thresh_read(struct file *filp, char __user *ubuf,
    6147             :                     size_t cnt, loff_t *ppos)
    6148             : {
    6149           0 :         return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
    6150             : }
    6151             : 
    6152             : static ssize_t
    6153           0 : tracing_thresh_write(struct file *filp, const char __user *ubuf,
    6154             :                      size_t cnt, loff_t *ppos)
    6155             : {
    6156           0 :         struct trace_array *tr = filp->private_data;
    6157           0 :         int ret;
    6158             : 
    6159           0 :         mutex_lock(&trace_types_lock);
    6160           0 :         ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
    6161           0 :         if (ret < 0)
    6162           0 :                 goto out;
    6163             : 
    6164           0 :         if (tr->current_trace->update_thresh) {
    6165           0 :                 ret = tr->current_trace->update_thresh(tr);
    6166           0 :                 if (ret < 0)
    6167           0 :                         goto out;
    6168             :         }
    6169             : 
    6170           0 :         ret = cnt;
    6171           0 : out:
    6172           0 :         mutex_unlock(&trace_types_lock);
    6173             : 
    6174           0 :         return ret;
    6175             : }
    6176             : 
    6177             : #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
    6178             : 
    6179             : static ssize_t
    6180             : tracing_max_lat_read(struct file *filp, char __user *ubuf,
    6181             :                      size_t cnt, loff_t *ppos)
    6182             : {
    6183             :         return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
    6184             : }
    6185             : 
    6186             : static ssize_t
    6187             : tracing_max_lat_write(struct file *filp, const char __user *ubuf,
    6188             :                       size_t cnt, loff_t *ppos)
    6189             : {
    6190             :         return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
    6191             : }
    6192             : 
    6193             : #endif
    6194             : 
    6195           0 : static int tracing_open_pipe(struct inode *inode, struct file *filp)
    6196             : {
    6197           0 :         struct trace_array *tr = inode->i_private;
    6198           0 :         struct trace_iterator *iter;
    6199           0 :         int ret;
    6200             : 
    6201           0 :         ret = tracing_check_open_get_tr(tr);
    6202           0 :         if (ret)
    6203             :                 return ret;
    6204             : 
    6205           0 :         mutex_lock(&trace_types_lock);
    6206             : 
    6207             :         /* create a buffer to store the information to pass to userspace */
    6208           0 :         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    6209           0 :         if (!iter) {
    6210           0 :                 ret = -ENOMEM;
    6211           0 :                 __trace_array_put(tr);
    6212           0 :                 goto out;
    6213             :         }
    6214             : 
    6215           0 :         trace_seq_init(&iter->seq);
    6216           0 :         iter->trace = tr->current_trace;
    6217             : 
    6218           0 :         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
    6219             :                 ret = -ENOMEM;
    6220             :                 goto fail;
    6221             :         }
    6222             : 
    6223             :         /* trace pipe does not show start of buffer */
    6224           0 :         cpumask_setall(iter->started);
    6225             : 
    6226           0 :         if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
    6227           0 :                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
    6228             : 
    6229             :         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
    6230           0 :         if (trace_clocks[tr->clock_id].in_ns)
    6231           0 :                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
    6232             : 
    6233           0 :         iter->tr = tr;
    6234           0 :         iter->array_buffer = &tr->array_buffer;
    6235           0 :         iter->cpu_file = tracing_get_cpu(inode);
    6236           0 :         mutex_init(&iter->mutex);
    6237           0 :         filp->private_data = iter;
    6238             : 
    6239           0 :         if (iter->trace->pipe_open)
    6240           0 :                 iter->trace->pipe_open(iter);
    6241             : 
    6242           0 :         nonseekable_open(inode, filp);
    6243             : 
    6244           0 :         tr->trace_ref++;
    6245           0 : out:
    6246           0 :         mutex_unlock(&trace_types_lock);
    6247           0 :         return ret;
    6248             : 
    6249             : fail:
    6250             :         kfree(iter);
    6251             :         __trace_array_put(tr);
    6252             :         mutex_unlock(&trace_types_lock);
    6253             :         return ret;
    6254             : }
    6255             : 
    6256           0 : static int tracing_release_pipe(struct inode *inode, struct file *file)
    6257             : {
    6258           0 :         struct trace_iterator *iter = file->private_data;
    6259           0 :         struct trace_array *tr = inode->i_private;
    6260             : 
    6261           0 :         mutex_lock(&trace_types_lock);
    6262             : 
    6263           0 :         tr->trace_ref--;
    6264             : 
    6265           0 :         if (iter->trace->pipe_close)
    6266           0 :                 iter->trace->pipe_close(iter);
    6267             : 
    6268           0 :         mutex_unlock(&trace_types_lock);
    6269             : 
    6270           0 :         free_cpumask_var(iter->started);
    6271           0 :         mutex_destroy(&iter->mutex);
    6272           0 :         kfree(iter);
    6273             : 
    6274           0 :         trace_array_put(tr);
    6275             : 
    6276           0 :         return 0;
    6277             : }
    6278             : 
    6279             : static __poll_t
    6280           0 : trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
    6281             : {
    6282           0 :         struct trace_array *tr = iter->tr;
    6283             : 
    6284             :         /* Iterators are static; they are either filled or empty */
    6285           0 :         if (trace_buffer_iter(iter, iter->cpu_file))
    6286             :                 return EPOLLIN | EPOLLRDNORM;
    6287             : 
    6288           0 :         if (tr->trace_flags & TRACE_ITER_BLOCK)
    6289             :                 /*
    6290             :                  * Always select as readable when in blocking mode
    6291             :                  */
    6292             :                 return EPOLLIN | EPOLLRDNORM;
    6293             :         else
    6294           0 :                 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
    6295             :                                              filp, poll_table);
    6296             : }
    6297             : 
    6298             : static __poll_t
    6299           0 : tracing_poll_pipe(struct file *filp, poll_table *poll_table)
    6300             : {
    6301           0 :         struct trace_iterator *iter = filp->private_data;
    6302             : 
    6303           0 :         return trace_poll(iter, filp, poll_table);
    6304             : }
    6305             : 
    6306             : /* Must be called with iter->mutex held. */
    6307           0 : static int tracing_wait_pipe(struct file *filp)
    6308             : {
    6309           0 :         struct trace_iterator *iter = filp->private_data;
    6310           0 :         int ret;
    6311             : 
    6312           0 :         while (trace_empty(iter)) {
    6313             : 
    6314           0 :                 if ((filp->f_flags & O_NONBLOCK)) {
    6315             :                         return -EAGAIN;
    6316             :                 }
    6317             : 
    6318             :         /*
    6319             :          * Block until something is read or tracing is disabled.
    6320             :          * We keep blocking while tracing is disabled if nothing has
    6321             :          * been read yet: this allows a user to cat this file and
    6322             :          * then enable tracing. Once something has been read, a
    6323             :          * disabled tracer produces an EOF.
    6324             :          *
    6325             :          * iter->pos will be 0 if we haven't read anything.
    6326             :          */
    6327           0 :                 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
    6328             :                         break;
    6329             : 
    6330           0 :                 mutex_unlock(&iter->mutex);
    6331             : 
    6332           0 :                 ret = wait_on_pipe(iter, 0);
    6333             : 
    6334           0 :                 mutex_lock(&iter->mutex);
    6335             : 
    6336           0 :                 if (ret)
    6337           0 :                         return ret;
    6338             :         }
    6339             : 
    6340             :         return 1;
    6341             : }
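
[Editor's note] The blocking rules above are visible from userspace when reading trace_pipe. Below is a minimal consumer sketch, assuming the default tracefs mount at /sys/kernel/tracing (adjust the path if tracefs is mounted elsewhere); with O_NONBLOCK, an empty buffer yields EAGAIN instead of blocking, matching the O_NONBLOCK branch above.

        #include <errno.h>
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char buf[4096];
                ssize_t n;
                /* O_NONBLOCK: an empty, enabled buffer returns EAGAIN */
                int fd = open("/sys/kernel/tracing/trace_pipe",
                              O_RDONLY | O_NONBLOCK);

                if (fd < 0)
                        return 1;
                while ((n = read(fd, buf, sizeof(buf))) != 0) {
                        if (n < 0) {
                                if (errno == EAGAIN) {
                                        usleep(100000); /* nothing yet; retry */
                                        continue;
                                }
                                break;
                        }
                        fwrite(buf, 1, n, stdout); /* consume entries */
                }
                close(fd);
                return 0;
        }
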
    6342             : 
    6343             : /*
    6344             :  * Consumer reader.
    6345             :  */
    6346             : static ssize_t
    6347           0 : tracing_read_pipe(struct file *filp, char __user *ubuf,
    6348             :                   size_t cnt, loff_t *ppos)
    6349             : {
    6350           0 :         struct trace_iterator *iter = filp->private_data;
    6351           0 :         ssize_t sret;
    6352             : 
    6353             :         /*
    6354             :          * Avoid more than one consumer on a single file descriptor.
    6355             :          * This is only a matter of trace coherency; the ring buffer
    6356             :          * itself is protected.
    6357             :          */
    6358           0 :         mutex_lock(&iter->mutex);
    6359             : 
    6360             :         /* return any leftover data */
    6361           0 :         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
    6362           0 :         if (sret != -EBUSY)
    6363           0 :                 goto out;
    6364             : 
    6365           0 :         trace_seq_init(&iter->seq);
    6366             : 
    6367           0 :         if (iter->trace->read) {
    6368           0 :                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
    6369           0 :                 if (sret)
    6370           0 :                         goto out;
    6371             :         }
    6372             : 
    6373           0 : waitagain:
    6374           0 :         sret = tracing_wait_pipe(filp);
    6375           0 :         if (sret <= 0)
    6376           0 :                 goto out;
    6377             : 
    6378             :         /* stop when tracing is finished */
    6379           0 :         if (trace_empty(iter)) {
    6380           0 :                 sret = 0;
    6381           0 :                 goto out;
    6382             :         }
    6383             : 
    6384           0 :         if (cnt >= PAGE_SIZE)
    6385             :                 cnt = PAGE_SIZE - 1;
    6386             : 
    6387             :         /* reset all but tr, trace, and overruns */
    6388           0 :         memset(&iter->seq, 0,
    6389             :                sizeof(struct trace_iterator) -
    6390             :                offsetof(struct trace_iterator, seq));
    6391           0 :         cpumask_clear(iter->started);
    6392           0 :         trace_seq_init(&iter->seq);
    6393           0 :         iter->pos = -1;
    6394             : 
    6395           0 :         trace_event_read_lock();
    6396           0 :         trace_access_lock(iter->cpu_file);
    6397           0 :         while (trace_find_next_entry_inc(iter) != NULL) {
    6398           0 :                 enum print_line_t ret;
    6399           0 :                 int save_len = iter->seq.seq.len;
    6400             : 
    6401           0 :                 ret = print_trace_line(iter);
    6402           0 :                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
    6403             :                         /* don't print partial lines */
    6404           0 :                         iter->seq.seq.len = save_len;
    6405           0 :                         break;
    6406             :                 }
    6407           0 :                 if (ret != TRACE_TYPE_NO_CONSUME)
    6408           0 :                         trace_consume(iter);
    6409             : 
    6410           0 :                 if (trace_seq_used(&iter->seq) >= cnt)
    6411             :                         break;
    6412             : 
    6413             :                 /*
    6414             :                  * The full flag being set means we hit the end of the
    6415             :                  * trace_seq buffer and should have left via the partial
    6416             :                  * line condition above; a trace_seq_* function was misused.
    6417             :                  */
    6418           0 :                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
    6419             :                           iter->ent->type);
    6420             :         }
    6421           0 :         trace_access_unlock(iter->cpu_file);
    6422           0 :         trace_event_read_unlock();
    6423             : 
    6424             :         /* Now copy what we have to the user */
    6425           0 :         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
    6426           0 :         if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
    6427           0 :                 trace_seq_init(&iter->seq);
    6428             : 
    6429             :         /*
    6430             :          * If there was nothing to send to the user, in spite of consuming
    6431             :          * trace entries, go back to wait for more entries.
    6432             :          */
    6433           0 :         if (sret == -EBUSY)
    6434           0 :                 goto waitagain;
    6435             : 
    6436           0 : out:
    6437           0 :         mutex_unlock(&iter->mutex);
    6438             : 
    6439           0 :         return sret;
    6440             : }
    6441             : 
    6442           0 : static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
    6443             :                                      unsigned int idx)
    6444             : {
    6445           0 :         __free_page(spd->pages[idx]);
    6446           0 : }
    6447             : 
    6448             : static size_t
    6449           0 : tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
    6450             : {
    6451           0 :         size_t count;
    6452           0 :         int save_len;
    6453           0 :         int ret;
    6454             : 
    6455             :         /* Seq buffer is page-sized, exactly what we need. */
    6456           0 :         for (;;) {
    6457           0 :                 save_len = iter->seq.seq.len;
    6458           0 :                 ret = print_trace_line(iter);
    6459             : 
    6460           0 :                 if (trace_seq_has_overflowed(&iter->seq)) {
    6461           0 :                         iter->seq.seq.len = save_len;
    6462           0 :                         break;
    6463             :                 }
    6464             : 
    6465             :                 /*
    6466             :                  * This should not be hit: a partial line should only
    6467             :                  * be returned if iter->seq overflowed. But check it
    6468             :                  * anyway to be safe.
    6469             :                  */
    6470           0 :                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
    6471           0 :                         iter->seq.seq.len = save_len;
    6472           0 :                         break;
    6473             :                 }
    6474             : 
    6475           0 :                 count = trace_seq_used(&iter->seq) - save_len;
    6476           0 :                 if (rem < count) {
    6477           0 :                         rem = 0;
    6478           0 :                         iter->seq.seq.len = save_len;
    6479           0 :                         break;
    6480             :                 }
    6481             : 
    6482           0 :                 if (ret != TRACE_TYPE_NO_CONSUME)
    6483           0 :                         trace_consume(iter);
    6484           0 :                 rem -= count;
    6485           0 :                 if (!trace_find_next_entry_inc(iter))   {
    6486           0 :                         rem = 0;
    6487           0 :                         iter->ent = NULL;
    6488           0 :                         break;
    6489             :                 }
    6490             :         }
    6491             : 
    6492           0 :         return rem;
    6493             : }
    6494             : 
    6495           0 : static ssize_t tracing_splice_read_pipe(struct file *filp,
    6496             :                                         loff_t *ppos,
    6497             :                                         struct pipe_inode_info *pipe,
    6498             :                                         size_t len,
    6499             :                                         unsigned int flags)
    6500             : {
    6501           0 :         struct page *pages_def[PIPE_DEF_BUFFERS];
    6502           0 :         struct partial_page partial_def[PIPE_DEF_BUFFERS];
    6503           0 :         struct trace_iterator *iter = filp->private_data;
    6504           0 :         struct splice_pipe_desc spd = {
    6505             :                 .pages          = pages_def,
    6506             :                 .partial        = partial_def,
    6507             :                 .nr_pages       = 0, /* This gets updated below. */
    6508             :                 .nr_pages_max   = PIPE_DEF_BUFFERS,
    6509             :                 .ops            = &default_pipe_buf_ops,
    6510             :                 .spd_release    = tracing_spd_release_pipe,
    6511             :         };
    6512           0 :         ssize_t ret;
    6513           0 :         size_t rem;
    6514           0 :         unsigned int i;
    6515             : 
    6516           0 :         if (splice_grow_spd(pipe, &spd))
    6517             :                 return -ENOMEM;
    6518             : 
    6519           0 :         mutex_lock(&iter->mutex);
    6520             : 
    6521           0 :         if (iter->trace->splice_read) {
    6522           0 :                 ret = iter->trace->splice_read(iter, filp,
    6523             :                                                ppos, pipe, len, flags);
    6524           0 :                 if (ret)
    6525           0 :                         goto out_err;
    6526             :         }
    6527             : 
    6528           0 :         ret = tracing_wait_pipe(filp);
    6529           0 :         if (ret <= 0)
    6530           0 :                 goto out_err;
    6531             : 
    6532           0 :         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
    6533           0 :                 ret = -EFAULT;
    6534           0 :                 goto out_err;
    6535             :         }
    6536             : 
    6537           0 :         trace_event_read_lock();
    6538           0 :         trace_access_lock(iter->cpu_file);
    6539             : 
    6540             :         /* Fill as many pages as possible. */
    6541           0 :         for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
    6542           0 :                 spd.pages[i] = alloc_page(GFP_KERNEL);
    6543           0 :                 if (!spd.pages[i])
    6544             :                         break;
    6545             : 
    6546           0 :                 rem = tracing_fill_pipe_page(rem, iter);
    6547             : 
    6548             :                 /* Copy the data into the page, so we can start over. */
    6549           0 :                 ret = trace_seq_to_buffer(&iter->seq,
    6550           0 :                                           page_address(spd.pages[i]),
    6551           0 :                                           trace_seq_used(&iter->seq));
    6552           0 :                 if (ret < 0) {
    6553           0 :                         __free_page(spd.pages[i]);
    6554           0 :                         break;
    6555             :                 }
    6556           0 :                 spd.partial[i].offset = 0;
    6557           0 :                 spd.partial[i].len = trace_seq_used(&iter->seq);
    6558             : 
    6559           0 :                 trace_seq_init(&iter->seq);
    6560             :         }
    6561             : 
    6562           0 :         trace_access_unlock(iter->cpu_file);
    6563           0 :         trace_event_read_unlock();
    6564           0 :         mutex_unlock(&iter->mutex);
    6565             : 
    6566           0 :         spd.nr_pages = i;
    6567             : 
    6568           0 :         if (i)
    6569           0 :                 ret = splice_to_pipe(pipe, &spd);
    6570             :         else
    6571             :                 ret = 0;
    6572           0 : out:
    6573           0 :         splice_shrink_spd(&spd);
    6574           0 :         return ret;
    6575             : 
    6576           0 : out_err:
    6577           0 :         mutex_unlock(&iter->mutex);
    6578           0 :         goto out;
    6579             : }
    6580             : 
    6581             : static ssize_t
    6582           0 : tracing_entries_read(struct file *filp, char __user *ubuf,
    6583             :                      size_t cnt, loff_t *ppos)
    6584             : {
    6585           0 :         struct inode *inode = file_inode(filp);
    6586           0 :         struct trace_array *tr = inode->i_private;
    6587           0 :         int cpu = tracing_get_cpu(inode);
    6588           0 :         char buf[64];
    6589           0 :         int r = 0;
    6590           0 :         ssize_t ret;
    6591             : 
    6592           0 :         mutex_lock(&trace_types_lock);
    6593             : 
    6594           0 :         if (cpu == RING_BUFFER_ALL_CPUS) {
    6595             :                 int cpu, buf_size_same;
    6596             :                 unsigned long size;
    6597             : 
    6598             :                 size = 0;
    6599           0 :                 buf_size_same = 1;
    6600             :                 /* check if all CPU sizes are the same */
    6601           0 :                 for_each_tracing_cpu(cpu) {
    6602             :                         /* fill in the size from the first enabled CPU */
    6603           0 :                         if (size == 0)
    6604           0 :                                 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
    6605           0 :                         if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
    6606             :                                 buf_size_same = 0;
    6607             :                                 break;
    6608             :                         }
    6609             :                 }
    6610             : 
    6611           0 :                 if (buf_size_same) {
    6612           0 :                         if (!ring_buffer_expanded)
    6613           0 :                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
    6614             :                                             size >> 10,
    6615             :                                             trace_buf_size >> 10);
    6616             :                         else
    6617           0 :                                 r = sprintf(buf, "%lu\n", size >> 10);
    6618             :                 } else
    6619           0 :                         r = sprintf(buf, "X\n");
    6620             :         } else
    6621           0 :                 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
    6622             : 
    6623           0 :         mutex_unlock(&trace_types_lock);
    6624             : 
    6625           0 :         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    6626           0 :         return ret;
    6627             : }
    6628             : 
    6629             : static ssize_t
    6630           0 : tracing_entries_write(struct file *filp, const char __user *ubuf,
    6631             :                       size_t cnt, loff_t *ppos)
    6632             : {
    6633           0 :         struct inode *inode = file_inode(filp);
    6634           0 :         struct trace_array *tr = inode->i_private;
    6635           0 :         unsigned long val;
    6636           0 :         int ret;
    6637             : 
    6638           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    6639           0 :         if (ret)
    6640           0 :                 return ret;
    6641             : 
    6642             :         /* must have at least 1 entry */
    6643           0 :         if (!val)
    6644             :                 return -EINVAL;
    6645             : 
    6646             :         /* value is in KB */
    6647           0 :         val <<= 10;
    6648           0 :         ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
    6649           0 :         if (ret < 0)
    6650           0 :                 return ret;
    6651             : 
    6652           0 :         *ppos += cnt;
    6653             : 
    6654           0 :         return cnt;
    6655             : }
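
[Editor's note] The val <<= 10 above means the written value is interpreted in KiB: echoing "1408" resizes each selected per-CPU buffer to 1408 << 10 = 1441792 bytes. A small userspace sketch follows, assuming this handler is exposed as buffer_size_kb under the default tracefs mount.

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        /* Request a per-CPU ring buffer of 'kb' KiB; "0" is rejected
         * with EINVAL by the kernel ("must have at least 1 entry"). */
        int set_buffer_size_kb(const char *kb)
        {
                int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
                int ret;

                if (fd < 0)
                        return -1;
                ret = write(fd, kb, strlen(kb));
                close(fd);
                return ret < 0 ? -1 : 0;
        }
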
    6656             : 
    6657             : static ssize_t
    6658           0 : tracing_total_entries_read(struct file *filp, char __user *ubuf,
    6659             :                                 size_t cnt, loff_t *ppos)
    6660             : {
    6661           0 :         struct trace_array *tr = filp->private_data;
    6662           0 :         char buf[64];
    6663           0 :         int r, cpu;
    6664           0 :         unsigned long size = 0, expanded_size = 0;
    6665             : 
    6666           0 :         mutex_lock(&trace_types_lock);
    6667           0 :         for_each_tracing_cpu(cpu) {
    6668           0 :                 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
    6669           0 :                 if (!ring_buffer_expanded)
    6670           0 :                         expanded_size += trace_buf_size >> 10;
    6671             :         }
    6672           0 :         if (ring_buffer_expanded)
    6673           0 :                 r = sprintf(buf, "%lu\n", size);
    6674             :         else
    6675           0 :                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
    6676           0 :         mutex_unlock(&trace_types_lock);
    6677             : 
    6678           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    6679             : }
    6680             : 
    6681             : static ssize_t
    6682           0 : tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
    6683             :                           size_t cnt, loff_t *ppos)
    6684             : {
    6685             :         /*
    6686             :          * There is no need to read what the user has written; this function
    6687             :          * only exists so that "echo" does not report an error.
    6688             :          */
    6689             : 
    6690           0 :         *ppos += cnt;
    6691             : 
    6692           0 :         return cnt;
    6693             : }
    6694             : 
    6695             : static int
    6696           0 : tracing_free_buffer_release(struct inode *inode, struct file *filp)
    6697             : {
    6698           0 :         struct trace_array *tr = inode->i_private;
    6699             : 
    6700             :         /* Disable tracing? */
    6701           0 :         if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
    6702           0 :                 tracer_tracing_off(tr);
    6703             :         /* resize the ring buffer to 0 */
    6704           0 :         tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
    6705             : 
    6706           0 :         trace_array_put(tr);
    6707             : 
    6708           0 :         return 0;
    6709             : }
    6710             : 
    6711             : static ssize_t
    6712           0 : tracing_mark_write(struct file *filp, const char __user *ubuf,
    6713             :                                         size_t cnt, loff_t *fpos)
    6714             : {
    6715           0 :         struct trace_array *tr = filp->private_data;
    6716           0 :         struct ring_buffer_event *event;
    6717           0 :         enum event_trigger_type tt = ETT_NONE;
    6718           0 :         struct trace_buffer *buffer;
    6719           0 :         struct print_entry *entry;
    6720           0 :         ssize_t written;
    6721           0 :         int size;
    6722           0 :         int len;
    6723             : 
    6724             : /* Used in tracing_mark_raw_write() as well */
    6725             : #define FAULTED_STR "<faulted>"
    6726             : #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
    6727             : 
    6728           0 :         if (tracing_disabled)
    6729             :                 return -EINVAL;
    6730             : 
    6731           0 :         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
    6732             :                 return -EINVAL;
    6733             : 
    6734           0 :         if (cnt > TRACE_BUF_SIZE)
    6735             :                 cnt = TRACE_BUF_SIZE;
    6736             : 
    6737           0 :         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
    6738             : 
    6739           0 :         size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
    6740             : 
    6741             :         /* If less than "<faulted>", then make sure we can still add that */
    6742           0 :         if (cnt < FAULTED_SIZE)
    6743           0 :                 size += FAULTED_SIZE - cnt;
    6744             : 
    6745           0 :         buffer = tr->array_buffer.buffer;
    6746           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
    6747             :                                             tracing_gen_ctx());
    6748           0 :         if (unlikely(!event))
    6749             :                 /* Ring buffer disabled, return as if not open for write */
    6750             :                 return -EBADF;
    6751             : 
    6752           0 :         entry = ring_buffer_event_data(event);
    6753           0 :         entry->ip = _THIS_IP_;
    6754             : 
    6755           0 :         len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
    6756           0 :         if (len) {
    6757           0 :                 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
    6758           0 :                 cnt = FAULTED_SIZE;
    6759           0 :                 written = -EFAULT;
    6760             :         } else
    6761           0 :                 written = cnt;
    6762             : 
    6763           0 :         if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
    6764             :                 /* do not add \n before testing triggers, but add \0 */
    6765           0 :                 entry->buf[cnt] = '\0';
    6766           0 :                 tt = event_triggers_call(tr->trace_marker_file, entry, event);
    6767             :         }
    6768             : 
    6769           0 :         if (entry->buf[cnt - 1] != '\n') {
    6770           0 :                 entry->buf[cnt] = '\n';
    6771           0 :                 entry->buf[cnt + 1] = '\0';
    6772             :         } else
    6773           0 :                 entry->buf[cnt] = '\0';
    6774             : 
    6775           0 :         if (static_branch_unlikely(&trace_marker_exports_enabled))
    6776           0 :                 ftrace_exports(event, TRACE_EXPORT_MARKER);
    6777           0 :         __buffer_unlock_commit(buffer, event);
    6778             : 
    6779           0 :         if (tt)
    6780           0 :                 event_triggers_post_call(tr->trace_marker_file, tt);
    6781             : 
    6782           0 :         if (written > 0)
    6783           0 :                 *fpos += written;
    6784             : 
    6785             :         return written;
    6786             : }
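
[Editor's note] tracing_mark_write() backs the trace_marker interface: a write becomes a TRACE_PRINT event, is '\n'/'\0'-terminated as above, and oversized writes are silently truncated to TRACE_BUF_SIZE rather than rejected. A hedged userspace helper sketch, again assuming the default tracefs mount:

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        /* Annotate the trace buffer from userspace. Returns bytes written,
         * or -1 on error (errno is EBADF if the ring buffer is disabled). */
        int trace_marker_write(const char *msg)
        {
                int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
                int ret;

                if (fd < 0)
                        return -1;
                ret = write(fd, msg, strlen(msg));
                close(fd);
                return ret;
        }
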
    6787             : 
    6788             : /* Limit it for now to 3K (including tag) */
    6789             : #define RAW_DATA_MAX_SIZE (1024*3)
    6790             : 
    6791             : static ssize_t
    6792           0 : tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
    6793             :                                         size_t cnt, loff_t *fpos)
    6794             : {
    6795           0 :         struct trace_array *tr = filp->private_data;
    6796           0 :         struct ring_buffer_event *event;
    6797           0 :         struct trace_buffer *buffer;
    6798           0 :         struct raw_data_entry *entry;
    6799           0 :         ssize_t written;
    6800           0 :         int size;
    6801           0 :         int len;
    6802             : 
    6803             : #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
    6804             : 
    6805           0 :         if (tracing_disabled)
    6806             :                 return -EINVAL;
    6807             : 
    6808           0 :         if (!(tr->trace_flags & TRACE_ITER_MARKERS))
    6809             :                 return -EINVAL;
    6810             : 
    6811             :         /* The marker must at least have a tag id */
    6812           0 :         if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
    6813             :                 return -EINVAL;
    6814             : 
    6815           0 :         if (cnt > TRACE_BUF_SIZE)
    6816             :                 cnt = TRACE_BUF_SIZE;
    6817             : 
    6818           0 :         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
    6819             : 
    6820           0 :         size = sizeof(*entry) + cnt;
    6821           0 :         if (cnt < FAULT_SIZE_ID)
    6822           0 :                 size += FAULT_SIZE_ID - cnt;
    6823             : 
    6824           0 :         buffer = tr->array_buffer.buffer;
    6825           0 :         event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
    6826             :                                             tracing_gen_ctx());
    6827           0 :         if (!event)
    6828             :                 /* Ring buffer disabled, return as if not open for write */
    6829             :                 return -EBADF;
    6830             : 
    6831           0 :         entry = ring_buffer_event_data(event);
    6832             : 
    6833           0 :         len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
    6834           0 :         if (len) {
    6835           0 :                 entry->id = -1;
    6836           0 :                 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
    6837           0 :                 written = -EFAULT;
    6838             :         } else
    6839           0 :                 written = cnt;
    6840             : 
    6841           0 :         __buffer_unlock_commit(buffer, event);
    6842             : 
    6843           0 :         if (written > 0)
    6844           0 :                 *fpos += written;
    6845             : 
    6846             :         return written;
    6847             : }
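
[Editor's note] The raw variant enforces framing: the payload must begin with an unsigned int tag id, and sizes outside [sizeof(unsigned int), RAW_DATA_MAX_SIZE] get EINVAL. A sketch of a conforming write, assuming the trace_marker_raw file at the default tracefs mount:

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        /* Raw marker: a 4-byte tag id followed by an opaque payload. */
        int trace_marker_raw_write(unsigned int tag, const void *data,
                                   size_t len)
        {
                char buf[256];
                int fd, ret;

                if (len > sizeof(buf) - sizeof(tag))
                        return -1;
                memcpy(buf, &tag, sizeof(tag));       /* mandatory tag id */
                memcpy(buf + sizeof(tag), data, len); /* opaque payload */
                fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
                if (fd < 0)
                        return -1;
                ret = write(fd, buf, sizeof(tag) + len);
                close(fd);
                return ret;
        }
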
    6848             : 
    6849           0 : static int tracing_clock_show(struct seq_file *m, void *v)
    6850             : {
    6851           0 :         struct trace_array *tr = m->private;
    6852           0 :         int i;
    6853             : 
    6854           0 :         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
    6855           0 :                 seq_printf(m,
    6856             :                         "%s%s%s%s", i ? " " : "",
    6857             :                         i == tr->clock_id ? "[" : "", trace_clocks[i].name,
    6858           0 :                         i == tr->clock_id ? "]" : "");
    6859           0 :         seq_putc(m, '\n');
    6860             : 
    6861           0 :         return 0;
    6862             : }
    6863             : 
    6864           0 : int tracing_set_clock(struct trace_array *tr, const char *clockstr)
    6865             : {
    6866           0 :         int i;
    6867             : 
    6868           0 :         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
    6869           0 :                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
    6870             :                         break;
    6871             :         }
    6872           0 :         if (i == ARRAY_SIZE(trace_clocks))
    6873             :                 return -EINVAL;
    6874             : 
    6875           0 :         mutex_lock(&trace_types_lock);
    6876             : 
    6877           0 :         tr->clock_id = i;
    6878             : 
    6879           0 :         ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
    6880             : 
    6881             :         /*
    6882             :          * New clock may not be consistent with the previous clock.
    6883             :          * Reset the buffer so that it doesn't have incomparable timestamps.
    6884             :          */
    6885           0 :         tracing_reset_online_cpus(&tr->array_buffer);
    6886             : 
    6887             : #ifdef CONFIG_TRACER_MAX_TRACE
    6888             :         if (tr->max_buffer.buffer)
    6889             :                 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
    6890             :         tracing_reset_online_cpus(&tr->max_buffer);
    6891             : #endif
    6892             : 
    6893           0 :         mutex_unlock(&trace_types_lock);
    6894             : 
    6895           0 :         return 0;
    6896             : }
    6897             : 
    6898           0 : static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
    6899             :                                    size_t cnt, loff_t *fpos)
    6900             : {
    6901           0 :         struct seq_file *m = filp->private_data;
    6902           0 :         struct trace_array *tr = m->private;
    6903           0 :         char buf[64];
    6904           0 :         const char *clockstr;
    6905           0 :         int ret;
    6906             : 
    6907           0 :         if (cnt >= sizeof(buf))
    6908             :                 return -EINVAL;
    6909             : 
    6910           0 :         if (copy_from_user(buf, ubuf, cnt))
    6911             :                 return -EFAULT;
    6912             : 
    6913           0 :         buf[cnt] = 0;
    6914             : 
    6915           0 :         clockstr = strstrip(buf);
    6916             : 
    6917           0 :         ret = tracing_set_clock(tr, clockstr);
    6918           0 :         if (ret)
    6919           0 :                 return ret;
    6920             : 
    6921           0 :         *fpos += cnt;
    6922             : 
    6923           0 :         return cnt;
    6924             : }
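
[Editor's note] tracing_clock_show() brackets the active clock in the list, and tracing_set_clock() resets the buffers because timestamps from different clocks are not comparable. Writing a clock name (e.g. "mono") to the same file switches clocks. A read-side sketch under the same tracefs-path assumption:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        /* Print the available clocks; the active one is bracketed,
         * e.g. "[local] global counter uptime perf mono mono_raw boot". */
        int show_trace_clock(void)
        {
                char buf[128];
                ssize_t n;
                int fd = open("/sys/kernel/tracing/trace_clock", O_RDONLY);

                if (fd < 0)
                        return -1;
                n = read(fd, buf, sizeof(buf) - 1);
                close(fd);
                if (n < 0)
                        return -1;
                buf[n] = '\0';
                fputs(buf, stdout);
                return 0;
        }
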
    6925             : 
    6926           0 : static int tracing_clock_open(struct inode *inode, struct file *file)
    6927             : {
    6928           0 :         struct trace_array *tr = inode->i_private;
    6929           0 :         int ret;
    6930             : 
    6931           0 :         ret = tracing_check_open_get_tr(tr);
    6932           0 :         if (ret)
    6933             :                 return ret;
    6934             : 
    6935           0 :         ret = single_open(file, tracing_clock_show, inode->i_private);
    6936           0 :         if (ret < 0)
    6937           0 :                 trace_array_put(tr);
    6938             : 
    6939             :         return ret;
    6940             : }
    6941             : 
    6942           0 : static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
    6943             : {
    6944           0 :         struct trace_array *tr = m->private;
    6945             : 
    6946           0 :         mutex_lock(&trace_types_lock);
    6947             : 
    6948           0 :         if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
    6949           0 :                 seq_puts(m, "delta [absolute]\n");
    6950             :         else
    6951           0 :                 seq_puts(m, "[delta] absolute\n");
    6952             : 
    6953           0 :         mutex_unlock(&trace_types_lock);
    6954             : 
    6955           0 :         return 0;
    6956             : }
    6957             : 
    6958           0 : static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
    6959             : {
    6960           0 :         struct trace_array *tr = inode->i_private;
    6961           0 :         int ret;
    6962             : 
    6963           0 :         ret = tracing_check_open_get_tr(tr);
    6964           0 :         if (ret)
    6965             :                 return ret;
    6966             : 
    6967           0 :         ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
    6968           0 :         if (ret < 0)
    6969           0 :                 trace_array_put(tr);
    6970             : 
    6971             :         return ret;
    6972             : }
    6973             : 
    6974           0 : int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
    6975             : {
    6976           0 :         int ret = 0;
    6977             : 
    6978           0 :         mutex_lock(&trace_types_lock);
    6979             : 
    6980           0 :         if (abs && tr->time_stamp_abs_ref++)
    6981           0 :                 goto out;
    6982             : 
    6983           0 :         if (!abs) {
    6984           0 :                 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
    6985           0 :                         ret = -EINVAL;
    6986           0 :                         goto out;
    6987             :                 }
    6988             : 
    6989           0 :                 if (--tr->time_stamp_abs_ref)
    6990           0 :                         goto out;
    6991             :         }
    6992             : 
    6993           0 :         ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
    6994             : 
    6995             : #ifdef CONFIG_TRACER_MAX_TRACE
    6996             :         if (tr->max_buffer.buffer)
    6997             :                 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
    6998             : #endif
    6999           0 :  out:
    7000           0 :         mutex_unlock(&trace_types_lock);
    7001             : 
    7002           0 :         return ret;
    7003             : }
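
[Editor's note] tracing_set_time_stamp_abs() is refcounted: each enable must be paired with a disable, and only the last disable flips the buffer back to delta timestamps. A hypothetical in-kernel caller sketch (the helper name and the work in the middle are illustrative only):

        /* Hypothetical caller: hold absolute timestamps over a window of
         * work. Enables nest; the mode only reverts at refcount zero. */
        static int with_abs_timestamps(struct trace_array *tr)
        {
                int ret = tracing_set_time_stamp_abs(tr, true);

                if (ret)
                        return ret;
                /* ... emit events that need absolute timestamps ... */
                return tracing_set_time_stamp_abs(tr, false);
        }
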
    7004             : 
    7005             : struct ftrace_buffer_info {
    7006             :         struct trace_iterator   iter;
    7007             :         void                    *spare;
    7008             :         unsigned int            spare_cpu;
    7009             :         unsigned int            read;
    7010             : };
    7011             : 
    7012             : #ifdef CONFIG_TRACER_SNAPSHOT
    7013             : static int tracing_snapshot_open(struct inode *inode, struct file *file)
    7014             : {
    7015             :         struct trace_array *tr = inode->i_private;
    7016             :         struct trace_iterator *iter;
    7017             :         struct seq_file *m;
    7018             :         int ret;
    7019             : 
    7020             :         ret = tracing_check_open_get_tr(tr);
    7021             :         if (ret)
    7022             :                 return ret;
    7023             : 
    7024             :         if (file->f_mode & FMODE_READ) {
    7025             :                 iter = __tracing_open(inode, file, true);
    7026             :                 if (IS_ERR(iter))
    7027             :                         ret = PTR_ERR(iter);
    7028             :         } else {
    7029             :                 /* Writes still need the seq_file to hold the private data */
    7030             :                 ret = -ENOMEM;
    7031             :                 m = kzalloc(sizeof(*m), GFP_KERNEL);
    7032             :                 if (!m)
    7033             :                         goto out;
    7034             :                 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    7035             :                 if (!iter) {
    7036             :                         kfree(m);
    7037             :                         goto out;
    7038             :                 }
    7039             :                 ret = 0;
    7040             : 
    7041             :                 iter->tr = tr;
    7042             :                 iter->array_buffer = &tr->max_buffer;
    7043             :                 iter->cpu_file = tracing_get_cpu(inode);
    7044             :                 m->private = iter;
    7045             :                 file->private_data = m;
    7046             :         }
    7047             : out:
    7048             :         if (ret < 0)
    7049             :                 trace_array_put(tr);
    7050             : 
    7051             :         return ret;
    7052             : }
    7053             : 
    7054             : static ssize_t
    7055             : tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
    7056             :                        loff_t *ppos)
    7057             : {
    7058             :         struct seq_file *m = filp->private_data;
    7059             :         struct trace_iterator *iter = m->private;
    7060             :         struct trace_array *tr = iter->tr;
    7061             :         unsigned long val;
    7062             :         int ret;
    7063             : 
    7064             :         ret = tracing_update_buffers();
    7065             :         if (ret < 0)
    7066             :                 return ret;
    7067             : 
    7068             :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    7069             :         if (ret)
    7070             :                 return ret;
    7071             : 
    7072             :         mutex_lock(&trace_types_lock);
    7073             : 
    7074             :         if (tr->current_trace->use_max_tr) {
    7075             :                 ret = -EBUSY;
    7076             :                 goto out;
    7077             :         }
    7078             : 
    7079             :         arch_spin_lock(&tr->max_lock);
    7080             :         if (tr->cond_snapshot)
    7081             :                 ret = -EBUSY;
    7082             :         arch_spin_unlock(&tr->max_lock);
    7083             :         if (ret)
    7084             :                 goto out;
    7085             : 
    7086             :         switch (val) {
    7087             :         case 0:
    7088             :                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
    7089             :                         ret = -EINVAL;
    7090             :                         break;
    7091             :                 }
    7092             :                 if (tr->allocated_snapshot)
    7093             :                         free_snapshot(tr);
    7094             :                 break;
    7095             :         case 1:
    7096             : /* Only allow per-cpu swap if the ring buffer supports it */
    7097             : #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
    7098             :                 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
    7099             :                         ret = -EINVAL;
    7100             :                         break;
    7101             :                 }
    7102             : #endif
    7103             :                 if (tr->allocated_snapshot)
    7104             :                         ret = resize_buffer_duplicate_size(&tr->max_buffer,
    7105             :                                         &tr->array_buffer, iter->cpu_file);
    7106             :                 else
    7107             :                         ret = tracing_alloc_snapshot_instance(tr);
    7108             :                 if (ret < 0)
    7109             :                         break;
    7110             :                 local_irq_disable();
    7111             :                 /* Now, we're going to swap */
    7112             :                 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
    7113             :                         update_max_tr(tr, current, smp_processor_id(), NULL);
    7114             :                 else
    7115             :                         update_max_tr_single(tr, current, iter->cpu_file);
    7116             :                 local_irq_enable();
    7117             :                 break;
    7118             :         default:
    7119             :                 if (tr->allocated_snapshot) {
    7120             :                         if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
    7121             :                                 tracing_reset_online_cpus(&tr->max_buffer);
    7122             :                         else
    7123             :                                 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
    7124             :                 }
    7125             :                 break;
    7126             :         }
    7127             : 
    7128             :         if (ret >= 0) {
    7129             :                 *ppos += cnt;
    7130             :                 ret = cnt;
    7131             :         }
    7132             : out:
    7133             :         mutex_unlock(&trace_types_lock);
    7134             :         return ret;
    7135             : }
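
[Editor's note] The switch above defines the snapshot file's write interface: '0' frees an allocated snapshot (all-CPUs file only), '1' allocates if needed and swaps the live buffer into max_buffer, and any other value clears the snapshot buffer. A minimal trigger sketch, assuming the snapshot file at the default tracefs mount:

        #include <fcntl.h>
        #include <unistd.h>

        /* Capture the current trace into the snapshot (max) buffer. */
        int take_snapshot(void)
        {
                int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
                int ret;

                if (fd < 0)
                        return -1;
                ret = write(fd, "1", 1); /* "0" frees, "1" swaps, else clears */
                close(fd);
                return ret < 0 ? -1 : 0;
        }
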
    7136             : 
    7137             : static int tracing_snapshot_release(struct inode *inode, struct file *file)
    7138             : {
    7139             :         struct seq_file *m = file->private_data;
    7140             :         int ret;
    7141             : 
    7142             :         ret = tracing_release(inode, file);
    7143             : 
    7144             :         if (file->f_mode & FMODE_READ)
    7145             :                 return ret;
    7146             : 
    7147             :         /* If write only, the seq_file is just a stub */
    7148             :         if (m)
    7149             :                 kfree(m->private);
    7150             :         kfree(m);
    7151             : 
    7152             :         return 0;
    7153             : }
    7154             : 
    7155             : static int tracing_buffers_open(struct inode *inode, struct file *filp);
    7156             : static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
    7157             :                                     size_t count, loff_t *ppos);
    7158             : static int tracing_buffers_release(struct inode *inode, struct file *file);
    7159             : static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
    7160             :                    struct pipe_inode_info *pipe, size_t len, unsigned int flags);
    7161             : 
    7162             : static int snapshot_raw_open(struct inode *inode, struct file *filp)
    7163             : {
    7164             :         struct ftrace_buffer_info *info;
    7165             :         int ret;
    7166             : 
    7167             :         /* The following checks for tracefs lockdown */
    7168             :         ret = tracing_buffers_open(inode, filp);
    7169             :         if (ret < 0)
    7170             :                 return ret;
    7171             : 
    7172             :         info = filp->private_data;
    7173             : 
    7174             :         if (info->iter.trace->use_max_tr) {
    7175             :                 tracing_buffers_release(inode, filp);
    7176             :                 return -EBUSY;
    7177             :         }
    7178             : 
    7179             :         info->iter.snapshot = true;
    7180             :         info->iter.array_buffer = &info->iter.tr->max_buffer;
    7181             : 
    7182             :         return ret;
    7183             : }
    7184             : 
    7185             : #endif /* CONFIG_TRACER_SNAPSHOT */
    7186             : 
    7187             : 
    7188             : static const struct file_operations tracing_thresh_fops = {
    7189             :         .open           = tracing_open_generic,
    7190             :         .read           = tracing_thresh_read,
    7191             :         .write          = tracing_thresh_write,
    7192             :         .llseek         = generic_file_llseek,
    7193             : };
    7194             : 
    7195             : #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
    7196             : static const struct file_operations tracing_max_lat_fops = {
    7197             :         .open           = tracing_open_generic,
    7198             :         .read           = tracing_max_lat_read,
    7199             :         .write          = tracing_max_lat_write,
    7200             :         .llseek         = generic_file_llseek,
    7201             : };
    7202             : #endif
    7203             : 
    7204             : static const struct file_operations set_tracer_fops = {
    7205             :         .open           = tracing_open_generic,
    7206             :         .read           = tracing_set_trace_read,
    7207             :         .write          = tracing_set_trace_write,
    7208             :         .llseek         = generic_file_llseek,
    7209             : };
    7210             : 
    7211             : static const struct file_operations tracing_pipe_fops = {
    7212             :         .open           = tracing_open_pipe,
    7213             :         .poll           = tracing_poll_pipe,
    7214             :         .read           = tracing_read_pipe,
    7215             :         .splice_read    = tracing_splice_read_pipe,
    7216             :         .release        = tracing_release_pipe,
    7217             :         .llseek         = no_llseek,
    7218             : };
    7219             : 
    7220             : static const struct file_operations tracing_entries_fops = {
    7221             :         .open           = tracing_open_generic_tr,
    7222             :         .read           = tracing_entries_read,
    7223             :         .write          = tracing_entries_write,
    7224             :         .llseek         = generic_file_llseek,
    7225             :         .release        = tracing_release_generic_tr,
    7226             : };
    7227             : 
    7228             : static const struct file_operations tracing_total_entries_fops = {
    7229             :         .open           = tracing_open_generic_tr,
    7230             :         .read           = tracing_total_entries_read,
    7231             :         .llseek         = generic_file_llseek,
    7232             :         .release        = tracing_release_generic_tr,
    7233             : };
    7234             : 
    7235             : static const struct file_operations tracing_free_buffer_fops = {
    7236             :         .open           = tracing_open_generic_tr,
    7237             :         .write          = tracing_free_buffer_write,
    7238             :         .release        = tracing_free_buffer_release,
    7239             : };
    7240             : 
    7241             : static const struct file_operations tracing_mark_fops = {
    7242             :         .open           = tracing_open_generic_tr,
    7243             :         .write          = tracing_mark_write,
    7244             :         .llseek         = generic_file_llseek,
    7245             :         .release        = tracing_release_generic_tr,
    7246             : };
    7247             : 
    7248             : static const struct file_operations tracing_mark_raw_fops = {
    7249             :         .open           = tracing_open_generic_tr,
    7250             :         .write          = tracing_mark_raw_write,
    7251             :         .llseek         = generic_file_llseek,
    7252             :         .release        = tracing_release_generic_tr,
    7253             : };
    7254             : 
    7255             : static const struct file_operations trace_clock_fops = {
    7256             :         .open           = tracing_clock_open,
    7257             :         .read           = seq_read,
    7258             :         .llseek         = seq_lseek,
    7259             :         .release        = tracing_single_release_tr,
    7260             :         .write          = tracing_clock_write,
    7261             : };
    7262             : 
    7263             : static const struct file_operations trace_time_stamp_mode_fops = {
    7264             :         .open           = tracing_time_stamp_mode_open,
    7265             :         .read           = seq_read,
    7266             :         .llseek         = seq_lseek,
    7267             :         .release        = tracing_single_release_tr,
    7268             : };
    7269             : 
    7270             : #ifdef CONFIG_TRACER_SNAPSHOT
    7271             : static const struct file_operations snapshot_fops = {
    7272             :         .open           = tracing_snapshot_open,
    7273             :         .read           = seq_read,
    7274             :         .write          = tracing_snapshot_write,
    7275             :         .llseek         = tracing_lseek,
    7276             :         .release        = tracing_snapshot_release,
    7277             : };
    7278             : 
    7279             : static const struct file_operations snapshot_raw_fops = {
    7280             :         .open           = snapshot_raw_open,
    7281             :         .read           = tracing_buffers_read,
    7282             :         .release        = tracing_buffers_release,
    7283             :         .splice_read    = tracing_buffers_splice_read,
    7284             :         .llseek         = no_llseek,
    7285             : };
    7286             : 
    7287             : #endif /* CONFIG_TRACER_SNAPSHOT */
    7288             : 
    7289             : #define TRACING_LOG_ERRS_MAX    8
    7290             : #define TRACING_LOG_LOC_MAX     128
    7291             : 
    7292             : #define CMD_PREFIX "  Command: "
    7293             : 
    7294             : struct err_info {
    7295             :         const char      **errs; /* ptr to loc-specific array of err strings */
    7296             :         u8              type;   /* index into errs -> specific err string */
    7297             :         u8              pos;    /* MAX_FILTER_STR_VAL = 256 */
    7298             :         u64             ts;
    7299             : };
    7300             : 
    7301             : struct tracing_log_err {
    7302             :         struct list_head        list;
    7303             :         struct err_info         info;
    7304             :         char                    loc[TRACING_LOG_LOC_MAX]; /* err location */
    7305             :         char                    cmd[MAX_FILTER_STR_VAL]; /* what caused err */
    7306             : };
    7307             : 
    7308             : static DEFINE_MUTEX(tracing_err_log_lock);
    7309             : 
    7310           0 : static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
    7311             : {
    7312           0 :         struct tracing_log_err *err;
    7313             : 
    7314           0 :         if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
    7315           0 :                 err = kzalloc(sizeof(*err), GFP_KERNEL);
    7316           0 :                 if (!err)
    7317           0 :                         err = ERR_PTR(-ENOMEM);
    7318           0 :                 tr->n_err_log_entries++;
    7319             : 
    7320           0 :                 return err;
    7321             :         }
    7322             : 
    7323           0 :         err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
    7324           0 :         list_del(&err->list);
    7325             : 
    7326           0 :         return err;
    7327             : }
    7328             : 
    7329             : /**
    7330             :  * err_pos - find the position of a string within a command for error careting
    7331             :  * @cmd: The tracing command that caused the error
    7332             :  * @str: The string to position the caret at within @cmd
    7333             :  *
    7334             :  * Finds the position of the first occurrence of @str within @cmd.  The
    7335             :  * return value can be passed to tracing_log_err() for caret placement
    7336             :  * within @cmd.
    7337             :  *
    7338             :  * Returns the index within @cmd of the first occurrence of @str or 0
    7339             :  * if @str was not found.
    7340             :  */
    7341           0 : unsigned int err_pos(char *cmd, const char *str)
    7342             : {
    7343           0 :         char *found;
    7344             : 
    7345           0 :         if (WARN_ON(!strlen(cmd)))
    7346             :                 return 0;
    7347             : 
    7348           0 :         found = strstr(cmd, str);
    7349           0 :         if (found)
    7350           0 :                 return found - cmd;
    7351             : 
    7352             :         return 0;
    7353             : }
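                     : /*
                     :  * Minimal usage sketch (the command string here is hypothetical):
                     :  * locate the offending token so the error-log caret lands under it.
                     :  *
                     :  *	char cmd[] = "hist:keys=bogus_field";
                     :  *	unsigned int pos = err_pos(cmd, "bogus_field");	(pos == 10)
                     :  */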
    7354             : 
    7355             : /**
    7356             :  * tracing_log_err - write an error to the tracing error log
    7357             :  * @tr: The associated trace array for the error (NULL for top level array)
    7358             :  * @loc: A string describing where the error occurred
    7359             :  * @cmd: The tracing command that caused the error
    7360             :  * @errs: The array of loc-specific static error strings
    7361             :  * @type: The index into errs[], which produces the specific static err string
    7362             :  * @pos: The position the caret should be placed in the cmd
    7363             :  *
    7364             :  * Writes an error into tracing/error_log of the form:
    7365             :  *
    7366             :  * <loc>: error: <text>
    7367             :  *   Command: <cmd>
    7368             :  *              ^
    7369             :  *
    7370             :  * tracing/error_log is a small log file containing the last
    7371             :  * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
    7372             :  * unless there has been a tracing error, and the error log can be
    7373             :  * cleared and have its memory freed by writing the empty string in
    7374             :  * truncation mode to it, i.e. echo > tracing/error_log.
    7375             :  *
    7376             :  * NOTE: the @errs array along with the @type param are used to
    7377             :  * produce a static error string - this string is not copied and saved
    7378             :  * when the error is logged - only a pointer to it is saved.  See
    7379             :  * existing callers for examples of how static strings are typically
    7380             :  * defined for use with tracing_log_err().
    7381             :  */
    7382           0 : void tracing_log_err(struct trace_array *tr,
    7383             :                      const char *loc, const char *cmd,
    7384             :                      const char **errs, u8 type, u8 pos)
    7385             : {
    7386           0 :         struct tracing_log_err *err;
    7387             : 
    7388           0 :         if (!tr)
    7389           0 :                 tr = &global_trace;
    7390             : 
    7391           0 :         mutex_lock(&tracing_err_log_lock);
    7392           0 :         err = get_tracing_log_err(tr);
    7393           0 :         if (PTR_ERR(err) == -ENOMEM) {
    7394           0 :                 mutex_unlock(&tracing_err_log_lock);
    7395           0 :                 return;
    7396             :         }
    7397             : 
    7398           0 :         snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
    7399           0 :         snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
    7400             : 
    7401           0 :         err->info.errs = errs;
    7402           0 :         err->info.type = type;
    7403           0 :         err->info.pos = pos;
    7404           0 :         err->info.ts = local_clock();
    7405             : 
    7406           0 :         list_add_tail(&err->list, &tr->err_log);
    7407           0 :         mutex_unlock(&tracing_err_log_lock);
    7408             : }
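                     : /*
                     :  * Sketch of a typical caller (all names below are hypothetical; see
                     :  * real callers such as the hist trigger code for the usual pattern).
                     :  * The errs array and its strings must be static, since only a
                     :  * pointer to the selected string is saved:
                     :  *
                     :  *	static const char *foo_errs[] = { "field not found" };
                     :  *
                     :  *	tracing_log_err(tr, "foo: parse", cmd, foo_errs,
                     :  *			0, err_pos(cmd, field_str));
                     :  */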
    7409             : 
    7410           0 : static void clear_tracing_err_log(struct trace_array *tr)
    7411             : {
    7412           0 :         struct tracing_log_err *err, *next;
    7413             : 
    7414           0 :         mutex_lock(&tracing_err_log_lock);
    7415           0 :         list_for_each_entry_safe(err, next, &tr->err_log, list) {
    7416           0 :                 list_del(&err->list);
    7417           0 :                 kfree(err);
    7418             :         }
    7419             : 
    7420           0 :         tr->n_err_log_entries = 0;
    7421           0 :         mutex_unlock(&tracing_err_log_lock);
    7422           0 : }
    7423             : 
    7424           0 : static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
    7425             : {
    7426           0 :         struct trace_array *tr = m->private;
    7427             : 
    7428           0 :         mutex_lock(&tracing_err_log_lock);
    7429             : 
    7430           0 :         return seq_list_start(&tr->err_log, *pos);
    7431             : }
    7432             : 
    7433           0 : static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
    7434             : {
    7435           0 :         struct trace_array *tr = m->private;
    7436             : 
    7437           0 :         return seq_list_next(v, &tr->err_log, pos);
    7438             : }
    7439             : 
    7440           0 : static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
    7441             : {
    7442           0 :         mutex_unlock(&tracing_err_log_lock);
    7443           0 : }
    7444             : 
    7445           0 : static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
    7446             : {
    7447           0 :         u8 i;
    7448             : 
    7449           0 :         for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
    7450           0 :                 seq_putc(m, ' ');
    7451           0 :         for (i = 0; i < pos; i++)
    7452           0 :                 seq_putc(m, ' ');
    7453           0 :         seq_puts(m, "^\n");
    7454           0 : }
    7455             : 
    7456           0 : static int tracing_err_log_seq_show(struct seq_file *m, void *v)
    7457             : {
    7458           0 :         struct tracing_log_err *err = v;
    7459             : 
    7460           0 :         if (err) {
    7461           0 :                 const char *err_text = err->info.errs[err->info.type];
    7462           0 :                 u64 sec = err->info.ts;
    7463           0 :                 u32 nsec;
    7464             : 
    7465           0 :                 nsec = do_div(sec, NSEC_PER_SEC);
    7466           0 :                 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
    7467           0 :                            err->loc, err_text);
    7468           0 :                 seq_printf(m, "%s", err->cmd);
    7469           0 :                 tracing_err_log_show_pos(m, err->info.pos);
    7470             :         }
    7471             : 
    7472           0 :         return 0;
    7473             : }
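                     : /*
                     :  * Given the format strings above, one logged entry renders roughly
                     :  * as follows (all values illustrative):
                     :  *
                     :  *	[ 1234.000567] hist:sched:sched_switch: error: Couldn't find field
                     :  *	  Command: hist:keys=bogus_field
                     :  *	                     ^
                     :  */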
    7474             : 
    7475             : static const struct seq_operations tracing_err_log_seq_ops = {
    7476             :         .start  = tracing_err_log_seq_start,
    7477             :         .next   = tracing_err_log_seq_next,
    7478             :         .stop   = tracing_err_log_seq_stop,
    7479             :         .show   = tracing_err_log_seq_show
    7480             : };
    7481             : 
    7482           0 : static int tracing_err_log_open(struct inode *inode, struct file *file)
    7483             : {
    7484           0 :         struct trace_array *tr = inode->i_private;
    7485           0 :         int ret = 0;
    7486             : 
    7487           0 :         ret = tracing_check_open_get_tr(tr);
    7488           0 :         if (ret)
    7489             :                 return ret;
    7490             : 
    7491             :         /* If this file was opened for write with O_TRUNC, erase the contents */
    7492           0 :         if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
    7493           0 :                 clear_tracing_err_log(tr);
    7494             : 
    7495           0 :         if (file->f_mode & FMODE_READ) {
    7496           0 :                 ret = seq_open(file, &tracing_err_log_seq_ops);
    7497           0 :                 if (!ret) {
    7498           0 :                         struct seq_file *m = file->private_data;
    7499           0 :                         m->private = tr;
    7500             :                 } else {
    7501           0 :                         trace_array_put(tr);
    7502             :                 }
    7503             :         }
    7504             :         return ret;
    7505             : }
    7506             : 
    7507           0 : static ssize_t tracing_err_log_write(struct file *file,
    7508             :                                      const char __user *buffer,
    7509             :                                      size_t count, loff_t *ppos)
    7510             : {
    7511           0 :         return count;
    7512             : }
    7513             : 
    7514           0 : static int tracing_err_log_release(struct inode *inode, struct file *file)
    7515             : {
    7516           0 :         struct trace_array *tr = inode->i_private;
    7517             : 
    7518           0 :         trace_array_put(tr);
    7519             : 
    7520           0 :         if (file->f_mode & FMODE_READ)
    7521           0 :                 seq_release(inode, file);
    7522             : 
    7523           0 :         return 0;
    7524             : }
    7525             : 
    7526             : static const struct file_operations tracing_err_log_fops = {
    7527             :         .open           = tracing_err_log_open,
    7528             :         .write          = tracing_err_log_write,
    7529             :         .read           = seq_read,
    7530             :         .llseek         = seq_lseek,
    7531             :         .release        = tracing_err_log_release,
    7532             : };
    7533             : 
    7534           0 : static int tracing_buffers_open(struct inode *inode, struct file *filp)
    7535             : {
    7536           0 :         struct trace_array *tr = inode->i_private;
    7537           0 :         struct ftrace_buffer_info *info;
    7538           0 :         int ret;
    7539             : 
    7540           0 :         ret = tracing_check_open_get_tr(tr);
    7541           0 :         if (ret)
    7542             :                 return ret;
    7543             : 
    7544           0 :         info = kvzalloc(sizeof(*info), GFP_KERNEL);
    7545           0 :         if (!info) {
    7546           0 :                 trace_array_put(tr);
    7547           0 :                 return -ENOMEM;
    7548             :         }
    7549             : 
    7550           0 :         mutex_lock(&trace_types_lock);
    7551             : 
    7552           0 :         info->iter.tr                = tr;
    7553           0 :         info->iter.cpu_file  = tracing_get_cpu(inode);
    7554           0 :         info->iter.trace     = tr->current_trace;
    7555           0 :         info->iter.array_buffer = &tr->array_buffer;
    7556           0 :         info->spare          = NULL;
    7557             :         /* Force reading ring buffer for first read */
    7558           0 :         info->read           = (unsigned int)-1;
    7559             : 
    7560           0 :         filp->private_data = info;
    7561             : 
    7562           0 :         tr->trace_ref++;
    7563             : 
    7564           0 :         mutex_unlock(&trace_types_lock);
    7565             : 
    7566           0 :         ret = nonseekable_open(inode, filp);
    7567           0 :         if (ret < 0)
    7568           0 :                 trace_array_put(tr);
    7569             : 
    7570             :         return ret;
    7571             : }
    7572             : 
    7573             : static __poll_t
    7574           0 : tracing_buffers_poll(struct file *filp, poll_table *poll_table)
    7575             : {
    7576           0 :         struct ftrace_buffer_info *info = filp->private_data;
    7577           0 :         struct trace_iterator *iter = &info->iter;
    7578             : 
    7579           0 :         return trace_poll(iter, filp, poll_table);
    7580             : }
    7581             : 
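                     : /*
                     :  * Reads are served a whole ring buffer page at a time through a
                     :  * "spare" page: it is allocated lazily on first read, refilled from
                     :  * the ring buffer by ring_buffer_read_page(), and any unconsumed
                     :  * remainder (info->read < PAGE_SIZE) is handed out before the next
                     :  * refill.
                     :  */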
    7582             : static ssize_t
    7583           0 : tracing_buffers_read(struct file *filp, char __user *ubuf,
    7584             :                      size_t count, loff_t *ppos)
    7585             : {
    7586           0 :         struct ftrace_buffer_info *info = filp->private_data;
    7587           0 :         struct trace_iterator *iter = &info->iter;
    7588           0 :         ssize_t ret = 0;
    7589           0 :         ssize_t size;
    7590             : 
    7591           0 :         if (!count)
    7592             :                 return 0;
    7593             : 
    7594             : #ifdef CONFIG_TRACER_MAX_TRACE
    7595             :         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
    7596             :                 return -EBUSY;
    7597             : #endif
    7598             : 
    7599           0 :         if (!info->spare) {
    7600           0 :                 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
    7601             :                                                           iter->cpu_file);
    7602           0 :                 if (IS_ERR(info->spare)) {
    7603           0 :                         ret = PTR_ERR(info->spare);
    7604           0 :                         info->spare = NULL;
    7605             :                 } else {
    7606           0 :                         info->spare_cpu = iter->cpu_file;
    7607             :                 }
    7608             :         }
    7609           0 :         if (!info->spare)
    7610             :                 return ret;
    7611             : 
    7612             :         /* Do we have previous read data to read? */
    7613           0 :         if (info->read < PAGE_SIZE)
    7614           0 :                 goto read;
    7615             : 
    7616           0 :  again:
    7617           0 :         trace_access_lock(iter->cpu_file);
    7618           0 :         ret = ring_buffer_read_page(iter->array_buffer->buffer,
    7619             :                                     &info->spare,
    7620             :                                     count,
    7621             :                                     iter->cpu_file, 0);
    7622           0 :         trace_access_unlock(iter->cpu_file);
    7623             : 
    7624           0 :         if (ret < 0) {
    7625           0 :                 if (trace_empty(iter)) {
    7626           0 :                         if ((filp->f_flags & O_NONBLOCK))
    7627             :                                 return -EAGAIN;
    7628             : 
    7629           0 :                         ret = wait_on_pipe(iter, 0);
    7630           0 :                         if (ret)
    7631           0 :                                 return ret;
    7632             : 
    7633           0 :                         goto again;
    7634             :                 }
    7635             :                 return 0;
    7636             :         }
    7637             : 
    7638           0 :         info->read = 0;
    7639           0 :  read:
    7640           0 :         size = PAGE_SIZE - info->read;
    7641           0 :         if (size > count)
    7642             :                 size = count;
    7643             : 
    7644           0 :         ret = copy_to_user(ubuf, info->spare + info->read, size);
    7645           0 :         if (ret == size)
    7646             :                 return -EFAULT;
    7647             : 
    7648           0 :         size -= ret;
    7649             : 
    7650           0 :         *ppos += size;
    7651           0 :         info->read += size;
    7652             : 
    7653           0 :         return size;
    7654             : }
    7655             : 
    7656           0 : static int tracing_buffers_release(struct inode *inode, struct file *file)
    7657             : {
    7658           0 :         struct ftrace_buffer_info *info = file->private_data;
    7659           0 :         struct trace_iterator *iter = &info->iter;
    7660             : 
    7661           0 :         mutex_lock(&trace_types_lock);
    7662             : 
    7663           0 :         iter->tr->trace_ref--;
    7664             : 
    7665           0 :         __trace_array_put(iter->tr);
    7666             : 
    7667           0 :         if (info->spare)
    7668           0 :                 ring_buffer_free_read_page(iter->array_buffer->buffer,
    7669           0 :                                            info->spare_cpu, info->spare);
    7670           0 :         kvfree(info);
    7671             : 
    7672           0 :         mutex_unlock(&trace_types_lock);
    7673             : 
    7674           0 :         return 0;
    7675             : }
    7676             : 
    7677             : struct buffer_ref {
    7678             :         struct trace_buffer     *buffer;
    7679             :         void                    *page;
    7680             :         int                     cpu;
    7681             :         refcount_t              refcount;
    7682             : };
    7683             : 
    7684           0 : static void buffer_ref_release(struct buffer_ref *ref)
    7685             : {
    7686           0 :         if (!refcount_dec_and_test(&ref->refcount))
    7687             :                 return;
    7688           0 :         ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
    7689           0 :         kfree(ref);
    7690             : }
    7691             : 
    7692           0 : static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
    7693             :                                     struct pipe_buffer *buf)
    7694             : {
    7695           0 :         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
    7696             : 
    7697           0 :         buffer_ref_release(ref);
    7698           0 :         buf->private = 0;
    7699           0 : }
    7700             : 
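                     : /*
                     :  * Refuse to take another reference once the count grows implausibly
                     :  * large (e.g. through repeated pipe duplication): this guards
                     :  * against reference-count overflow, it is not a real capacity limit.
                     :  */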
    7701           0 : static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
    7702             :                                 struct pipe_buffer *buf)
    7703             : {
    7704           0 :         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
    7705             : 
    7706           0 :         if (refcount_read(&ref->refcount) > INT_MAX/2)
    7707             :                 return false;
    7708             : 
    7709           0 :         refcount_inc(&ref->refcount);
    7710           0 :         return true;
    7711             : }
    7712             : 
    7713             : /* Pipe buffer operations for a ring buffer read page. */
    7714             : static const struct pipe_buf_operations buffer_pipe_buf_ops = {
    7715             :         .release                = buffer_pipe_buf_release,
    7716             :         .get                    = buffer_pipe_buf_get,
    7717             : };
    7718             : 
    7719             : /*
    7720             :  * Callback from splice_to_pipe(): release any pages left in the spd
    7721             :  * if we errored out while filling the pipe.
    7722             :  */
    7723           0 : static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
    7724             : {
    7725           0 :         struct buffer_ref *ref =
    7726           0 :                 (struct buffer_ref *)spd->partial[i].private;
    7727             : 
    7728           0 :         buffer_ref_release(ref);
    7729           0 :         spd->partial[i].private = 0;
    7730           0 : }
    7731             : 
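                     : /*
                     :  * Splice ring buffer pages into a pipe without copying to user
                     :  * space: each page is wrapped in a refcounted buffer_ref, so it
                     :  * stays live until the last pipe reader releases it.
                     :  */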
    7732             : static ssize_t
    7733           0 : tracing_buffers_splice_read(struct file *file, loff_t *ppos,
    7734             :                             struct pipe_inode_info *pipe, size_t len,
    7735             :                             unsigned int flags)
    7736             : {
    7737           0 :         struct ftrace_buffer_info *info = file->private_data;
    7738           0 :         struct trace_iterator *iter = &info->iter;
    7739           0 :         struct partial_page partial_def[PIPE_DEF_BUFFERS];
    7740           0 :         struct page *pages_def[PIPE_DEF_BUFFERS];
    7741           0 :         struct splice_pipe_desc spd = {
    7742             :                 .pages          = pages_def,
    7743             :                 .partial        = partial_def,
    7744             :                 .nr_pages_max   = PIPE_DEF_BUFFERS,
    7745             :                 .ops            = &buffer_pipe_buf_ops,
    7746             :                 .spd_release    = buffer_spd_release,
    7747             :         };
    7748           0 :         struct buffer_ref *ref;
    7749           0 :         int entries, i;
    7750           0 :         ssize_t ret = 0;
    7751             : 
    7752             : #ifdef CONFIG_TRACER_MAX_TRACE
    7753             :         if (iter->snapshot && iter->tr->current_trace->use_max_tr)
    7754             :                 return -EBUSY;
    7755             : #endif
    7756             : 
    7757           0 :         if (*ppos & (PAGE_SIZE - 1))
    7758             :                 return -EINVAL;
    7759             : 
    7760           0 :         if (len & (PAGE_SIZE - 1)) {
    7761           0 :                 if (len < PAGE_SIZE)
    7762             :                         return -EINVAL;
    7763           0 :                 len &= PAGE_MASK;
    7764             :         }
    7765             : 
    7766           0 :         if (splice_grow_spd(pipe, &spd))
    7767             :                 return -ENOMEM;
    7768             : 
    7769           0 :  again:
    7770           0 :         trace_access_lock(iter->cpu_file);
    7771           0 :         entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
    7772             : 
    7773           0 :         for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
    7774           0 :                 struct page *page;
    7775           0 :                 int r;
    7776             : 
    7777           0 :                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
    7778           0 :                 if (!ref) {
    7779             :                         ret = -ENOMEM;
    7780             :                         break;
    7781             :                 }
    7782             : 
    7783           0 :                 refcount_set(&ref->refcount, 1);
    7784           0 :                 ref->buffer = iter->array_buffer->buffer;
    7785           0 :                 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
    7786           0 :                 if (IS_ERR(ref->page)) {
    7787           0 :                         ret = PTR_ERR(ref->page);
    7788           0 :                         ref->page = NULL;
    7789           0 :                         kfree(ref);
    7790           0 :                         break;
    7791             :                 }
    7792           0 :                 ref->cpu = iter->cpu_file;
    7793             : 
    7794           0 :                 r = ring_buffer_read_page(ref->buffer, &ref->page,
    7795             :                                           len, iter->cpu_file, 1);
    7796           0 :                 if (r < 0) {
    7797           0 :                         ring_buffer_free_read_page(ref->buffer, ref->cpu,
    7798             :                                                    ref->page);
    7799           0 :                         kfree(ref);
    7800           0 :                         break;
    7801             :                 }
    7802             : 
    7803           0 :                 page = virt_to_page(ref->page);
    7804             : 
    7805           0 :                 spd.pages[i] = page;
    7806           0 :                 spd.partial[i].len = PAGE_SIZE;
    7807           0 :                 spd.partial[i].offset = 0;
    7808           0 :                 spd.partial[i].private = (unsigned long)ref;
    7809           0 :                 spd.nr_pages++;
    7810           0 :                 *ppos += PAGE_SIZE;
    7811             : 
    7812           0 :                 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
    7813             :         }
    7814             : 
    7815           0 :         trace_access_unlock(iter->cpu_file);
    7816           0 :         spd.nr_pages = i;
    7817             : 
    7818             :         /* did we read anything? */
    7819           0 :         if (!spd.nr_pages) {
    7820           0 :                 if (ret)
    7821           0 :                         goto out;
    7822             : 
    7823           0 :                 ret = -EAGAIN;
    7824           0 :                 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
    7825           0 :                         goto out;
    7826             : 
    7827           0 :                 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
    7828           0 :                 if (ret)
    7829           0 :                         goto out;
    7830             : 
    7831           0 :                 goto again;
    7832             :         }
    7833             : 
    7834           0 :         ret = splice_to_pipe(pipe, &spd);
    7835           0 : out:
    7836           0 :         splice_shrink_spd(&spd);
    7837             : 
    7838           0 :         return ret;
    7839             : }
    7840             : 
    7841             : static const struct file_operations tracing_buffers_fops = {
    7842             :         .open           = tracing_buffers_open,
    7843             :         .read           = tracing_buffers_read,
    7844             :         .poll           = tracing_buffers_poll,
    7845             :         .release        = tracing_buffers_release,
    7846             :         .splice_read    = tracing_buffers_splice_read,
    7847             :         .llseek         = no_llseek,
    7848             : };
    7849             : 
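                     : /*
                     :  * Render the per-CPU ring buffer statistics; reading
                     :  * per_cpu/cpuN/stats yields something like (values illustrative):
                     :  *
                     :  *	entries: 105
                     :  *	overrun: 0
                     :  *	commit overrun: 0
                     :  *	bytes: 6248
                     :  *	oldest event ts:  2296.637287
                     :  *	now ts:  2306.357290
                     :  *	dropped events: 0
                     :  *	read events: 0
                     :  */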
    7850             : static ssize_t
    7851           0 : tracing_stats_read(struct file *filp, char __user *ubuf,
    7852             :                    size_t count, loff_t *ppos)
    7853             : {
    7854           0 :         struct inode *inode = file_inode(filp);
    7855           0 :         struct trace_array *tr = inode->i_private;
    7856           0 :         struct array_buffer *trace_buf = &tr->array_buffer;
    7857           0 :         int cpu = tracing_get_cpu(inode);
    7858           0 :         struct trace_seq *s;
    7859           0 :         unsigned long cnt;
    7860           0 :         unsigned long long t;
    7861           0 :         unsigned long usec_rem;
    7862             : 
    7863           0 :         s = kmalloc(sizeof(*s), GFP_KERNEL);
    7864           0 :         if (!s)
    7865             :                 return -ENOMEM;
    7866             : 
    7867           0 :         trace_seq_init(s);
    7868             : 
    7869           0 :         cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
    7870           0 :         trace_seq_printf(s, "entries: %ld\n", cnt);
    7871             : 
    7872           0 :         cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
    7873           0 :         trace_seq_printf(s, "overrun: %ld\n", cnt);
    7874             : 
    7875           0 :         cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
    7876           0 :         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
    7877             : 
    7878           0 :         cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
    7879           0 :         trace_seq_printf(s, "bytes: %ld\n", cnt);
    7880             : 
    7881           0 :         if (trace_clocks[tr->clock_id].in_ns) {
    7882             :                 /* local or global for trace_clock */
    7883           0 :                 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
    7884           0 :                 usec_rem = do_div(t, USEC_PER_SEC);
    7885           0 :                 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
    7886             :                                                                 t, usec_rem);
    7887             : 
    7888           0 :                 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
    7889           0 :                 usec_rem = do_div(t, USEC_PER_SEC);
    7890           0 :                 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
    7891             :         } else {
    7892             :                 /* counter or tsc mode for trace_clock */
    7893           0 :                 trace_seq_printf(s, "oldest event ts: %llu\n",
    7894             :                                 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
    7895             : 
    7896           0 :                 trace_seq_printf(s, "now ts: %llu\n",
    7897             :                                 ring_buffer_time_stamp(trace_buf->buffer, cpu));
    7898             :         }
    7899             : 
    7900           0 :         cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
    7901           0 :         trace_seq_printf(s, "dropped events: %ld\n", cnt);
    7902             : 
    7903           0 :         cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
    7904           0 :         trace_seq_printf(s, "read events: %ld\n", cnt);
    7905             : 
    7906           0 :         count = simple_read_from_buffer(ubuf, count, ppos,
    7907           0 :                                         s->buffer, trace_seq_used(s));
    7908             : 
    7909           0 :         kfree(s);
    7910             : 
    7911           0 :         return count;
    7912             : }
    7913             : 
    7914             : static const struct file_operations tracing_stats_fops = {
    7915             :         .open           = tracing_open_generic_tr,
    7916             :         .read           = tracing_stats_read,
    7917             :         .llseek         = generic_file_llseek,
    7918             :         .release        = tracing_release_generic_tr,
    7919             : };
    7920             : 
    7921             : #ifdef CONFIG_DYNAMIC_FTRACE
    7922             : 
    7923             : static ssize_t
    7924             : tracing_read_dyn_info(struct file *filp, char __user *ubuf,
    7925             :                   size_t cnt, loff_t *ppos)
    7926             : {
    7927             :         ssize_t ret;
    7928             :         char *buf;
    7929             :         int r;
    7930             : 
    7931             :         /* 256 should be plenty to hold the amount needed */
    7932             :         buf = kmalloc(256, GFP_KERNEL);
    7933             :         if (!buf)
    7934             :                 return -ENOMEM;
    7935             : 
    7936             :         r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
    7937             :                       ftrace_update_tot_cnt,
    7938             :                       ftrace_number_of_pages,
    7939             :                       ftrace_number_of_groups);
    7940             : 
    7941             :         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    7942             :         kfree(buf);
    7943             :         return ret;
    7944             : }
    7945             : 
    7946             : static const struct file_operations tracing_dyn_info_fops = {
    7947             :         .open           = tracing_open_generic,
    7948             :         .read           = tracing_read_dyn_info,
    7949             :         .llseek         = generic_file_llseek,
    7950             : };
    7951             : #endif /* CONFIG_DYNAMIC_FTRACE */
    7952             : 
    7953             : #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
    7954             : static void
    7955             : ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
    7956             :                 struct trace_array *tr, struct ftrace_probe_ops *ops,
    7957             :                 void *data)
    7958             : {
    7959             :         tracing_snapshot_instance(tr);
    7960             : }
    7961             : 
    7962             : static void
    7963             : ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
    7964             :                       struct trace_array *tr, struct ftrace_probe_ops *ops,
    7965             :                       void *data)
    7966             : {
    7967             :         struct ftrace_func_mapper *mapper = data;
    7968             :         long *count = NULL;
    7969             : 
    7970             :         if (mapper)
    7971             :                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
    7972             : 
    7973             :         if (count) {
    7974             : 
    7975             :                 if (*count <= 0)
    7976             :                         return;
    7977             : 
    7978             :                 (*count)--;
    7979             :         }
    7980             : 
    7981             :         tracing_snapshot_instance(tr);
    7982             : }
    7983             : 
    7984             : static int
    7985             : ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
    7986             :                       struct ftrace_probe_ops *ops, void *data)
    7987             : {
    7988             :         struct ftrace_func_mapper *mapper = data;
    7989             :         long *count = NULL;
    7990             : 
    7991             :         seq_printf(m, "%ps:", (void *)ip);
    7992             : 
    7993             :         seq_puts(m, "snapshot");
    7994             : 
    7995             :         if (mapper)
    7996             :                 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
    7997             : 
    7998             :         if (count)
    7999             :                 seq_printf(m, ":count=%ld\n", *count);
    8000             :         else
    8001             :                 seq_puts(m, ":unlimited\n");
    8002             : 
    8003             :         return 0;
    8004             : }
    8005             : 
    8006             : static int
    8007             : ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
    8008             :                      unsigned long ip, void *init_data, void **data)
    8009             : {
    8010             :         struct ftrace_func_mapper *mapper = *data;
    8011             : 
    8012             :         if (!mapper) {
    8013             :                 mapper = allocate_ftrace_func_mapper();
    8014             :                 if (!mapper)
    8015             :                         return -ENOMEM;
    8016             :                 *data = mapper;
    8017             :         }
    8018             : 
    8019             :         return ftrace_func_mapper_add_ip(mapper, ip, init_data);
    8020             : }
    8021             : 
    8022             : static void
    8023             : ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
    8024             :                      unsigned long ip, void *data)
    8025             : {
    8026             :         struct ftrace_func_mapper *mapper = data;
    8027             : 
    8028             :         if (!ip) {
    8029             :                 if (!mapper)
    8030             :                         return;
    8031             :                 free_ftrace_func_mapper(mapper, NULL);
    8032             :                 return;
    8033             :         }
    8034             : 
    8035             :         ftrace_func_mapper_remove_ip(mapper, ip);
    8036             : }
    8037             : 
    8038             : static struct ftrace_probe_ops snapshot_probe_ops = {
    8039             :         .func                   = ftrace_snapshot,
    8040             :         .print                  = ftrace_snapshot_print,
    8041             : };
    8042             : 
    8043             : static struct ftrace_probe_ops snapshot_count_probe_ops = {
    8044             :         .func                   = ftrace_count_snapshot,
    8045             :         .print                  = ftrace_snapshot_print,
    8046             :         .init                   = ftrace_snapshot_init,
    8047             :         .free                   = ftrace_snapshot_free,
    8048             : };
    8049             : 
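                     : /*
                     :  * Parses the "snapshot" command written to set_ftrace_filter, e.g.:
                     :  *
                     :  *	echo 'schedule:snapshot' > set_ftrace_filter
                     :  *	echo 'schedule:snapshot:3' > set_ftrace_filter
                     :  *
                     :  * An optional ":count" bounds how many snapshots the probe may
                     :  * take; prefixing the glob with '!' removes a previously set probe.
                     :  */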
    8050             : static int
    8051             : ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
    8052             :                                char *glob, char *cmd, char *param, int enable)
    8053             : {
    8054             :         struct ftrace_probe_ops *ops;
    8055             :         void *count = (void *)-1;
    8056             :         char *number;
    8057             :         int ret;
    8058             : 
    8059             :         if (!tr)
    8060             :                 return -ENODEV;
    8061             : 
    8062             :         /* hash funcs only work with set_ftrace_filter */
    8063             :         if (!enable)
    8064             :                 return -EINVAL;
    8065             : 
    8066             :         ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
    8067             : 
    8068             :         if (glob[0] == '!')
    8069             :                 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
    8070             : 
    8071             :         if (!param)
    8072             :                 goto out_reg;
    8073             : 
    8074             :         number = strsep(&param, ":");
    8075             : 
    8076             :         if (!strlen(number))
    8077             :                 goto out_reg;
    8078             : 
    8079             :         /*
    8080             :          * We use the callback data field (which is a pointer)
    8081             :          * as our counter.
    8082             :          */
    8083             :         ret = kstrtoul(number, 0, (unsigned long *)&count);
    8084             :         if (ret)
    8085             :                 return ret;
    8086             : 
    8087             :  out_reg:
    8088             :         ret = tracing_alloc_snapshot_instance(tr);
    8089             :         if (ret < 0)
    8090             :                 goto out;
    8091             : 
    8092             :         ret = register_ftrace_function_probe(glob, tr, ops, count);
    8093             : 
    8094             :  out:
    8095             :         return ret < 0 ? ret : 0;
    8096             : }
    8097             : 
    8098             : static struct ftrace_func_command ftrace_snapshot_cmd = {
    8099             :         .name                   = "snapshot",
    8100             :         .func                   = ftrace_trace_snapshot_callback,
    8101             : };
    8102             : 
    8103             : static __init int register_snapshot_cmd(void)
    8104             : {
    8105             :         return register_ftrace_command(&ftrace_snapshot_cmd);
    8106             : }
    8107             : #else
    8108           1 : static inline __init int register_snapshot_cmd(void) { return 0; }
    8109             : #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
    8110             : 
    8111           2 : static struct dentry *tracing_get_dentry(struct trace_array *tr)
    8112             : {
    8113           2 :         if (WARN_ON(!tr->dir))
    8114           2 :                 return ERR_PTR(-ENODEV);
    8115             : 
    8116             :         /* Top directory uses NULL as the parent */
    8117           2 :         if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
    8118           2 :                 return NULL;
    8119             : 
    8120             :         /* All sub buffers have a descriptor */
    8121             :         return tr->dir;
    8122             : }
    8123             : 
    8124           4 : static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
    8125             : {
    8126           4 :         struct dentry *d_tracer;
    8127             : 
    8128           4 :         if (tr->percpu_dir)
    8129             :                 return tr->percpu_dir;
    8130             : 
    8131           1 :         d_tracer = tracing_get_dentry(tr);
    8132           1 :         if (IS_ERR(d_tracer))
    8133             :                 return NULL;
    8134             : 
    8135           1 :         tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
    8136             : 
    8137           1 :         MEM_FAIL(!tr->percpu_dir,
    8138             :                   "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
    8139             : 
    8140           1 :         return tr->percpu_dir;
    8141             : }
    8142             : 
    8143             : static struct dentry *
    8144          20 : trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
    8145             :                       void *data, long cpu, const struct file_operations *fops)
    8146             : {
    8147          20 :         struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
    8148             : 
    8149          20 :         if (ret) /* See tracing_get_cpu() */
    8150          20 :                 d_inode(ret)->i_cdev = (void *)(cpu + 1);
    8151          20 :         return ret;
    8152             : }
    8153             : 
    8154             : static void
    8155           4 : tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
    8156             : {
    8157           4 :         struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
    8158           4 :         struct dentry *d_cpu;
    8159           4 :         char cpu_dir[30]; /* 30 characters should be more than enough */
    8160             : 
    8161           4 :         if (!d_percpu)
    8162           0 :                 return;
    8163             : 
    8164           4 :         snprintf(cpu_dir, 30, "cpu%ld", cpu);
    8165           4 :         d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
    8166           4 :         if (!d_cpu) {
    8167           0 :                 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
    8168           0 :                 return;
    8169             :         }
    8170             : 
    8171             :         /* per cpu trace_pipe */
    8172           4 :         trace_create_cpu_file("trace_pipe", 0444, d_cpu,
    8173             :                                 tr, cpu, &tracing_pipe_fops);
    8174             : 
    8175             :         /* per cpu trace */
    8176           4 :         trace_create_cpu_file("trace", 0644, d_cpu,
    8177             :                                 tr, cpu, &tracing_fops);
    8178             : 
    8179           4 :         trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
    8180             :                                 tr, cpu, &tracing_buffers_fops);
    8181             : 
    8182           4 :         trace_create_cpu_file("stats", 0444, d_cpu,
    8183             :                                 tr, cpu, &tracing_stats_fops);
    8184             : 
    8185           4 :         trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
    8186             :                                 tr, cpu, &tracing_entries_fops);
    8187             : 
    8188             : #ifdef CONFIG_TRACER_SNAPSHOT
    8189             :         trace_create_cpu_file("snapshot", 0644, d_cpu,
    8190             :                                 tr, cpu, &snapshot_fops);
    8191             : 
    8192             :         trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
    8193             :                                 tr, cpu, &snapshot_raw_fops);
    8194             : #endif
    8195             : }
    8196             : 
    8197             : #ifdef CONFIG_FTRACE_SELFTEST
    8198             : /* Let selftest have access to static functions in this file */
    8199             : #include "trace_selftest.c"
    8200             : #endif
    8201             : 
    8202             : static ssize_t
    8203           0 : trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
    8204             :                         loff_t *ppos)
    8205             : {
    8206           0 :         struct trace_option_dentry *topt = filp->private_data;
    8207           0 :         char *buf;
    8208             : 
    8209           0 :         if (topt->flags->val & topt->opt->bit)
    8210             :                 buf = "1\n";
    8211             :         else
    8212           0 :                 buf = "0\n";
    8213             : 
    8214           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
    8215             : }
    8216             : 
    8217             : static ssize_t
    8218           0 : trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
    8219             :                          loff_t *ppos)
    8220             : {
    8221           0 :         struct trace_option_dentry *topt = filp->private_data;
    8222           0 :         unsigned long val;
    8223           0 :         int ret;
    8224             : 
    8225           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    8226           0 :         if (ret)
    8227           0 :                 return ret;
    8228             : 
    8229           0 :         if (val != 0 && val != 1)
    8230             :                 return -EINVAL;
    8231             : 
    8232           0 :         if (!!(topt->flags->val & topt->opt->bit) != val) {
    8233           0 :                 mutex_lock(&trace_types_lock);
    8234           0 :                 ret = __set_tracer_option(topt->tr, topt->flags,
    8235             :                                           topt->opt, !val);
    8236           0 :                 mutex_unlock(&trace_types_lock);
    8237           0 :                 if (ret)
    8238           0 :                         return ret;
    8239             :         }
    8240             : 
    8241           0 :         *ppos += cnt;
    8242             : 
    8243           0 :         return cnt;
    8244             : }
    8245             : 
    8246             : 
    8247             : static const struct file_operations trace_options_fops = {
    8248             :         .open = tracing_open_generic,
    8249             :         .read = trace_options_read,
    8250             :         .write = trace_options_write,
    8251             :         .llseek = generic_file_llseek,
    8252             : };
    8253             : 
    8254             : /*
    8255             :  * In order to pass in both the trace_array descriptor as well as the index
    8256             :  * to the flag that the trace option file represents, the trace_array
    8257             :  * has a character array of trace_flags_index[], which holds the index
    8258             :  * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
    8259             :  * The address of this character array is passed to the flag option file
    8260             :  * read/write callbacks.
    8261             :  *
    8262             :  * In order to extract both the index and the trace_array descriptor,
    8263             :  * get_tr_index() uses the following algorithm.
    8264             :  *
    8265             :  *   idx = *ptr;
    8266             :  *
    8267             :  * As the pointer points into the index array, dereferencing it yields
    8268             :  * the index itself (remember index[1] == 1).
    8269             :  *
    8270             :  * Then, to get the trace_array descriptor, subtracting that index
    8271             :  * from the pointer gets us back to the start of the index array.
    8272             :  *
    8273             :  *   ptr - idx == &index[0]
    8274             :  *
    8275             :  * Then a simple container_of() from that pointer gets us to the
    8276             :  * trace_array descriptor.
    8277             :  */
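                     : /*
                     :  * Concretely (hypothetical value): if data == &tr->trace_flags_index[3],
                     :  * then *data == 3 and data - 3 == &tr->trace_flags_index[0], from
                     :  * which container_of() yields tr.
                     :  */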
    8278           0 : static void get_tr_index(void *data, struct trace_array **ptr,
    8279             :                          unsigned int *pindex)
    8280             : {
    8281           0 :         *pindex = *(unsigned char *)data;
    8282             : 
    8283           0 :         *ptr = container_of(data - *pindex, struct trace_array,
    8284             :                             trace_flags_index);
    8285             : }
    8286             : 
    8287             : static ssize_t
    8288           0 : trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
    8289             :                         loff_t *ppos)
    8290             : {
    8291           0 :         void *tr_index = filp->private_data;
    8292           0 :         struct trace_array *tr;
    8293           0 :         unsigned int index;
    8294           0 :         char *buf;
    8295             : 
    8296           0 :         get_tr_index(tr_index, &tr, &index);
    8297             : 
    8298           0 :         if (tr->trace_flags & (1 << index))
    8299             :                 buf = "1\n";
    8300             :         else
    8301           0 :                 buf = "0\n";
    8302             : 
    8303           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
    8304             : }
    8305             : 
    8306             : static ssize_t
    8307           0 : trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
    8308             :                          loff_t *ppos)
    8309             : {
    8310           0 :         void *tr_index = filp->private_data;
    8311           0 :         struct trace_array *tr;
    8312           0 :         unsigned int index;
    8313           0 :         unsigned long val;
    8314           0 :         int ret;
    8315             : 
    8316           0 :         get_tr_index(tr_index, &tr, &index);
    8317             : 
    8318           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    8319           0 :         if (ret)
    8320           0 :                 return ret;
    8321             : 
    8322           0 :         if (val != 0 && val != 1)
    8323             :                 return -EINVAL;
    8324             : 
    8325           0 :         mutex_lock(&event_mutex);
    8326           0 :         mutex_lock(&trace_types_lock);
    8327           0 :         ret = set_tracer_flag(tr, 1 << index, val);
    8328           0 :         mutex_unlock(&trace_types_lock);
    8329           0 :         mutex_unlock(&event_mutex);
    8330             : 
    8331           0 :         if (ret < 0)
    8332           0 :                 return ret;
    8333             : 
    8334           0 :         *ppos += cnt;
    8335             : 
    8336           0 :         return cnt;
    8337             : }
    8338             : 
    8339             : static const struct file_operations trace_options_core_fops = {
    8340             :         .open = tracing_open_generic,
    8341             :         .read = trace_options_core_read,
    8342             :         .write = trace_options_core_write,
    8343             :         .llseek = generic_file_llseek,
    8344             : };
    8345             : 
    8346        2781 : struct dentry *trace_create_file(const char *name,
    8347             :                                  umode_t mode,
    8348             :                                  struct dentry *parent,
    8349             :                                  void *data,
    8350             :                                  const struct file_operations *fops)
    8351             : {
    8352        2781 :         struct dentry *ret;
    8353             : 
    8354        2781 :         ret = tracefs_create_file(name, mode, parent, data, fops);
    8355        2781 :         if (!ret)
    8356           0 :                 pr_warn("Could not create tracefs '%s' entry\n", name);
    8357             : 
    8358        2781 :         return ret;
    8359             : }
    8360             : 
    8361             : 
    8362          28 : static struct dentry *trace_options_init_dentry(struct trace_array *tr)
    8363             : {
    8364          28 :         struct dentry *d_tracer;
    8365             : 
    8366          28 :         if (tr->options)
    8367             :                 return tr->options;
    8368             : 
    8369           1 :         d_tracer = tracing_get_dentry(tr);
    8370           1 :         if (IS_ERR(d_tracer))
    8371             :                 return NULL;
    8372             : 
    8373           1 :         tr->options = tracefs_create_dir("options", d_tracer);
    8374           1 :         if (!tr->options) {
    8375           0 :                 pr_warn("Could not create tracefs directory 'options'\n");
    8376           0 :                 return NULL;
    8377             :         }
    8378             : 
    8379             :         return tr->options;
    8380             : }
    8381             : 
    8382             : static void
    8383           2 : create_trace_option_file(struct trace_array *tr,
    8384             :                          struct trace_option_dentry *topt,
    8385             :                          struct tracer_flags *flags,
    8386             :                          struct tracer_opt *opt)
    8387             : {
    8388           2 :         struct dentry *t_options;
    8389             : 
    8390           2 :         t_options = trace_options_init_dentry(tr);
    8391           2 :         if (!t_options)
    8392             :                 return;
    8393             : 
    8394           2 :         topt->flags = flags;
    8395           2 :         topt->opt = opt;
    8396           2 :         topt->tr = tr;
    8397             : 
    8398           2 :         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
    8399             :                                     &trace_options_fops);
    8400             : 
    8401             : }
    8402             : 
    8403             : static void
    8404           1 : create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
    8405             : {
    8406           1 :         struct trace_option_dentry *topts;
    8407           1 :         struct trace_options *tr_topts;
    8408           1 :         struct tracer_flags *flags;
    8409           1 :         struct tracer_opt *opts;
    8410           1 :         int cnt;
    8411           1 :         int i;
    8412             : 
    8413           1 :         if (!tracer)
    8414             :                 return;
    8415             : 
    8416           1 :         flags = tracer->flags;
    8417             : 
    8418           1 :         if (!flags || !flags->opts)
    8419             :                 return;
    8420             : 
    8421             :         /*
    8422             :          * If this is an instance, only create flags for tracers
    8423             :          * the instance may have.
    8424             :          */
    8425           1 :         if (!trace_ok_for_array(tracer, tr))
    8426             :                 return;
    8427             : 
    8428           1 :         for (i = 0; i < tr->nr_topts; i++) {
    8429             :                 /* Make sure there are no duplicate flags. */
    8430           0 :                 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
    8431             :                         return;
    8432             :         }
    8433             : 
    8434           3 :         opts = flags->opts;
    8435             : 
    8436           3 :         for (cnt = 0; opts[cnt].name; cnt++)
    8437           2 :                 ;
    8438             : 
    8439           1 :         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
    8440           1 :         if (!topts)
    8441             :                 return;
    8442             : 
    8443           1 :         tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
    8444             :                             GFP_KERNEL);
    8445           1 :         if (!tr_topts) {
    8446           0 :                 kfree(topts);
    8447           0 :                 return;
    8448             :         }
    8449             : 
    8450           1 :         tr->topts = tr_topts;
    8451           1 :         tr->topts[tr->nr_topts].tracer = tracer;
    8452           1 :         tr->topts[tr->nr_topts].topts = topts;
    8453           1 :         tr->nr_topts++;
    8454             : 
    8455           3 :         for (cnt = 0; opts[cnt].name; cnt++) {
    8456           2 :                 create_trace_option_file(tr, &topts[cnt], flags,
    8457             :                                          &opts[cnt]);
    8458           2 :                 MEM_FAIL(topts[cnt].entry == NULL,
    8459             :                           "Failed to create trace option: %s",
    8460             :                           opts[cnt].name);
    8461             :         }
    8462             : }
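
/*
 * The krealloc() call above grows tr->topts by one element via a
 * temporary pointer, so a failed allocation leaves the old array
 * intact. A generic sketch of that idiom, with hypothetical names:
 */
struct item { long val; };      /* hypothetical payload */

static int grow_array(struct item **items, int *nr)
{
        struct item *tmp;

        tmp = krealloc(*items, sizeof(*tmp) * (*nr + 1), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM; /* *items is still valid on failure */

        *items = tmp;
        (*nr)++;
        return 0;
}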
    8463             : 
    8464             : static struct dentry *
    8465          25 : create_trace_option_core_file(struct trace_array *tr,
    8466             :                               const char *option, long index)
    8467             : {
    8468          25 :         struct dentry *t_options;
    8469             : 
    8470          25 :         t_options = trace_options_init_dentry(tr);
    8471          25 :         if (!t_options)
    8472             :                 return NULL;
    8473             : 
    8474          25 :         return trace_create_file(option, 0644, t_options,
    8475          25 :                                  (void *)&tr->trace_flags_index[index],
    8476             :                                  &trace_options_core_fops);
    8477             : }
    8478             : 
    8479           1 : static void create_trace_options_dir(struct trace_array *tr)
    8480             : {
    8481           1 :         struct dentry *t_options;
    8482           1 :         bool top_level = tr == &global_trace;
    8483           1 :         int i;
    8484             : 
    8485           1 :         t_options = trace_options_init_dentry(tr);
    8486           1 :         if (!t_options)
    8487             :                 return;
    8488             : 
    8489          26 :         for (i = 0; trace_options[i]; i++) {
    8490          25 :                 if (top_level ||
    8491           0 :                     !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
    8492          25 :                         create_trace_option_core_file(tr, trace_options[i], i);
    8493             :         }
    8494             : }
    8495             : 
    8496             : static ssize_t
    8497           0 : rb_simple_read(struct file *filp, char __user *ubuf,
    8498             :                size_t cnt, loff_t *ppos)
    8499             : {
    8500           0 :         struct trace_array *tr = filp->private_data;
    8501           0 :         char buf[64];
    8502           0 :         int r;
    8503             : 
    8504           0 :         r = tracer_tracing_is_on(tr);
    8505           0 :         r = sprintf(buf, "%d\n", r);
    8506             : 
    8507           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    8508             : }
    8509             : 
    8510             : static ssize_t
    8511           0 : rb_simple_write(struct file *filp, const char __user *ubuf,
    8512             :                 size_t cnt, loff_t *ppos)
    8513             : {
    8514           0 :         struct trace_array *tr = filp->private_data;
    8515           0 :         struct trace_buffer *buffer = tr->array_buffer.buffer;
    8516           0 :         unsigned long val;
    8517           0 :         int ret;
    8518             : 
    8519           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    8520           0 :         if (ret)
    8521           0 :                 return ret;
    8522             : 
    8523           0 :         if (buffer) {
    8524           0 :                 mutex_lock(&trace_types_lock);
    8525           0 :                 if (!!val == tracer_tracing_is_on(tr)) {
    8526           0 :                         val = 0; /* do nothing */
    8527           0 :                 } else if (val) {
    8528           0 :                         tracer_tracing_on(tr);
    8529           0 :                         if (tr->current_trace->start)
    8530           0 :                                 tr->current_trace->start(tr);
    8531             :                 } else {
    8532           0 :                         tracer_tracing_off(tr);
    8533           0 :                         if (tr->current_trace->stop)
    8534           0 :                                 tr->current_trace->stop(tr);
    8535             :                 }
    8536           0 :                 mutex_unlock(&trace_types_lock);
    8537             :         }
    8538             : 
    8539           0 :         (*ppos)++;
    8540             : 
    8541           0 :         return cnt;
    8542             : }
    8543             : 
    8544             : static const struct file_operations rb_simple_fops = {
    8545             :         .open           = tracing_open_generic_tr,
    8546             :         .read           = rb_simple_read,
    8547             :         .write          = rb_simple_write,
    8548             :         .release        = tracing_release_generic_tr,
    8549             :         .llseek         = default_llseek,
    8550             : };
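
/*
 * rb_simple_fops backs the per-instance "tracing_on" file. A
 * user-space sketch of toggling it, assuming tracefs is mounted at
 * /sys/kernel/tracing (the path and helper name are illustrative):
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      static int set_tracing_on(int on)
 *      {
 *              int fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);
 *
 *              if (fd < 0)
 *                      return -1;
 *              // rb_simple_write() parses this with kstrtoul_from_user()
 *              if (write(fd, on ? "1" : "0", 1) != 1) {
 *                      close(fd);
 *                      return -1;
 *              }
 *              return close(fd);
 *      }
 */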
    8551             : 
    8552             : static ssize_t
    8553           0 : buffer_percent_read(struct file *filp, char __user *ubuf,
    8554             :                     size_t cnt, loff_t *ppos)
    8555             : {
    8556           0 :         struct trace_array *tr = filp->private_data;
    8557           0 :         char buf[64];
    8558           0 :         int r;
    8559             : 
    8560           0 :         r = tr->buffer_percent;
    8561           0 :         r = sprintf(buf, "%d\n", r);
    8562             : 
    8563           0 :         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
    8564             : }
    8565             : 
    8566             : static ssize_t
    8567           0 : buffer_percent_write(struct file *filp, const char __user *ubuf,
    8568             :                      size_t cnt, loff_t *ppos)
    8569             : {
    8570           0 :         struct trace_array *tr = filp->private_data;
    8571           0 :         unsigned long val;
    8572           0 :         int ret;
    8573             : 
    8574           0 :         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
    8575           0 :         if (ret)
    8576           0 :                 return ret;
    8577             : 
    8578           0 :         if (val > 100)
    8579             :                 return -EINVAL;
    8580             : 
    8581           0 :         if (!val)
    8582           0 :                 val = 1;
    8583             : 
    8584           0 :         tr->buffer_percent = val;
    8585             : 
    8586           0 :         (*ppos)++;
    8587             : 
    8588           0 :         return cnt;
    8589             : }
    8590             : 
    8591             : static const struct file_operations buffer_percent_fops = {
    8592             :         .open           = tracing_open_generic_tr,
    8593             :         .read           = buffer_percent_read,
    8594             :         .write          = buffer_percent_write,
    8595             :         .release        = tracing_release_generic_tr,
    8596             :         .llseek         = default_llseek,
    8597             : };
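
/*
 * "buffer_percent" controls how full the ring buffer must be before a
 * blocked reader (e.g. on trace_pipe_raw) is woken; with the clamping
 * above the effective value is always in [1, 100]. A sketch of the
 * arithmetic only; the real comparison lives in the ring-buffer code:
 */
static bool example_should_wake(unsigned long dirty_pages,
                                unsigned long nr_pages,
                                unsigned long percent)
{
        /* wake once dirty_pages/nr_pages reaches percent/100 */
        return dirty_pages * 100 >= nr_pages * percent;
}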
    8598             : 
    8599             : static struct dentry *trace_instance_dir;
    8600             : 
    8601             : static void
    8602             : init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
    8603             : 
    8604             : static int
    8605           1 : allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
    8606             : {
    8607           1 :         enum ring_buffer_flags rb_flags;
    8608             : 
    8609           1 :         rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
    8610             : 
    8611           1 :         buf->tr = tr;
    8612             : 
    8613           1 :         buf->buffer = ring_buffer_alloc(size, rb_flags);
    8614           1 :         if (!buf->buffer)
    8615             :                 return -ENOMEM;
    8616             : 
    8617           1 :         buf->data = alloc_percpu(struct trace_array_cpu);
    8618           1 :         if (!buf->data) {
    8619           0 :                 ring_buffer_free(buf->buffer);
    8620           0 :                 buf->buffer = NULL;
    8621           0 :                 return -ENOMEM;
    8622             :         }
    8623             : 
    8624             :         /* Allocate the first page for all buffers */
    8625           1 :         set_buffer_entries(&tr->array_buffer,
    8626             :                            ring_buffer_size(tr->array_buffer.buffer, 0));
    8627             : 
    8628           1 :         return 0;
    8629             : }
    8630             : 
    8631           1 : static int allocate_trace_buffers(struct trace_array *tr, int size)
    8632             : {
    8633           1 :         int ret;
    8634             : 
    8635           2 :         ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
    8636           1 :         if (ret)
    8637           0 :                 return ret;
    8638             : 
    8639             : #ifdef CONFIG_TRACER_MAX_TRACE
    8640             :         ret = allocate_trace_buffer(tr, &tr->max_buffer,
    8641             :                                     allocate_snapshot ? size : 1);
    8642             :         if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
    8643             :                 ring_buffer_free(tr->array_buffer.buffer);
    8644             :                 tr->array_buffer.buffer = NULL;
    8645             :                 free_percpu(tr->array_buffer.data);
    8646             :                 tr->array_buffer.data = NULL;
    8647             :                 return -ENOMEM;
    8648             :         }
    8649             :         tr->allocated_snapshot = allocate_snapshot;
    8650             : 
    8651             :         /*
    8652             :          * Only the top level trace array gets its snapshot allocated
    8653             :          * from the kernel command line.
    8654             :          */
    8655             :         allocate_snapshot = false;
    8656             : #endif
    8657             : 
    8658             :         return 0;
    8659             : }
    8660             : 
    8661           0 : static void free_trace_buffer(struct array_buffer *buf)
    8662             : {
    8663           0 :         if (buf->buffer) {
    8664           0 :                 ring_buffer_free(buf->buffer);
    8665           0 :                 buf->buffer = NULL;
    8666           0 :                 free_percpu(buf->data);
    8667           0 :                 buf->data = NULL;
    8668             :         }
    8669           0 : }
    8670             : 
    8671           0 : static void free_trace_buffers(struct trace_array *tr)
    8672             : {
    8673           0 :         if (!tr)
    8674             :                 return;
    8675             : 
    8676           0 :         free_trace_buffer(&tr->array_buffer);
    8677             : 
    8678             : #ifdef CONFIG_TRACER_MAX_TRACE
    8679             :         free_trace_buffer(&tr->max_buffer);
    8680             : #endif
    8681             : }
    8682             : 
    8683             : static void init_trace_flags_index(struct trace_array *tr)
    8684             : {
    8685             :         int i;
    8686             : 
    8687             :         /* Used by the trace options files */
    8688          65 :         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
    8689          32 :                 tr->trace_flags_index[i] = i;
    8690             : }
    8691             : 
    8692           1 : static void __update_tracer_options(struct trace_array *tr)
    8693             : {
    8694           1 :         struct tracer *t;
    8695             : 
    8696           2 :         for (t = trace_types; t; t = t->next)
    8697           2 :                 add_tracer_options(tr, t);
    8698           1 : }
    8699             : 
    8700           1 : static void update_tracer_options(struct trace_array *tr)
    8701             : {
    8702           1 :         mutex_lock(&trace_types_lock);
    8703           1 :         __update_tracer_options(tr);
    8704           1 :         mutex_unlock(&trace_types_lock);
    8705           1 : }
    8706             : 
    8707             : /* Must have trace_types_lock held */
    8708           0 : struct trace_array *trace_array_find(const char *instance)
    8709             : {
    8710           0 :         struct trace_array *tr, *found = NULL;
    8711             : 
    8712           0 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
    8713           0 :                 if (tr->name && strcmp(tr->name, instance) == 0) {
    8714             :                         found = tr;
    8715             :                         break;
    8716             :                 }
    8717             :         }
    8718             : 
    8719           0 :         return found;
    8720             : }
    8721             : 
    8722           0 : struct trace_array *trace_array_find_get(const char *instance)
    8723             : {
    8724           0 :         struct trace_array *tr;
    8725             : 
    8726           0 :         mutex_lock(&trace_types_lock);
    8727           0 :         tr = trace_array_find(instance);
    8728           0 :         if (tr)
    8729           0 :                 tr->ref++;
    8730           0 :         mutex_unlock(&trace_types_lock);
    8731             : 
    8732           0 :         return tr;
    8733             : }
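
/*
 * trace_array_find_get() is the usual "look up under the lock, take a
 * reference before unlocking" idiom. A hypothetical caller pairs it
 * with trace_array_put():
 */
static void example_peek_instance(const char *name)
{
        struct trace_array *tr = trace_array_find_get(name);

        if (!tr)
                return;
        /* ... inspect tr; the reference keeps it from being freed ... */
        trace_array_put(tr);
}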
    8734             : 
    8735           0 : static int trace_array_create_dir(struct trace_array *tr)
    8736             : {
    8737           0 :         int ret;
    8738             : 
    8739           0 :         tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
    8740           0 :         if (!tr->dir)
    8741             :                 return -EINVAL;
    8742             : 
    8743           0 :         ret = event_trace_add_tracer(tr->dir, tr);
    8744           0 :         if (ret)
    8745           0 :                 tracefs_remove(tr->dir);
    8746             : 
    8747           0 :         init_tracer_tracefs(tr, tr->dir);
    8748           0 :         __update_tracer_options(tr);
    8749             : 
    8750           0 :         return ret;
    8751             : }
    8752             : 
    8753           0 : static struct trace_array *trace_array_create(const char *name)
    8754             : {
    8755           0 :         struct trace_array *tr;
    8756           0 :         int ret;
    8757             : 
    8758           0 :         ret = -ENOMEM;
    8759           0 :         tr = kzalloc(sizeof(*tr), GFP_KERNEL);
    8760           0 :         if (!tr)
    8761           0 :                 return ERR_PTR(ret);
    8762             : 
    8763           0 :         tr->name = kstrdup(name, GFP_KERNEL);
    8764           0 :         if (!tr->name)
    8765           0 :                 goto out_free_tr;
    8766             : 
    8767           0 :         if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
    8768             :                 goto out_free_tr;
    8769             : 
    8770           0 :         tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
    8771             : 
    8772           0 :         cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
    8773             : 
    8774           0 :         raw_spin_lock_init(&tr->start_lock);
    8775             : 
    8776           0 :         tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    8777             : 
    8778           0 :         tr->current_trace = &nop_trace;
    8779             : 
    8780           0 :         INIT_LIST_HEAD(&tr->systems);
    8781           0 :         INIT_LIST_HEAD(&tr->events);
    8782           0 :         INIT_LIST_HEAD(&tr->hist_vars);
    8783           0 :         INIT_LIST_HEAD(&tr->err_log);
    8784             : 
    8785           0 :         if (allocate_trace_buffers(tr, trace_buf_size) < 0)
    8786           0 :                 goto out_free_tr;
    8787             : 
    8788           0 :         if (ftrace_allocate_ftrace_ops(tr) < 0)
    8789             :                 goto out_free_tr;
    8790             : 
    8791           0 :         ftrace_init_trace_array(tr);
    8792             : 
    8793           0 :         init_trace_flags_index(tr);
    8794             : 
    8795           0 :         if (trace_instance_dir) {
    8796           0 :                 ret = trace_array_create_dir(tr);
    8797           0 :                 if (ret)
    8798           0 :                         goto out_free_tr;
    8799             :         } else
    8800           0 :                 __trace_early_add_events(tr);
    8801             : 
    8802           0 :         list_add(&tr->list, &ftrace_trace_arrays);
    8803             : 
    8804           0 :         tr->ref++;
    8805             : 
    8806           0 :         return tr;
    8807             : 
    8808           0 :  out_free_tr:
    8809           0 :         ftrace_free_ftrace_ops(tr);
    8810           0 :         free_trace_buffers(tr);
    8811           0 :         free_cpumask_var(tr->tracing_cpumask);
    8812           0 :         kfree(tr->name);
    8813           0 :         kfree(tr);
    8814             : 
    8815           0 :         return ERR_PTR(ret);
    8816             : }
    8817             : 
    8818           0 : static int instance_mkdir(const char *name)
    8819             : {
    8820           0 :         struct trace_array *tr;
    8821           0 :         int ret;
    8822             : 
    8823           0 :         mutex_lock(&event_mutex);
    8824           0 :         mutex_lock(&trace_types_lock);
    8825             : 
    8826           0 :         ret = -EEXIST;
    8827           0 :         if (trace_array_find(name))
    8828           0 :                 goto out_unlock;
    8829             : 
    8830           0 :         tr = trace_array_create(name);
    8831             : 
    8832           0 :         ret = PTR_ERR_OR_ZERO(tr);
    8833             : 
    8834           0 : out_unlock:
    8835           0 :         mutex_unlock(&trace_types_lock);
    8836           0 :         mutex_unlock(&event_mutex);
    8837           0 :         return ret;
    8838             : }
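
/*
 * instance_mkdir() and instance_rmdir() are the callbacks tracefs
 * invokes for the "instances" directory, so instances are managed
 * with ordinary mkdir/rmdir. A user-space sketch, assuming tracefs
 * is mounted at /sys/kernel/tracing:
 *
 *      #include <sys/stat.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              // triggers instance_mkdir("foo") in the kernel
 *              if (mkdir("/sys/kernel/tracing/instances/foo", 0755))
 *                      return 1;
 *              // triggers instance_rmdir("foo"); fails with EBUSY
 *              // while the instance is still referenced
 *              if (rmdir("/sys/kernel/tracing/instances/foo"))
 *                      return 1;
 *              return 0;
 *      }
 */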
    8839             : 
    8840             : /**
    8841             :  * trace_array_get_by_name - Create/Lookup a trace array, given its name.
    8842             :  * @name: The name of the trace array to be looked up/created.
    8843             :  *
    8844             :  * Returns a pointer to the trace array with the given name,
    8845             :  * or NULL if it cannot be created.
    8846             :  *
    8847             :  * NOTE: This function increments the reference counter associated with the
    8848             :  * trace array returned. This makes sure it cannot be freed while in use.
    8849             :  * Use trace_array_put() once the trace array is no longer needed.
    8850             :  * If the trace_array is to be freed, trace_array_destroy() needs to
    8851             :  * be called after the trace_array_put(), or simply let user space delete
    8852             :  * it from the tracefs instances directory. But until the
    8853             :  * trace_array_put() is called, user space cannot delete it.
    8854             :  *
    8855             :  */
    8856           0 : struct trace_array *trace_array_get_by_name(const char *name)
    8857             : {
    8858           0 :         struct trace_array *tr;
    8859             : 
    8860           0 :         mutex_lock(&event_mutex);
    8861           0 :         mutex_lock(&trace_types_lock);
    8862             : 
    8863           0 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
    8864           0 :                 if (tr->name && strcmp(tr->name, name) == 0)
    8865           0 :                         goto out_unlock;
    8866             :         }
    8867             : 
    8868           0 :         tr = trace_array_create(name);
    8869             : 
    8870           0 :         if (IS_ERR(tr))
    8871             :                 tr = NULL;
    8872           0 : out_unlock:
    8873           0 :         if (tr)
    8874           0 :                 tr->ref++;
    8875             : 
    8876           0 :         mutex_unlock(&trace_types_lock);
    8877           0 :         mutex_unlock(&event_mutex);
    8878           0 :         return tr;
    8879             : }
    8880             : EXPORT_SYMBOL_GPL(trace_array_get_by_name);
    8881             : 
    8882           0 : static int __remove_instance(struct trace_array *tr)
    8883             : {
    8884           0 :         int i;
    8885             : 
    8886             :         /* Reference counter for a newly created trace array = 1. */
    8887           0 :         if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
    8888             :                 return -EBUSY;
    8889             : 
    8890           0 :         list_del(&tr->list);
    8891             : 
    8892             :         /* Disable all the flags that were enabled coming in */
    8893           0 :         for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
    8894           0 :                 if ((1 << i) & ZEROED_TRACE_FLAGS)
    8895           0 :                         set_tracer_flag(tr, 1 << i, 0);
    8896             :         }
    8897             : 
    8898           0 :         tracing_set_nop(tr);
    8899           0 :         clear_ftrace_function_probes(tr);
    8900           0 :         event_trace_del_tracer(tr);
    8901           0 :         ftrace_clear_pids(tr);
    8902           0 :         ftrace_destroy_function_files(tr);
    8903           0 :         tracefs_remove(tr->dir);
    8904           0 :         free_trace_buffers(tr);
    8905             : 
    8906           0 :         for (i = 0; i < tr->nr_topts; i++) {
    8907           0 :                 kfree(tr->topts[i].topts);
    8908             :         }
    8909           0 :         kfree(tr->topts);
    8910             : 
    8911           0 :         free_cpumask_var(tr->tracing_cpumask);
    8912           0 :         kfree(tr->name);
    8913           0 :         kfree(tr);
    8914             : 
    8915           0 :         return 0;
    8916             : }
    8917             : 
    8918           0 : int trace_array_destroy(struct trace_array *this_tr)
    8919             : {
    8920           0 :         struct trace_array *tr;
    8921           0 :         int ret;
    8922             : 
    8923           0 :         if (!this_tr)
    8924             :                 return -EINVAL;
    8925             : 
    8926           0 :         mutex_lock(&event_mutex);
    8927           0 :         mutex_lock(&trace_types_lock);
    8928             : 
    8929           0 :         ret = -ENODEV;
    8930             : 
    8931             :         /* Make sure the trace array exists before destroying it. */
    8932           0 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
    8933           0 :                 if (tr == this_tr) {
    8934           0 :                         ret = __remove_instance(tr);
    8935           0 :                         break;
    8936             :                 }
    8937             :         }
    8938             : 
    8939           0 :         mutex_unlock(&trace_types_lock);
    8940           0 :         mutex_unlock(&event_mutex);
    8941             : 
    8942           0 :         return ret;
    8943             : }
    8944             : EXPORT_SYMBOL_GPL(trace_array_destroy);
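
/*
 * A sketch of the lifecycle described in the trace_array_get_by_name()
 * kernel-doc above, as a hypothetical module might use it:
 */
static int example_instance_lifecycle(void)
{
        struct trace_array *tr;

        tr = trace_array_get_by_name("example"); /* created if missing */
        if (!tr)
                return -ENOMEM;

        /* ... use tr, e.g. with trace_array_printk() ... */

        trace_array_put(tr);            /* drop the reference taken above */
        return trace_array_destroy(tr); /* -EBUSY if still in use */
}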
    8945             : 
    8946           0 : static int instance_rmdir(const char *name)
    8947             : {
    8948           0 :         struct trace_array *tr;
    8949           0 :         int ret;
    8950             : 
    8951           0 :         mutex_lock(&event_mutex);
    8952           0 :         mutex_lock(&trace_types_lock);
    8953             : 
    8954           0 :         ret = -ENODEV;
    8955           0 :         tr = trace_array_find(name);
    8956           0 :         if (tr)
    8957           0 :                 ret = __remove_instance(tr);
    8958             : 
    8959           0 :         mutex_unlock(&trace_types_lock);
    8960           0 :         mutex_unlock(&event_mutex);
    8961             : 
    8962           0 :         return ret;
    8963             : }
    8964             : 
    8965           1 : static __init void create_trace_instances(struct dentry *d_tracer)
    8966             : {
    8967           1 :         struct trace_array *tr;
    8968             : 
    8969           1 :         trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
    8970             :                                                          instance_mkdir,
    8971             :                                                          instance_rmdir);
    8972           1 :         if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
    8973             :                 return;
    8974             : 
    8975           1 :         mutex_lock(&event_mutex);
    8976           1 :         mutex_lock(&trace_types_lock);
    8977             : 
    8978           2 :         list_for_each_entry(tr, &ftrace_trace_arrays, list) {
    8979           1 :                 if (!tr->name)
    8980           1 :                         continue;
    8981           0 :                 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
    8982             :                              "Failed to create instance directory\n"))
    8983             :                         break;
    8984             :         }
    8985             : 
    8986           1 :         mutex_unlock(&trace_types_lock);
    8987           1 :         mutex_unlock(&event_mutex);
    8988             : }
    8989             : 
    8990             : static void
    8991           1 : init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
    8992             : {
    8993           1 :         struct trace_event_file *file;
    8994           1 :         int cpu;
    8995             : 
    8996           1 :         trace_create_file("available_tracers", 0444, d_tracer,
    8997             :                         tr, &show_traces_fops);
    8998             : 
    8999           1 :         trace_create_file("current_tracer", 0644, d_tracer,
    9000             :                         tr, &set_tracer_fops);
    9001             : 
    9002           1 :         trace_create_file("tracing_cpumask", 0644, d_tracer,
    9003             :                           tr, &tracing_cpumask_fops);
    9004             : 
    9005           1 :         trace_create_file("trace_options", 0644, d_tracer,
    9006             :                           tr, &tracing_iter_fops);
    9007             : 
    9008           1 :         trace_create_file("trace", 0644, d_tracer,
    9009             :                           tr, &tracing_fops);
    9010             : 
    9011           1 :         trace_create_file("trace_pipe", 0444, d_tracer,
    9012             :                           tr, &tracing_pipe_fops);
    9013             : 
    9014           1 :         trace_create_file("buffer_size_kb", 0644, d_tracer,
    9015             :                           tr, &tracing_entries_fops);
    9016             : 
    9017           1 :         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
    9018             :                           tr, &tracing_total_entries_fops);
    9019             : 
    9020           1 :         trace_create_file("free_buffer", 0200, d_tracer,
    9021             :                           tr, &tracing_free_buffer_fops);
    9022             : 
    9023           1 :         trace_create_file("trace_marker", 0220, d_tracer,
    9024             :                           tr, &tracing_mark_fops);
    9025             : 
    9026           1 :         file = __find_event_file(tr, "ftrace", "print");
    9027           1 :         if (file && file->dir)
    9028           1 :                 trace_create_file("trigger", 0644, file->dir, file,
    9029             :                                   &event_trigger_fops);
    9030           1 :         tr->trace_marker_file = file;
    9031             : 
    9032           1 :         trace_create_file("trace_marker_raw", 0220, d_tracer,
    9033             :                           tr, &tracing_mark_raw_fops);
    9034             : 
    9035           1 :         trace_create_file("trace_clock", 0644, d_tracer, tr,
    9036             :                           &trace_clock_fops);
    9037             : 
    9038           1 :         trace_create_file("tracing_on", 0644, d_tracer,
    9039             :                           tr, &rb_simple_fops);
    9040             : 
    9041           1 :         trace_create_file("timestamp_mode", 0444, d_tracer, tr,
    9042             :                           &trace_time_stamp_mode_fops);
    9043             : 
    9044           1 :         tr->buffer_percent = 50;
    9045             : 
    9046           1 :         trace_create_file("buffer_percent", 0444, d_tracer,
    9047             :                         tr, &buffer_percent_fops);
    9048             : 
    9049           1 :         create_trace_options_dir(tr);
    9050             : 
    9051             : #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
    9052             :         trace_create_maxlat_file(tr, d_tracer);
    9053             : #endif
    9054             : 
    9055           1 :         if (ftrace_create_function_files(tr, d_tracer))
    9056             :                 MEM_FAIL(1, "Could not allocate function filter files");
    9057             : 
    9058             : #ifdef CONFIG_TRACER_SNAPSHOT
    9059             :         trace_create_file("snapshot", 0644, d_tracer,
    9060             :                           tr, &snapshot_fops);
    9061             : #endif
    9062             : 
    9063           1 :         trace_create_file("error_log", 0644, d_tracer,
    9064             :                           tr, &tracing_err_log_fops);
    9065             : 
    9066           6 :         for_each_tracing_cpu(cpu)
    9067           4 :                 tracing_init_tracefs_percpu(tr, cpu);
    9068             : 
    9069           1 :         ftrace_init_tracefs(tr, d_tracer);
    9070           1 : }
    9071             : 
    9072           0 : static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
    9073             : {
    9074           0 :         struct vfsmount *mnt;
    9075           0 :         struct file_system_type *type;
    9076             : 
    9077             :         /*
    9078             :          * To maintain backward compatibility for tools that mount
    9079             :          * debugfs to get to the tracing facility, tracefs is automatically
    9080             :          * mounted to the debugfs/tracing directory.
    9081             :          */
    9082           0 :         type = get_fs_type("tracefs");
    9083           0 :         if (!type)
    9084             :                 return NULL;
    9085           0 :         mnt = vfs_submount(mntpt, type, "tracefs", NULL);
    9086           0 :         put_filesystem(type);
    9087           0 :         if (IS_ERR(mnt))
    9088             :                 return NULL;
    9089           0 :         mntget(mnt);
    9090             : 
    9091           0 :         return mnt;
    9092             : }
    9093             : 
    9094             : /**
    9095             :  * tracing_init_dentry - initialize top level trace array
    9096             :  *
    9097             :  * This is called when creating files or directories in the tracing
    9098             :  * directory. It is called via fs_initcall() by the boot-up code
    9099             :  * and returns 0 if the top level tracing directory is available.
    9100             :  */
    9101           2 : int tracing_init_dentry(void)
    9102             : {
    9103           2 :         struct trace_array *tr = &global_trace;
    9104             : 
    9105           2 :         if (security_locked_down(LOCKDOWN_TRACEFS)) {
    9106           0 :                 pr_warn("Tracing disabled due to lockdown\n");
    9107           0 :                 return -EPERM;
    9108             :         }
    9109             : 
    9110             :         /* The top level trace array uses NULL as parent */
    9111           2 :         if (tr->dir)
    9112             :                 return 0;
    9113             : 
    9114           1 :         if (WARN_ON(!tracefs_initialized()))
    9115             :                 return -ENODEV;
    9116             : 
    9117             :         /*
    9118             :          * As there may still be users that expect the tracing
    9119             :          * files to exist in debugfs/tracing, we must automount
    9120             :          * the tracefs file system there, so older tools still
    9121             :          * work with the newer kernel.
    9122             :          */
    9123           1 :         tr->dir = debugfs_create_automount("tracing", NULL,
    9124             :                                            trace_automount, NULL);
    9125             : 
    9126           1 :         return 0;
    9127             : }
    9128             : 
    9129             : extern struct trace_eval_map *__start_ftrace_eval_maps[];
    9130             : extern struct trace_eval_map *__stop_ftrace_eval_maps[];
    9131             : 
    9132             : static struct workqueue_struct *eval_map_wq __initdata;
    9133             : static struct work_struct eval_map_work __initdata;
    9134             : 
    9135           1 : static void __init eval_map_work_func(struct work_struct *work)
    9136             : {
    9137           1 :         int len;
    9138             : 
    9139           1 :         len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
    9140           1 :         trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
    9141           1 : }
    9142             : 
    9143           1 : static int __init trace_eval_init(void)
    9144             : {
    9145           1 :         INIT_WORK(&eval_map_work, eval_map_work_func);
    9146             : 
    9147           1 :         eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
    9148           1 :         if (!eval_map_wq) {
    9149           0 :                 pr_err("Unable to allocate eval_map_wq\n");
    9150             :                 /* Fall back to doing the work synchronously */
    9151           0 :                 eval_map_work_func(&eval_map_work);
    9152           0 :                 return -ENOMEM;
    9153             :         }
    9154             : 
    9155           1 :         queue_work(eval_map_wq, &eval_map_work);
    9156           1 :         return 0;
    9157             : }
    9158             : 
    9159           1 : static int __init trace_eval_sync(void)
    9160             : {
    9161             :         /* Make sure the eval map updates are finished */
    9162           1 :         if (eval_map_wq)
    9163           1 :                 destroy_workqueue(eval_map_wq);
    9164           1 :         return 0;
    9165             : }
    9166             : 
    9167             : late_initcall_sync(trace_eval_sync);
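
/*
 * The two initcalls above follow a common boot-time pattern: queue
 * slow setup on an unbound workqueue early, then synchronize later by
 * destroying the workqueue, since destroy_workqueue() flushes pending
 * work first. A generic sketch with hypothetical names:
 */
static struct workqueue_struct *example_wq __initdata;
static struct work_struct example_work __initdata;

static void __init example_work_func(struct work_struct *work)
{
        /* slow one-time setup goes here */
}

static int __init example_early(void)
{
        INIT_WORK(&example_work, example_work_func);
        example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
        if (!example_wq) {
                example_work_func(&example_work); /* fall back to sync */
                return -ENOMEM;
        }
        queue_work(example_wq, &example_work);
        return 0;
}

static int __init example_sync(void)
{
        if (example_wq)
                destroy_workqueue(example_wq); /* waits for example_work */
        return 0;
}
late_initcall_sync(example_sync);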
    9168             : 
    9169             : 
    9170             : #ifdef CONFIG_MODULES
    9171             : static void trace_module_add_evals(struct module *mod)
    9172             : {
    9173             :         if (!mod->num_trace_evals)
    9174             :                 return;
    9175             : 
    9176             :         /*
    9177             :          * Modules with bad taint do not have events created, do
    9178             :          * not bother with enums either.
    9179             :          */
    9180             :         if (trace_module_has_bad_taint(mod))
    9181             :                 return;
    9182             : 
    9183             :         trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
    9184             : }
    9185             : 
    9186             : #ifdef CONFIG_TRACE_EVAL_MAP_FILE
    9187             : static void trace_module_remove_evals(struct module *mod)
    9188             : {
    9189             :         union trace_eval_map_item *map;
    9190             :         union trace_eval_map_item **last = &trace_eval_maps;
    9191             : 
    9192             :         if (!mod->num_trace_evals)
    9193             :                 return;
    9194             : 
    9195             :         mutex_lock(&trace_eval_mutex);
    9196             : 
    9197             :         map = trace_eval_maps;
    9198             : 
    9199             :         while (map) {
    9200             :                 if (map->head.mod == mod)
    9201             :                         break;
    9202             :                 map = trace_eval_jmp_to_tail(map);
    9203             :                 last = &map->tail.next;
    9204             :                 map = map->tail.next;
    9205             :         }
    9206             :         if (!map)
    9207             :                 goto out;
    9208             : 
    9209             :         *last = trace_eval_jmp_to_tail(map)->tail.next;
    9210             :         kfree(map);
    9211             :  out:
    9212             :         mutex_unlock(&trace_eval_mutex);
    9213             : }
    9214             : #else
    9215             : static inline void trace_module_remove_evals(struct module *mod) { }
    9216             : #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
    9217             : 
    9218             : static int trace_module_notify(struct notifier_block *self,
    9219             :                                unsigned long val, void *data)
    9220             : {
    9221             :         struct module *mod = data;
    9222             : 
    9223             :         switch (val) {
    9224             :         case MODULE_STATE_COMING:
    9225             :                 trace_module_add_evals(mod);
    9226             :                 break;
    9227             :         case MODULE_STATE_GOING:
    9228             :                 trace_module_remove_evals(mod);
    9229             :                 break;
    9230             :         }
    9231             : 
    9232             :         return NOTIFY_OK;
    9233             : }
    9234             : 
    9235             : static struct notifier_block trace_module_nb = {
    9236             :         .notifier_call = trace_module_notify,
    9237             :         .priority = 0,
    9238             : };
    9239             : #endif /* CONFIG_MODULES */
    9240             : 
    9241           1 : static __init int tracer_init_tracefs(void)
    9242             : {
    9243           1 :         int ret;
    9244             : 
    9245           1 :         trace_access_lock_init();
    9246             : 
    9247           1 :         ret = tracing_init_dentry();
    9248           1 :         if (ret)
    9249             :                 return 0;
    9250             : 
    9251           1 :         event_trace_init();
    9252             : 
    9253           1 :         init_tracer_tracefs(&global_trace, NULL);
    9254           1 :         ftrace_init_tracefs_toplevel(&global_trace, NULL);
    9255             : 
    9256           1 :         trace_create_file("tracing_thresh", 0644, NULL,
    9257             :                         &global_trace, &tracing_thresh_fops);
    9258             : 
    9259           1 :         trace_create_file("README", 0444, NULL,
    9260             :                         NULL, &tracing_readme_fops);
    9261             : 
    9262           1 :         trace_create_file("saved_cmdlines", 0444, NULL,
    9263             :                         NULL, &tracing_saved_cmdlines_fops);
    9264             : 
    9265           1 :         trace_create_file("saved_cmdlines_size", 0644, NULL,
    9266             :                           NULL, &tracing_saved_cmdlines_size_fops);
    9267             : 
    9268           1 :         trace_create_file("saved_tgids", 0444, NULL,
    9269             :                         NULL, &tracing_saved_tgids_fops);
    9270             : 
    9271           1 :         trace_eval_init();
    9272             : 
    9273           1 :         trace_create_eval_file(NULL);
    9274             : 
    9275             : #ifdef CONFIG_MODULES
    9276             :         register_module_notifier(&trace_module_nb);
    9277             : #endif
    9278             : 
    9279             : #ifdef CONFIG_DYNAMIC_FTRACE
    9280             :         trace_create_file("dyn_ftrace_total_info", 0444, NULL,
    9281             :                         NULL, &tracing_dyn_info_fops);
    9282             : #endif
    9283             : 
    9284           1 :         create_trace_instances(NULL);
    9285             : 
    9286           1 :         update_tracer_options(&global_trace);
    9287             : 
    9288           1 :         return 0;
    9289             : }
    9290             : 
    9291           0 : static int trace_panic_handler(struct notifier_block *this,
    9292             :                                unsigned long event, void *unused)
    9293             : {
    9294           0 :         if (ftrace_dump_on_oops)
    9295           0 :                 ftrace_dump(ftrace_dump_on_oops);
    9296           0 :         return NOTIFY_OK;
    9297             : }
    9298             : 
    9299             : static struct notifier_block trace_panic_notifier = {
    9300             :         .notifier_call  = trace_panic_handler,
    9301             :         .next           = NULL,
    9302             :         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
    9303             : };
    9304             : 
    9305           0 : static int trace_die_handler(struct notifier_block *self,
    9306             :                              unsigned long val,
    9307             :                              void *data)
    9308             : {
    9309           0 :         switch (val) {
    9310           0 :         case DIE_OOPS:
    9311           0 :                 if (ftrace_dump_on_oops)
    9312           0 :                         ftrace_dump(ftrace_dump_on_oops);
    9313             :                 break;
    9314             :         default:
    9315             :                 break;
    9316             :         }
    9317           0 :         return NOTIFY_OK;
    9318             : }
    9319             : 
    9320             : static struct notifier_block trace_die_notifier = {
    9321             :         .notifier_call = trace_die_handler,
    9322             :         .priority = 200
    9323             : };
    9324             : 
    9325             : /*
    9326             :  * printk is set to a max of 1024; we really don't need it that big.
    9327             :  * Nothing should be printing 1000 characters anyway.
    9328             :  */
    9329             : #define TRACE_MAX_PRINT         1000
    9330             : 
    9331             : /*
    9332             :  * Define KERN_TRACE here so that we have one place to modify
    9333             :  * it if we decide to change the log level at which the ftrace
    9334             :  * dump should be printed.
    9335             :  */
    9336             : #define KERN_TRACE              KERN_EMERG
    9337             : 
    9338             : void
    9339           0 : trace_printk_seq(struct trace_seq *s)
    9340             : {
    9341             :         /* Probably should print a warning here. */
    9342           0 :         if (s->seq.len >= TRACE_MAX_PRINT)
    9343           0 :                 s->seq.len = TRACE_MAX_PRINT;
    9344             : 
    9345             :         /*
    9346             :          * More paranoid code. Although the buffer size is set to
    9347             :          * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
    9348             :          * an extra layer of protection.
    9349             :          */
    9350           0 :         if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
    9351           0 :                 s->seq.len = s->seq.size - 1;
    9352             : 
    9353             :         /* should be nul-terminated, but we are paranoid. */
    9354           0 :         s->buffer[s->seq.len] = 0;
    9355             : 
    9356           0 :         printk(KERN_TRACE "%s", s->buffer);
    9357             : 
    9358           0 :         trace_seq_init(s);
    9359           0 : }
    9360             : 
    9361           0 : void trace_init_global_iter(struct trace_iterator *iter)
    9362             : {
    9363           0 :         iter->tr = &global_trace;
    9364           0 :         iter->trace = iter->tr->current_trace;
    9365           0 :         iter->cpu_file = RING_BUFFER_ALL_CPUS;
    9366           0 :         iter->array_buffer = &global_trace.array_buffer;
    9367             : 
    9368           0 :         if (iter->trace && iter->trace->open)
    9369           0 :                 iter->trace->open(iter);
    9370             : 
    9371             :         /* Annotate start of buffers if we had overruns */
    9372           0 :         if (ring_buffer_overruns(iter->array_buffer->buffer))
    9373           0 :                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
    9374             : 
    9375             :         /* Output in nanoseconds only if we are using a clock in nanoseconds. */
    9376           0 :         if (trace_clocks[iter->tr->clock_id].in_ns)
    9377           0 :                 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
    9378           0 : }
    9379             : 
    9380           0 : void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
    9381             : {
    9382             :         /* use static because iter can be a bit big for the stack */
    9383           0 :         static struct trace_iterator iter;
    9384           0 :         static atomic_t dump_running;
    9385           0 :         struct trace_array *tr = &global_trace;
    9386           0 :         unsigned int old_userobj;
    9387           0 :         unsigned long flags;
    9388           0 :         int cnt = 0, cpu;
    9389             : 
    9390             :         /* Only allow one dump user at a time. */
    9391           0 :         if (atomic_inc_return(&dump_running) != 1) {
    9392           0 :                 atomic_dec(&dump_running);
    9393           0 :                 return;
    9394             :         }
    9395             : 
    9396             :         /*
    9397             :          * Always turn off tracing when we dump.
    9398             :          * We don't need to show trace output of what happens
    9399             :          * between multiple crashes.
    9400             :          *
    9401             :          * If the user does a sysrq-z, then they can re-enable
    9402             :          * tracing with echo 1 > tracing_on.
    9403             :          */
    9404           0 :         tracing_off();
    9405             : 
    9406           0 :         local_irq_save(flags);
    9407           0 :         printk_nmi_direct_enter();
    9408             : 
    9409             :         /* Simulate the iterator */
    9410           0 :         trace_init_global_iter(&iter);
    9411             :         /* Cannot use kmalloc for iter.temp and iter.fmt */
    9412           0 :         iter.temp = static_temp_buf;
    9413           0 :         iter.temp_size = STATIC_TEMP_BUF_SIZE;
    9414           0 :         iter.fmt = static_fmt_buf;
    9415           0 :         iter.fmt_size = STATIC_FMT_BUF_SIZE;
    9416             : 
    9417           0 :         for_each_tracing_cpu(cpu) {
    9418           0 :                 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
    9419             :         }
    9420             : 
    9421           0 :         old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
    9422             : 
    9423             :         /* don't look at user memory in panic mode */
    9424           0 :         tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
    9425             : 
    9426           0 :         switch (oops_dump_mode) {
    9427           0 :         case DUMP_ALL:
    9428           0 :                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
    9429           0 :                 break;
    9430           0 :         case DUMP_ORIG:
    9431           0 :                 iter.cpu_file = raw_smp_processor_id();
    9432           0 :                 break;
    9433           0 :         case DUMP_NONE:
    9434           0 :                 goto out_enable;
    9435           0 :         default:
    9436           0 :                 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
    9437           0 :                 iter.cpu_file = RING_BUFFER_ALL_CPUS;
    9438             :         }
    9439             : 
    9440           0 :         printk(KERN_TRACE "Dumping ftrace buffer:\n");
    9441             : 
    9442             :         /* Did function tracer already get disabled? */
    9443           0 :         if (ftrace_is_dead()) {
    9444             :                 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
    9445             :                 printk("#          MAY BE MISSING FUNCTION EVENTS\n");
    9446             :         }
    9447             : 
    9448             :         /*
    9449             :          * We need to stop all tracing on all CPUs to read
    9450             :          * the next buffer. This is a bit expensive, but is
    9451             :          * not done often. We fill in all that we can read,
    9452             :          * and then release the locks again.
    9453             :          */
    9454             : 
    9455           0 :         while (!trace_empty(&iter)) {
    9456             : 
    9457           0 :                 if (!cnt)
    9458           0 :                         printk(KERN_TRACE "---------------------------------\n");
    9459             : 
    9460           0 :                 cnt++;
    9461             : 
    9462           0 :                 trace_iterator_reset(&iter);
    9463           0 :                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
    9464             : 
    9465           0 :                 if (trace_find_next_entry_inc(&iter) != NULL) {
    9466           0 :                         int ret;
    9467             : 
    9468           0 :                         ret = print_trace_line(&iter);
    9469           0 :                         if (ret != TRACE_TYPE_NO_CONSUME)
    9470           0 :                                 trace_consume(&iter);
    9471             :                 }
    9472           0 :                 touch_nmi_watchdog();
    9473             : 
    9474           0 :                 trace_printk_seq(&iter.seq);
    9475             :         }
    9476             : 
    9477           0 :         if (!cnt)
    9478           0 :                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
    9479             :         else
    9480           0 :                 printk(KERN_TRACE "---------------------------------\n");
    9481             : 
    9482           0 :  out_enable:
    9483           0 :         tr->trace_flags |= old_userobj;
    9484             : 
    9485           0 :         for_each_tracing_cpu(cpu) {
    9486           0 :                 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
    9487             :         }
    9488           0 :         atomic_dec(&dump_running);
    9489           0 :         printk_nmi_direct_exit();
    9490           0 :         local_irq_restore(flags);
    9491             : }
    9492             : EXPORT_SYMBOL_GPL(ftrace_dump);
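
/*
 * Besides the panic/die notifiers above (gated by ftrace_dump_on_oops),
 * ftrace_dump() can be called directly when a subsystem hits a fatal
 * condition. A hypothetical sketch:
 */
static void example_fatal_error(void)
{
        pr_emerg("example: fatal state, dumping the trace buffer\n");
        ftrace_dump(DUMP_ALL);  /* or DUMP_ORIG for just this CPU */
        BUG();
}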
    9493             : 
    9494             : #define WRITE_BUFSIZE  4096
    9495             : 
    9496           0 : ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
    9497             :                                 size_t count, loff_t *ppos,
    9498             :                                 int (*createfn)(const char *))
    9499             : {
    9500           0 :         char *kbuf, *buf, *tmp;
    9501           0 :         int ret = 0;
    9502           0 :         size_t done = 0;
    9503           0 :         size_t size;
    9504             : 
    9505           0 :         kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
    9506           0 :         if (!kbuf)
    9507             :                 return -ENOMEM;
    9508             : 
    9509           0 :         while (done < count) {
    9510           0 :                 size = count - done;
    9511             : 
    9512           0 :                 if (size >= WRITE_BUFSIZE)
    9513             :                         size = WRITE_BUFSIZE - 1;
    9514             : 
    9515           0 :                 if (copy_from_user(kbuf, buffer + done, size)) {
    9516           0 :                         ret = -EFAULT;
    9517           0 :                         goto out;
    9518             :                 }
    9519           0 :                 kbuf[size] = '\0';
    9520           0 :                 buf = kbuf;
    9521           0 :                 do {
    9522           0 :                         tmp = strchr(buf, '\n');
    9523           0 :                         if (tmp) {
    9524           0 :                                 *tmp = '\0';
    9525           0 :                                 size = tmp - buf + 1;
    9526             :                         } else {
    9527           0 :                                 size = strlen(buf);
    9528           0 :                                 if (done + size < count) {
    9529           0 :                                         if (buf != kbuf)
    9530             :                                                 break;
    9531             :                                         /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
    9532           0 :                         pr_warn("Line is too long: should be less than %d\n",
    9533             :                                                 WRITE_BUFSIZE - 2);
    9534           0 :                                         ret = -EINVAL;
    9535           0 :                                         goto out;
    9536             :                                 }
    9537             :                         }
    9538           0 :                         done += size;
    9539             : 
    9540             :                         /* Remove comments */
    9541           0 :                         tmp = strchr(buf, '#');
    9542             : 
    9543           0 :                         if (tmp)
    9544           0 :                                 *tmp = '\0';
    9545             : 
    9546           0 :                         ret = createfn(buf);
    9547           0 :                         if (ret)
    9548           0 :                                 goto out;
    9549           0 :                         buf += size;
    9550             : 
    9551           0 :                 } while (done < count);
    9552             :         }
    9553           0 :         ret = done;
    9554             : 
    9555           0 : out:
    9556           0 :         kfree(kbuf);
    9557             : 
    9558           0 :         return ret;
    9559             : }
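
/*
 * A minimal sketch, not from trace.c, of the createfn contract:
 * trace_parse_run_command() splits the user buffer on '\n', strips '#'
 * comments, and calls the callback once per line; a non-zero return
 * aborts the remaining lines. "example_create" and "example_write" are
 * hypothetical names, and the sketch assumes the headers this file
 * already includes.
 */
static int example_create(const char *raw_command)
{
        /* A real callback would parse and act on the command. */
        pr_info("parsed command: '%s'\n", raw_command);
        return 0;
}

static ssize_t example_write(struct file *file, const char __user *buffer,
                             size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       example_create);
}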
    9560             : 
    9561           1 : __init static int tracer_alloc_buffers(void)
    9562             : {
    9563           1 :         int ring_buf_size;
    9564           1 :         int ret = -ENOMEM;
    9565             : 
    9566             : 
    9567           1 :         if (security_locked_down(LOCKDOWN_TRACEFS)) {
    9568           0 :                 pr_warn("Tracing disabled due to lockdown\n");
    9569           0 :                 return -EPERM;
    9570             :         }
    9571             : 
    9572             :         /*
    9573             :          * Make sure we don't accidentally add more trace options
    9574             :          * than we have bits for.
    9575             :          */
    9576           1 :         BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
    9577             : 
    9578           1 :         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
    9579             :                 goto out;
    9580             : 
    9581           1 :         if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
    9582             :                 goto out_free_buffer_mask;
    9583             : 
    9584             :         /* Only allocate trace_printk buffers if a trace_printk exists */
    9585           1 :         if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
    9586             :                 /* Must be called before global_trace.buffer is allocated */
    9587           0 :                 trace_printk_init_buffers();
    9588             : 
    9589             :         /* To save memory, keep the ring buffer size at its minimum */
    9590           1 :         if (ring_buffer_expanded)
    9591           0 :                 ring_buf_size = trace_buf_size;
    9592             :         else
    9593             :                 ring_buf_size = 1;
    9594             : 
    9595           1 :         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
    9596           1 :         cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
    9597             : 
    9598           1 :         raw_spin_lock_init(&global_trace.start_lock);
    9599             : 
    9600             :         /*
    9601             :          * The prepare callbacks allocate some memory for the ring buffer. We
    9602             :          * don't free the buffer if the CPU goes down. If we were to free
    9603             :          * the buffer, then the user would lose any trace that was in the
    9604             :          * buffer. The memory will be removed once the "instance" is removed.
    9605             :          */
    9606           1 :         ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
    9607             :                                       "trace/RB:prepare", trace_rb_cpu_prepare,
    9608             :                                       NULL);
    9609           1 :         if (ret < 0)
    9610           0 :                 goto out_free_cpumask;
    9611             :         /* Used for event triggers */
    9612           1 :         ret = -ENOMEM;
    9613           1 :         temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
    9614           1 :         if (!temp_buffer)
    9615           0 :                 goto out_rm_hp_state;
    9616             : 
    9617           1 :         if (trace_create_savedcmd() < 0)
    9618           0 :                 goto out_free_temp_buffer;
    9619             : 
    9620             :         /* TODO: make the number of buffers hot-pluggable with CPUs */
    9621           1 :         if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
    9622           0 :                 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
    9623           0 :                 goto out_free_savedcmd;
    9624             :         }
    9625             : 
    9626           1 :         if (global_trace.buffer_disabled)
    9627           0 :                 tracing_off();
    9628             : 
    9629           1 :         if (trace_boot_clock) {
    9630           0 :                 ret = tracing_set_clock(&global_trace, trace_boot_clock);
    9631           0 :                 if (ret < 0)
    9632           0 :                         pr_warn("Trace clock %s not defined, falling back to default\n",
    9633             :                                 trace_boot_clock);
    9634             :         }
    9635             : 
    9636             :         /*
    9637             :          * register_tracer() might reference current_trace, so it
    9638             :          * needs to be set before we register anything. This is
    9639             :          * just a bootstrap of current_trace anyway.
    9640             :          */
    9641           1 :         global_trace.current_trace = &nop_trace;
    9642             : 
    9643           1 :         global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    9644             : 
    9645           1 :         ftrace_init_global_array_ops(&global_trace);
    9646             : 
    9647          33 :         init_trace_flags_index(&global_trace);
    9648             : 
    9649           1 :         register_tracer(&nop_trace);
    9650             : 
    9651             :         /* Function tracing may start here (via kernel command line) */
    9652           1 :         init_function_trace();
    9653             : 
    9654             :         /* All seems OK, enable tracing */
    9655           1 :         tracing_disabled = 0;
    9656             : 
    9657           1 :         atomic_notifier_chain_register(&panic_notifier_list,
    9658             :                                        &trace_panic_notifier);
    9659             : 
    9660           1 :         register_die_notifier(&trace_die_notifier);
    9661             : 
    9662           1 :         global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
    9663             : 
    9664           1 :         INIT_LIST_HEAD(&global_trace.systems);
    9665           1 :         INIT_LIST_HEAD(&global_trace.events);
    9666           1 :         INIT_LIST_HEAD(&global_trace.hist_vars);
    9667           1 :         INIT_LIST_HEAD(&global_trace.err_log);
    9668           1 :         list_add(&global_trace.list, &ftrace_trace_arrays);
    9669             : 
    9670           1 :         apply_trace_boot_options();
    9671             : 
    9672           1 :         register_snapshot_cmd();
    9673             : 
    9674           1 :         return 0;
    9675             : 
    9676           0 : out_free_savedcmd:
    9677           0 :         free_saved_cmdlines_buffer(savedcmd);
    9678           0 : out_free_temp_buffer:
    9679           0 :         ring_buffer_free(temp_buffer);
    9680           0 : out_rm_hp_state:
    9681           0 :         cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
    9682             : out_free_cpumask:
    9683           1 :         free_cpumask_var(global_trace.tracing_cpumask);
    9684           1 : out_free_buffer_mask:
    9685           1 :         free_cpumask_var(tracing_buffer_mask);
    9686           1 : out:
    9687             :         return ret;
    9688             : }
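
/*
 * tracer_alloc_buffers() above unwinds failures with the kernel's goto
 * ladder: each error jumps to a label that releases only what was
 * already set up, in reverse order of acquisition, while the success
 * path falls straight through. A minimal sketch of the same idiom,
 * with hypothetical names, assuming the headers this file already
 * includes:
 */
static int example_setup(void)
{
        void *a, *b;
        int ret = -ENOMEM;

        a = kmalloc(64, GFP_KERNEL);
        if (!a)
                goto out;

        b = kmalloc(64, GFP_KERNEL);
        if (!b)
                goto out_free_a;

        return 0;               /* success: both allocations stay live */

out_free_a:
        kfree(a);
out:
        return ret;
}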
    9689             : 
    9690           1 : void __init early_trace_init(void)
    9691             : {
    9692           1 :         if (tracepoint_printk) {
    9693           0 :                 tracepoint_print_iter =
    9694           0 :                         kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
    9695           0 :                 if (MEM_FAIL(!tracepoint_print_iter,
    9696             :                              "Failed to allocate trace iterator\n"))
    9697           0 :                         tracepoint_printk = 0;
    9698             :                 else
    9699           0 :                         static_key_enable(&tracepoint_printk_key.key);
    9700             :         }
    9701           1 :         tracer_alloc_buffers();
    9702           1 : }
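
/*
 * tracepoint_printk_key, enabled above, is a static key: the branch it
 * guards is patched at runtime, so the check costs a no-op on the fast
 * path until the key is flipped. A minimal sketch of the idiom, with
 * hypothetical names ("example_key", "example_fast_path"):
 */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_key);

static void example_fast_path(void)
{
        /* Compiles to straight-line code while the key is disabled. */
        if (static_branch_unlikely(&example_key))
                pr_info("rarely-enabled slow path\n");
}

static void example_enable(void)
{
        static_branch_enable(&example_key);     /* patch the branch in */
}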
    9703             : 
    9704           1 : void __init trace_init(void)
    9705             : {
    9706           1 :         trace_event_init();
    9707           1 : }
    9708             : 
    9709           1 : __init static int clear_boot_tracer(void)
    9710             : {
    9711             :         /*
    9712             :          * The default bootup tracer lives in an init section and is
    9713             :          * freed after boot. This function runs at late_initcall time;
    9714             :          * if the boot tracer was never registered, clear it out to
    9715             :          * prevent a later registration from accessing the buffer
    9716             :          * that is about to be freed.
    9717             :          */
    9718           1 :         if (!default_bootup_tracer)
    9719             :                 return 0;
    9720             : 
    9721           0 :         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
    9722             :                default_bootup_tracer);
    9723           0 :         default_bootup_tracer = NULL;
    9724             : 
    9725           0 :         return 0;
    9726             : }
    9727             : 
    9728             : fs_initcall(tracer_init_tracefs);
    9729             : late_initcall_sync(clear_boot_tracer);
    9730             : 
    9731             : #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
    9732           1 : __init static int tracing_set_default_clock(void)
    9733             : {
    9734             :         /* sched_clock_stable() is determined in late_initcall */
    9735           1 :         if (!trace_boot_clock && !sched_clock_stable()) {
    9736           0 :                 if (security_locked_down(LOCKDOWN_TRACEFS)) {
    9737           0 :                         pr_warn("Cannot set tracing clock due to lockdown\n");
    9738           0 :                         return -EPERM;
    9739             :                 }
    9740             : 
    9741           0 :                 printk(KERN_WARNING
    9742             :                        "Unstable clock detected, switching default tracing clock to \"global\"\n"
    9743             :                        "If you want to keep using the local clock, then add:\n"
    9744             :                        "  \"trace_clock=local\"\n"
    9745             :                        "on the kernel command line\n");
    9746           0 :                 tracing_set_clock(&global_trace, "global");
    9747             :         }
    9748             : 
    9749             :         return 0;
    9750             : }
    9751             : late_initcall_sync(tracing_set_default_clock);
    9752             : #endif
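
/*
 * For reference, the trace_boot_clock checked above is filled in from
 * the "trace_clock=" kernel command-line parameter. A minimal sketch of
 * the usual __setup() plumbing for such a parameter, with hypothetical
 * names ("example_clock", "example_clock_setup"), assuming the headers
 * this file already includes:
 */
static char example_clock[16];

static int __init example_clock_setup(char *str)
{
        strscpy(example_clock, str, sizeof(example_clock));
        return 1;       /* 1 == parameter consumed */
}
__setup("example_clock=", example_clock_setup);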

Generated by: LCOV version 1.14