Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * trace_events_trigger - trace event triggers
4 : *
5 : * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
6 : */
7 :
8 : #include <linux/security.h>
9 : #include <linux/module.h>
10 : #include <linux/ctype.h>
11 : #include <linux/mutex.h>
12 : #include <linux/slab.h>
13 : #include <linux/rculist.h>
14 :
15 : #include "trace.h"
16 :
17 : static LIST_HEAD(trigger_commands);
18 : static DEFINE_MUTEX(trigger_cmd_mutex);
19 :
20 0 : void trigger_data_free(struct event_trigger_data *data)
21 : {
22 0 : if (data->cmd_ops->set_filter)
23 0 : data->cmd_ops->set_filter(NULL, data, NULL);
24 :
25 : /* make sure current triggers exit before free */
26 0 : tracepoint_synchronize_unregister();
27 :
28 0 : kfree(data);
29 0 : }
30 :
31 : /**
32 : * event_triggers_call - Call triggers associated with a trace event
33 : * @file: The trace_event_file associated with the event
34 : * @rec: The trace entry for the event, NULL for unconditional invocation
35 : *
36 : * For each trigger associated with an event, invoke the trigger
37 : * function registered with the associated trigger command. If rec is
38 : * non-NULL, it means that the trigger requires further processing and
39 : * shouldn't be unconditionally invoked. If rec is non-NULL and the
40 : * trigger has a filter associated with it, rec will be checked against
41 : * the filter and, if the record matches, the trigger will be invoked.
42 : * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
43 : * in any case until the current event is written, the trigger
44 : * function isn't invoked but the bit associated with the deferred
45 : * trigger is set in the return value.
46 : *
50 : * Called from tracepoint handlers (with rcu_read_lock_sched() held).
51 : *
52 : * Return: an enum event_trigger_type value containing a set bit for
53 : * any trigger that should be deferred, ETT_NONE if nothing to defer.
54 : */
55 : enum event_trigger_type
56 0 : event_triggers_call(struct trace_event_file *file, void *rec,
57 : struct ring_buffer_event *event)
58 : {
59 0 : struct event_trigger_data *data;
60 0 : enum event_trigger_type tt = ETT_NONE;
61 0 : struct event_filter *filter;
62 :
63 0 : if (list_empty(&file->triggers))
64 : return tt;
65 :
66 0 : list_for_each_entry_rcu(data, &file->triggers, list) {
67 0 : if (data->paused)
68 0 : continue;
69 0 : if (!rec) {
70 0 : data->ops->func(data, rec, event);
71 0 : continue;
72 : }
73 0 : filter = rcu_dereference_sched(data->filter);
74 0 : if (filter && !filter_match_preds(filter, rec))
75 0 : continue;
76 0 : if (event_command_post_trigger(data->cmd_ops)) {
77 0 : tt |= data->cmd_ops->trigger_type;
78 0 : continue;
79 : }
80 0 : data->ops->func(data, rec, event);
81 : }
82 : return tt;
83 : }
84 : EXPORT_SYMBOL_GPL(event_triggers_call);
85 :
86 : /**
87 : * event_triggers_post_call - Call 'post_triggers' for a trace event
88 : * @file: The trace_event_file associated with the event
89 : * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
90 : *
91 : * For each trigger associated with an event, invoke the trigger
92 : * function registered with the associated trigger command, if the
93 : * corresponding bit is set in the tt enum passed into this function.
94 : * See event_triggers_call() for details on how those bits are set.
95 : *
96 : * Called from tracepoint handlers (with rcu_read_lock_sched() held).
97 : */
98 : void
99 0 : event_triggers_post_call(struct trace_event_file *file,
100 : enum event_trigger_type tt)
101 : {
102 0 : struct event_trigger_data *data;
103 :
104 0 : list_for_each_entry_rcu(data, &file->triggers, list) {
105 0 : if (data->paused)
106 0 : continue;
107 0 : if (data->cmd_ops->trigger_type & tt)
108 0 : data->ops->func(data, NULL, NULL);
109 : }
110 0 : }
111 : EXPORT_SYMBOL_GPL(event_triggers_post_call);
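/*
 * Illustrative sketch (not part of this file): callers in the event
 * commit path are expected to pair the two entry points above, running
 * normal triggers before the record is written and running any
 * deferred post-triggers only after the event has been committed.
 * The helper name below is hypothetical.
 */
static inline void example_commit_with_triggers(struct trace_event_file *file,
						void *rec,
						struct ring_buffer_event *event)
{
	enum event_trigger_type tt;

	/* run non-deferred triggers; collect bits for deferred ones */
	tt = event_triggers_call(file, rec, event);

	/* ... write and commit the event to the ring buffer here ... */

	if (tt)
		event_triggers_post_call(file, tt);	/* deferred triggers */
}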
112 :
113 : #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
114 :
115 0 : static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
116 : {
117 0 : struct trace_event_file *event_file = event_file_data(m->private);
118 :
119 0 : if (t == SHOW_AVAILABLE_TRIGGERS) {
120 0 : (*pos)++;
121 0 : return NULL;
122 : }
123 0 : return seq_list_next(t, &event_file->triggers, pos);
124 : }
125 :
126 0 : static void *trigger_start(struct seq_file *m, loff_t *pos)
127 : {
128 0 : struct trace_event_file *event_file;
129 :
130 : /* ->stop() is called even if ->start() fails */
131 0 : mutex_lock(&event_mutex);
132 0 : event_file = event_file_data(m->private);
133 0 : if (unlikely(!event_file))
134 0 : return ERR_PTR(-ENODEV);
135 :
136 0 : if (list_empty(&event_file->triggers))
137 0 : return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
138 :
139 0 : return seq_list_start(&event_file->triggers, *pos);
140 : }
141 :
142 0 : static void trigger_stop(struct seq_file *m, void *t)
143 : {
144 0 : mutex_unlock(&event_mutex);
145 0 : }
146 :
147 0 : static int trigger_show(struct seq_file *m, void *v)
148 : {
149 0 : struct event_trigger_data *data;
150 0 : struct event_command *p;
151 :
152 0 : if (v == SHOW_AVAILABLE_TRIGGERS) {
153 0 : seq_puts(m, "# Available triggers:\n");
154 0 : seq_putc(m, '#');
155 0 : mutex_lock(&trigger_cmd_mutex);
156 0 : list_for_each_entry_reverse(p, &trigger_commands, list)
157 0 : seq_printf(m, " %s", p->name);
158 0 : seq_putc(m, '\n');
159 0 : mutex_unlock(&trigger_cmd_mutex);
160 0 : return 0;
161 : }
162 :
163 0 : data = list_entry(v, struct event_trigger_data, list);
164 0 : data->ops->print(m, data->ops, data);
165 :
166 0 : return 0;
167 : }
168 :
169 : static const struct seq_operations event_triggers_seq_ops = {
170 : .start = trigger_start,
171 : .next = trigger_next,
172 : .stop = trigger_stop,
173 : .show = trigger_show,
174 : };
175 :
176 0 : static int event_trigger_regex_open(struct inode *inode, struct file *file)
177 : {
178 0 : int ret;
179 :
180 0 : ret = security_locked_down(LOCKDOWN_TRACEFS);
181 0 : if (ret)
182 : return ret;
183 :
184 0 : mutex_lock(&event_mutex);
185 :
186 0 : if (unlikely(!event_file_data(file))) {
187 0 : mutex_unlock(&event_mutex);
188 0 : return -ENODEV;
189 : }
190 :
191 0 : if ((file->f_mode & FMODE_WRITE) &&
192 : (file->f_flags & O_TRUNC)) {
193 0 : struct trace_event_file *event_file;
194 0 : struct event_command *p;
195 :
196 0 : event_file = event_file_data(file);
197 :
198 0 : list_for_each_entry(p, &trigger_commands, list) {
199 0 : if (p->unreg_all)
200 0 : p->unreg_all(event_file);
201 : }
202 : }
203 :
204 0 : if (file->f_mode & FMODE_READ) {
205 0 : ret = seq_open(file, &event_triggers_seq_ops);
206 0 : if (!ret) {
207 0 : struct seq_file *m = file->private_data;
208 0 : m->private = file;
209 : }
210 : }
211 :
212 0 : mutex_unlock(&event_mutex);
213 :
214 0 : return ret;
215 : }
216 :
217 0 : int trigger_process_regex(struct trace_event_file *file, char *buff)
218 : {
219 0 : char *command, *next;
220 0 : struct event_command *p;
221 0 : int ret = -EINVAL;
222 :
223 0 : next = buff = skip_spaces(buff);
224 0 : command = strsep(&next, ": \t");
225 0 : if (next) {
226 0 : next = skip_spaces(next);
227 0 : if (!*next)
228 0 : next = NULL;
229 : }
230 0 : command = (command[0] != '!') ? command : command + 1;
231 :
232 0 : mutex_lock(&trigger_cmd_mutex);
233 0 : list_for_each_entry(p, &trigger_commands, list) {
234 0 : if (strcmp(p->name, command) == 0) {
235 0 : ret = p->func(p, file, buff, command, next);
236 0 : goto out_unlock;
237 : }
238 : }
239 0 : out_unlock:
240 0 : mutex_unlock(&trigger_cmd_mutex);
241 :
242 0 : return ret;
243 : }
244 :
245 0 : static ssize_t event_trigger_regex_write(struct file *file,
246 : const char __user *ubuf,
247 : size_t cnt, loff_t *ppos)
248 : {
249 0 : struct trace_event_file *event_file;
250 0 : ssize_t ret;
251 0 : char *buf;
252 :
253 0 : if (!cnt)
254 : return 0;
255 :
256 0 : if (cnt >= PAGE_SIZE)
257 : return -EINVAL;
258 :
259 0 : buf = memdup_user_nul(ubuf, cnt);
260 0 : if (IS_ERR(buf))
261 0 : return PTR_ERR(buf);
262 :
263 0 : strim(buf);
264 :
265 0 : mutex_lock(&event_mutex);
266 0 : event_file = event_file_data(file);
267 0 : if (unlikely(!event_file)) {
268 0 : mutex_unlock(&event_mutex);
269 0 : kfree(buf);
270 0 : return -ENODEV;
271 : }
272 0 : ret = trigger_process_regex(event_file, buf);
273 0 : mutex_unlock(&event_mutex);
274 :
275 0 : kfree(buf);
276 0 : if (ret < 0)
277 0 : goto out;
278 :
279 0 : *ppos += cnt;
280 0 : ret = cnt;
281 : out:
282 : return ret;
283 : }
284 :
285 0 : static int event_trigger_regex_release(struct inode *inode, struct file *file)
286 : {
287 0 : mutex_lock(&event_mutex);
288 :
289 0 : if (file->f_mode & FMODE_READ)
290 0 : seq_release(inode, file);
291 :
292 0 : mutex_unlock(&event_mutex);
293 :
294 0 : return 0;
295 : }
296 :
297 : static ssize_t
298 0 : event_trigger_write(struct file *filp, const char __user *ubuf,
299 : size_t cnt, loff_t *ppos)
300 : {
301 0 : return event_trigger_regex_write(filp, ubuf, cnt, ppos);
302 : }
303 :
304 : static int
305 0 : event_trigger_open(struct inode *inode, struct file *filp)
306 : {
307 : /* Checks for tracefs lockdown */
308 0 : return event_trigger_regex_open(inode, filp);
309 : }
310 :
311 : static int
312 0 : event_trigger_release(struct inode *inode, struct file *file)
313 : {
314 0 : return event_trigger_regex_release(inode, file);
315 : }
316 :
317 : const struct file_operations event_trigger_fops = {
318 : .open = event_trigger_open,
319 : .read = seq_read,
320 : .write = event_trigger_write,
321 : .llseek = tracing_lseek,
322 : .release = event_trigger_release,
323 : };
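/*
 * Usage note (illustrative): event_trigger_fops backs the per-event
 * 'trigger' file in tracefs.  From userspace a trigger is added or
 * removed by writing a '<command>[:count] [if filter]' string to that
 * file, and active triggers are listed by reading it, e.g.:
 *
 *   echo 'traceoff:5 if prev_pid == 1' > \
 *       /sys/kernel/tracing/events/sched/sched_switch/trigger
 *   echo '!traceoff' > \
 *       /sys/kernel/tracing/events/sched/sched_switch/trigger
 *
 * Writes land in event_trigger_regex_write(), which hands the parsed
 * string to trigger_process_regex() above.
 */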
324 :
325 : /*
326 : * Currently we only register event commands from __init, so mark this
327 : * __init too.
328 : */
329 5 : __init int register_event_command(struct event_command *cmd)
330 : {
331 5 : struct event_command *p;
332 5 : int ret = 0;
333 :
334 5 : mutex_lock(&trigger_cmd_mutex);
335 15 : list_for_each_entry(p, &trigger_commands, list) {
336 10 : if (strcmp(cmd->name, p->name) == 0) {
337 0 : ret = -EBUSY;
338 0 : goto out_unlock;
339 : }
340 : }
341 5 : list_add(&cmd->list, &trigger_commands);
342 5 : out_unlock:
343 5 : mutex_unlock(&trigger_cmd_mutex);
344 :
345 5 : return ret;
346 : }
347 :
348 : /*
349 : * Currently we only unregister event commands from __init, so mark
350 : * this __init too.
351 : */
352 0 : __init int unregister_event_command(struct event_command *cmd)
353 : {
354 0 : struct event_command *p, *n;
355 0 : int ret = -ENODEV;
356 :
357 0 : mutex_lock(&trigger_cmd_mutex);
358 0 : list_for_each_entry_safe(p, n, &trigger_commands, list) {
359 0 : if (strcmp(cmd->name, p->name) == 0) {
360 0 : ret = 0;
361 0 : list_del_init(&p->list);
362 0 : goto out_unlock;
363 : }
364 : }
365 0 : out_unlock:
366 0 : mutex_unlock(&trigger_cmd_mutex);
367 :
368 0 : return ret;
369 : }
370 :
371 : /**
372 : * event_trigger_print - Generic event_trigger_ops @print implementation
373 : * @name: The name of the event trigger
374 : * @m: The seq_file being printed to
375 : * @data: Trigger-specific data
376 : * @filter_str: filter_str to print, if present
377 : *
378 : * Common implementation for event triggers to print themselves.
379 : *
380 : * Usually wrapped by a function that simply sets the @name of the
381 : * trigger command and then invokes this.
382 : *
383 : * Return: 0 on success, errno otherwise
384 : */
385 : static int
386 0 : event_trigger_print(const char *name, struct seq_file *m,
387 : void *data, char *filter_str)
388 : {
389 0 : long count = (long)data;
390 :
391 0 : seq_puts(m, name);
392 :
393 0 : if (count == -1)
394 0 : seq_puts(m, ":unlimited");
395 : else
396 0 : seq_printf(m, ":count=%ld", count);
397 :
398 0 : if (filter_str)
399 0 : seq_printf(m, " if %s\n", filter_str);
400 : else
401 0 : seq_putc(m, '\n');
402 :
403 0 : return 0;
404 : }
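/*
 * Example output (illustrative): read back through the 'trigger' file,
 * the helper above renders entries such as
 *
 *   traceon:unlimited
 *   traceoff:count=5 if common_pid == 1234
 */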
405 :
406 : /**
407 : * event_trigger_init - Generic event_trigger_ops @init implementation
408 : * @ops: The trigger ops associated with the trigger
409 : * @data: Trigger-specific data
410 : *
411 : * Common implementation of event trigger initialization.
412 : *
413 : * Usually used directly as the @init method in event trigger
414 : * implementations.
415 : *
416 : * Return: 0 on success, errno otherwise
417 : */
418 0 : int event_trigger_init(struct event_trigger_ops *ops,
419 : struct event_trigger_data *data)
420 : {
421 0 : data->ref++;
422 0 : return 0;
423 : }
424 :
425 : /**
426 : * event_trigger_free - Generic event_trigger_ops @free implementation
427 : * @ops: The trigger ops associated with the trigger
428 : * @data: Trigger-specific data
429 : *
430 : * Common implementation of event trigger de-initialization.
431 : *
432 : * Usually used directly as the @free method in event trigger
433 : * implementations.
434 : */
435 : static void
436 0 : event_trigger_free(struct event_trigger_ops *ops,
437 : struct event_trigger_data *data)
438 : {
439 0 : if (WARN_ON_ONCE(data->ref <= 0))
440 : return;
441 :
442 0 : data->ref--;
443 0 : if (!data->ref)
444 0 : trigger_data_free(data);
445 : }
446 :
447 0 : int trace_event_trigger_enable_disable(struct trace_event_file *file,
448 : int trigger_enable)
449 : {
450 0 : int ret = 0;
451 :
452 0 : if (trigger_enable) {
453 0 : if (atomic_inc_return(&file->tm_ref) > 1)
454 : return ret;
455 0 : set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
456 0 : ret = trace_event_enable_disable(file, 1, 1);
457 : } else {
458 0 : if (atomic_dec_return(&file->tm_ref) > 0)
459 : return ret;
460 0 : clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
461 0 : ret = trace_event_enable_disable(file, 0, 1);
462 : }
463 :
464 : return ret;
465 : }
466 :
467 : /**
468 : * clear_event_triggers - Clear all triggers associated with a trace array
469 : * @tr: The trace array to clear
470 : *
471 : * For each trigger, the triggering event has its tm_ref decremented
472 : * via trace_event_trigger_enable_disable(), and any associated event
473 : * (in the case of enable/disable_event triggers) will have its sm_ref
474 : * decremented via free()->trace_event_enable_disable(). That
475 : * combination effectively reverses the soft-mode/trigger state added
476 : * by trigger registration.
477 : *
478 : * Must be called with event_mutex held.
479 : */
480 : void
481 0 : clear_event_triggers(struct trace_array *tr)
482 : {
483 0 : struct trace_event_file *file;
484 :
485 0 : list_for_each_entry(file, &tr->events, list) {
486 0 : struct event_trigger_data *data, *n;
487 0 : list_for_each_entry_safe(data, n, &file->triggers, list) {
488 0 : trace_event_trigger_enable_disable(file, 0);
489 0 : list_del_rcu(&data->list);
490 0 : if (data->ops->free)
491 0 : data->ops->free(data->ops, data);
492 : }
493 : }
494 0 : }
495 :
496 : /**
497 : * update_cond_flag - Set or reset the TRIGGER_COND bit
498 : * @file: The trace_event_file associated with the event
499 : *
500 : * If an event has triggers and any of those triggers has a filter or
501 : * a post_trigger, trigger invocation needs to be deferred until after
502 : * the current event has logged its data, and the event should have
503 : * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
504 : * cleared.
505 : */
506 0 : void update_cond_flag(struct trace_event_file *file)
507 : {
508 0 : struct event_trigger_data *data;
509 0 : bool set_cond = false;
510 :
511 0 : lockdep_assert_held(&event_mutex);
512 :
513 0 : list_for_each_entry(data, &file->triggers, list) {
514 0 : if (data->filter || event_command_post_trigger(data->cmd_ops) ||
515 0 : event_command_needs_rec(data->cmd_ops)) {
516 0 : set_cond = true;
517 0 : break;
518 : }
519 : }
520 :
521 0 : if (set_cond)
522 0 : set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
523 : else
524 0 : clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
525 0 : }
526 :
527 : /**
528 : * register_trigger - Generic event_command @reg implementation
529 : * @glob: The raw string used to register the trigger
530 : * @ops: The trigger ops associated with the trigger
531 : * @data: Trigger-specific data to associate with the trigger
532 : * @file: The trace_event_file associated with the event
533 : *
534 : * Common implementation for event trigger registration.
535 : *
536 : * Usually used directly as the @reg method in event command
537 : * implementations.
538 : *
539 : * Return: 0 on success, errno otherwise
540 : */
541 0 : static int register_trigger(char *glob, struct event_trigger_ops *ops,
542 : struct event_trigger_data *data,
543 : struct trace_event_file *file)
544 : {
545 0 : struct event_trigger_data *test;
546 0 : int ret = 0;
547 :
548 0 : lockdep_assert_held(&event_mutex);
549 :
550 0 : list_for_each_entry(test, &file->triggers, list) {
551 0 : if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
552 0 : ret = -EEXIST;
553 0 : goto out;
554 : }
555 : }
556 :
557 0 : if (data->ops->init) {
558 0 : ret = data->ops->init(data->ops, data);
559 0 : if (ret < 0)
560 0 : goto out;
561 : }
562 :
563 0 : list_add_rcu(&data->list, &file->triggers);
564 0 : ret++;
565 :
566 0 : update_cond_flag(file);
567 0 : if (trace_event_trigger_enable_disable(file, 1) < 0) {
568 0 : list_del_rcu(&data->list);
569 0 : update_cond_flag(file);
570 0 : ret--;
571 : }
572 0 : out:
573 0 : return ret;
574 : }
575 :
576 : /**
577 : * unregister_trigger - Generic event_command @unreg implementation
578 : * @glob: The raw string used to register the trigger
579 : * @ops: The trigger ops associated with the trigger
580 : * @test: Trigger-specific data used to find the trigger to remove
581 : * @file: The trace_event_file associated with the event
582 : *
583 : * Common implementation for event trigger unregistration.
584 : *
585 : * Usually used directly as the @unreg method in event command
586 : * implementations.
587 : */
588 0 : static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
589 : struct event_trigger_data *test,
590 : struct trace_event_file *file)
591 : {
592 0 : struct event_trigger_data *data;
593 0 : bool unregistered = false;
594 :
595 0 : lockdep_assert_held(&event_mutex);
596 :
597 0 : list_for_each_entry(data, &file->triggers, list) {
598 0 : if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
599 0 : unregistered = true;
600 0 : list_del_rcu(&data->list);
601 0 : trace_event_trigger_enable_disable(file, 0);
602 0 : update_cond_flag(file);
603 0 : break;
604 : }
605 : }
606 :
607 0 : if (unregistered && data->ops->free)
608 0 : data->ops->free(data->ops, data);
609 0 : }
610 :
611 : /**
612 : * event_trigger_callback - Generic event_command @func implementation
613 : * @cmd_ops: The command ops, used for trigger registration
614 : * @file: The trace_event_file associated with the event
615 : * @glob: The raw string used to register the trigger
616 : * @cmd: The cmd portion of the string used to register the trigger
617 : * @param: The params portion of the string used to register the trigger
618 : *
619 : * Common implementation for event command parsing and trigger
620 : * instantiation.
621 : *
622 : * Usually used directly as the @func method in event command
623 : * implementations.
624 : *
625 : * Return: 0 on success, errno otherwise
626 : */
627 : static int
628 0 : event_trigger_callback(struct event_command *cmd_ops,
629 : struct trace_event_file *file,
630 : char *glob, char *cmd, char *param)
631 : {
632 0 : struct event_trigger_data *trigger_data;
633 0 : struct event_trigger_ops *trigger_ops;
634 0 : char *trigger = NULL;
635 0 : char *number;
636 0 : int ret;
637 :
638 : /* separate the trigger from the filter (t:n [if filter]) */
639 0 : if (param && isdigit(param[0])) {
640 0 : trigger = strsep(¶m, " \t");
641 0 : if (param) {
642 0 : param = skip_spaces(param);
643 0 : if (!*param)
644 0 : param = NULL;
645 : }
646 : }
647 :
648 0 : trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
649 :
650 0 : ret = -ENOMEM;
651 0 : trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
652 0 : if (!trigger_data)
653 0 : goto out;
654 :
655 0 : trigger_data->count = -1;
656 0 : trigger_data->ops = trigger_ops;
657 0 : trigger_data->cmd_ops = cmd_ops;
658 0 : trigger_data->private_data = file;
659 0 : INIT_LIST_HEAD(&trigger_data->list);
660 0 : INIT_LIST_HEAD(&trigger_data->named_list);
661 :
662 0 : if (glob[0] == '!') {
663 0 : cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
664 0 : kfree(trigger_data);
665 0 : ret = 0;
666 0 : goto out;
667 : }
668 :
669 0 : if (trigger) {
670 0 : number = strsep(&trigger, ":");
671 :
672 0 : ret = -EINVAL;
673 0 : if (!strlen(number))
674 0 : goto out_free;
675 :
676 : /*
677 : * Parse the optional trigger count into
678 : * trigger_data->count.
679 : */
680 0 : ret = kstrtoul(number, 0, &trigger_data->count);
681 0 : if (ret)
682 0 : goto out_free;
683 : }
684 :
685 0 : if (!param) /* if param is non-empty, it's supposed to be a filter */
686 0 : goto out_reg;
687 :
688 0 : if (!cmd_ops->set_filter)
689 0 : goto out_reg;
690 :
691 0 : ret = cmd_ops->set_filter(param, trigger_data, file);
692 0 : if (ret < 0)
693 0 : goto out_free;
694 :
695 0 : out_reg:
696 : /* Up the trigger_data count to make sure reg doesn't free it on failure */
697 0 : event_trigger_init(trigger_ops, trigger_data);
698 0 : ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
699 : /*
700 : * The above returns on success the # of functions enabled,
701 : * but if it didn't find any functions it returns zero.
702 : * Consider no functions a failure too.
703 : */
704 0 : if (!ret) {
705 0 : cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
706 0 : ret = -ENOENT;
707 0 : } else if (ret > 0)
708 : ret = 0;
709 :
710 : /* Down the counter of trigger_data or free it if not used anymore */
711 0 : event_trigger_free(trigger_ops, trigger_data);
712 0 : out:
713 0 : return ret;
714 :
715 0 : out_free:
716 0 : if (cmd_ops->set_filter)
717 0 : cmd_ops->set_filter(NULL, trigger_data, NULL);
718 0 : kfree(trigger_data);
719 0 : goto out;
720 : }
721 :
722 : /**
723 : * set_trigger_filter - Generic event_command @set_filter implementation
724 : * @filter_str: The filter string for the trigger, NULL to remove filter
725 : * @trigger_data: Trigger-specific data
726 : * @file: The trace_event_file associated with the event
727 : *
728 : * Common implementation for event command filter parsing and filter
729 : * instantiation.
730 : *
731 : * Usually used directly as the @set_filter method in event command
732 : * implementations.
733 : *
734 : * Also used to remove a filter (if filter_str = NULL).
735 : *
736 : * Return: 0 on success, errno otherwise
737 : */
738 0 : int set_trigger_filter(char *filter_str,
739 : struct event_trigger_data *trigger_data,
740 : struct trace_event_file *file)
741 : {
742 0 : struct event_trigger_data *data = trigger_data;
743 0 : struct event_filter *filter = NULL, *tmp;
744 0 : int ret = -EINVAL;
745 0 : char *s;
746 :
747 0 : if (!filter_str) /* clear the current filter */
748 0 : goto assign;
749 :
750 0 : s = strsep(&filter_str, " \t");
751 :
752 0 : if (!strlen(s) || strcmp(s, "if") != 0)
753 0 : goto out;
754 :
755 0 : if (!filter_str)
756 0 : goto out;
757 :
758 : /* The filter is for the 'trigger' event, not the triggered event */
759 0 : ret = create_event_filter(file->tr, file->event_call,
760 : filter_str, false, &filter);
761 : /*
762 : * If create_event_filter() fails, the filter still needs to be freed;
763 : * the calling code does that through data->filter.
764 : */
765 0 : assign:
766 0 : tmp = rcu_access_pointer(data->filter);
767 :
768 0 : rcu_assign_pointer(data->filter, filter);
769 :
770 0 : if (tmp) {
771 : /* Make sure the call is done with the filter */
772 0 : tracepoint_synchronize_unregister();
773 0 : free_event_filter(tmp);
774 : }
775 :
776 0 : kfree(data->filter_str);
777 0 : data->filter_str = NULL;
778 :
779 0 : if (filter_str) {
780 0 : data->filter_str = kstrdup(filter_str, GFP_KERNEL);
781 0 : if (!data->filter_str) {
782 0 : free_event_filter(rcu_access_pointer(data->filter));
783 0 : data->filter = NULL;
784 0 : ret = -ENOMEM;
785 : }
786 : }
787 0 : out:
788 0 : return ret;
789 : }
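/*
 * Illustrative note: the string handed to set_trigger_filter() is the
 * trailing portion of the trigger command and must start with "if";
 * for 'traceoff:5 if common_pid == 1234' the filter string is
 * "if common_pid == 1234".  Passing NULL removes an existing filter.
 */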
790 :
791 : static LIST_HEAD(named_triggers);
792 :
793 : /**
794 : * find_named_trigger - Find the common named trigger associated with @name
795 : * @name: The name of the set of named triggers to find the common data for
796 : *
797 : * Named triggers are sets of triggers that share a common set of
798 : * trigger data. The first named trigger registered with a given name
799 : * owns the common trigger data that the others subsequently
800 : * registered with the same name will reference. This function
801 : * returns the common trigger data associated with that first
802 : * registered instance.
803 : *
804 : * Return: the common trigger data for the given named trigger on
805 : * success, NULL otherwise.
806 : */
807 0 : struct event_trigger_data *find_named_trigger(const char *name)
808 : {
809 0 : struct event_trigger_data *data;
810 :
811 0 : if (!name)
812 : return NULL;
813 :
814 0 : list_for_each_entry(data, &named_triggers, named_list) {
815 0 : if (data->named_data)
816 0 : continue;
817 0 : if (strcmp(data->name, name) == 0)
818 0 : return data;
819 : }
820 :
821 : return NULL;
822 : }
823 :
824 : /**
825 : * is_named_trigger - determine if a given trigger is a named trigger
826 : * @test: The trigger data to test
827 : *
828 : * Return: true if 'test' is a named trigger, false otherwise.
829 : */
830 0 : bool is_named_trigger(struct event_trigger_data *test)
831 : {
832 0 : struct event_trigger_data *data;
833 :
834 0 : list_for_each_entry(data, &named_triggers, named_list) {
835 0 : if (test == data)
836 : return true;
837 : }
838 :
839 : return false;
840 : }
841 :
842 : /**
843 : * save_named_trigger - save the trigger in the named trigger list
844 : * @name: The name of the named trigger set
845 : * @data: The trigger data to save
846 : *
847 : * Return: 0 if successful, negative error otherwise.
848 : */
849 0 : int save_named_trigger(const char *name, struct event_trigger_data *data)
850 : {
851 0 : data->name = kstrdup(name, GFP_KERNEL);
852 0 : if (!data->name)
853 : return -ENOMEM;
854 :
855 0 : list_add(&data->named_list, &named_triggers);
856 :
857 0 : return 0;
858 : }
859 :
860 : /**
861 : * del_named_trigger - delete a trigger from the named trigger list
862 : * @data: The trigger data to delete
863 : */
864 0 : void del_named_trigger(struct event_trigger_data *data)
865 : {
866 0 : kfree(data->name);
867 0 : data->name = NULL;
868 :
869 0 : list_del(&data->named_list);
870 0 : }
871 :
872 0 : static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
873 : {
874 0 : struct event_trigger_data *test;
875 :
876 0 : list_for_each_entry(test, &named_triggers, named_list) {
877 0 : if (strcmp(test->name, data->name) == 0) {
878 0 : if (pause) {
879 0 : test->paused_tmp = test->paused;
880 0 : test->paused = true;
881 : } else {
882 0 : test->paused = test->paused_tmp;
883 : }
884 : }
885 : }
886 0 : }
887 :
888 : /**
889 : * pause_named_trigger - Pause all named triggers with the same name
890 : * @data: The trigger data of a named trigger to pause
891 : *
892 : * Pauses a named trigger along with all other triggers having the
893 : * same name. Because named triggers share a common set of data,
894 : * pausing only one is meaningless, so pausing one named trigger needs
895 : * to pause all triggers with the same name.
896 : */
897 0 : void pause_named_trigger(struct event_trigger_data *data)
898 : {
899 0 : __pause_named_trigger(data, true);
900 0 : }
901 :
902 : /**
903 : * unpause_named_trigger - Un-pause all named triggers with the same name
904 : * @data: The trigger data of a named trigger to unpause
905 : *
906 : * Un-pauses a named trigger along with all other triggers having the
907 : * same name. Because named triggers share a common set of data,
908 : * unpausing only one is meaningless, so unpausing one named trigger
909 : * needs to unpause all triggers with the same name.
910 : */
911 0 : void unpause_named_trigger(struct event_trigger_data *data)
912 : {
913 0 : __pause_named_trigger(data, false);
914 0 : }
915 :
916 : /**
917 : * set_named_trigger_data - Associate common named trigger data
918 : * @data: The trigger data to associate with @named_data
919 : *
920 : * Named triggers are sets of triggers that share a common set of
921 : * trigger data. The first named trigger registered with a given name
922 : * owns the common trigger data that the others subsequently
923 : * registered with the same name will reference. This function
924 : * associates the common trigger data from the first trigger with the
925 : * given trigger.
926 : */
927 0 : void set_named_trigger_data(struct event_trigger_data *data,
928 : struct event_trigger_data *named_data)
929 : {
930 0 : data->named_data = named_data;
931 0 : }
932 :
933 : struct event_trigger_data *
934 0 : get_named_trigger_data(struct event_trigger_data *data)
935 : {
936 0 : return data->named_data;
937 : }
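/*
 * Illustrative sketch (hypothetical caller): a command implementing
 * named triggers would typically look up any existing common data,
 * attach it, and then save the new instance, roughly:
 *
 *	named_data = find_named_trigger(name);
 *	if (named_data)
 *		set_named_trigger_data(data, named_data);
 *	ret = save_named_trigger(name, data);
 *
 * where 'name', 'data', 'named_data' and 'ret' are the caller's locals.
 */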
938 :
939 : static void
940 0 : traceon_trigger(struct event_trigger_data *data, void *rec,
941 : struct ring_buffer_event *event)
942 : {
943 0 : if (tracing_is_on())
944 : return;
945 :
946 0 : tracing_on();
947 : }
948 :
949 : static void
950 0 : traceon_count_trigger(struct event_trigger_data *data, void *rec,
951 : struct ring_buffer_event *event)
952 : {
953 0 : if (tracing_is_on())
954 : return;
955 :
956 0 : if (!data->count)
957 : return;
958 :
959 0 : if (data->count != -1)
960 0 : (data->count)--;
961 :
962 0 : tracing_on();
963 : }
964 :
965 : static void
966 0 : traceoff_trigger(struct event_trigger_data *data, void *rec,
967 : struct ring_buffer_event *event)
968 : {
969 0 : if (!tracing_is_on())
970 : return;
971 :
972 0 : tracing_off();
973 : }
974 :
975 : static void
976 0 : traceoff_count_trigger(struct event_trigger_data *data, void *rec,
977 : struct ring_buffer_event *event)
978 : {
979 0 : if (!tracing_is_on())
980 : return;
981 :
982 0 : if (!data->count)
983 : return;
984 :
985 0 : if (data->count != -1)
986 0 : (data->count)--;
987 :
988 0 : tracing_off();
989 : }
990 :
991 : static int
992 0 : traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
993 : struct event_trigger_data *data)
994 : {
995 0 : return event_trigger_print("traceon", m, (void *)data->count,
996 : data->filter_str);
997 : }
998 :
999 : static int
1000 0 : traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1001 : struct event_trigger_data *data)
1002 : {
1003 0 : return event_trigger_print("traceoff", m, (void *)data->count,
1004 : data->filter_str);
1005 : }
1006 :
1007 : static struct event_trigger_ops traceon_trigger_ops = {
1008 : .func = traceon_trigger,
1009 : .print = traceon_trigger_print,
1010 : .init = event_trigger_init,
1011 : .free = event_trigger_free,
1012 : };
1013 :
1014 : static struct event_trigger_ops traceon_count_trigger_ops = {
1015 : .func = traceon_count_trigger,
1016 : .print = traceon_trigger_print,
1017 : .init = event_trigger_init,
1018 : .free = event_trigger_free,
1019 : };
1020 :
1021 : static struct event_trigger_ops traceoff_trigger_ops = {
1022 : .func = traceoff_trigger,
1023 : .print = traceoff_trigger_print,
1024 : .init = event_trigger_init,
1025 : .free = event_trigger_free,
1026 : };
1027 :
1028 : static struct event_trigger_ops traceoff_count_trigger_ops = {
1029 : .func = traceoff_count_trigger,
1030 : .print = traceoff_trigger_print,
1031 : .init = event_trigger_init,
1032 : .free = event_trigger_free,
1033 : };
1034 :
1035 : static struct event_trigger_ops *
1036 0 : onoff_get_trigger_ops(char *cmd, char *param)
1037 : {
1038 0 : struct event_trigger_ops *ops;
1039 :
1040 : /* we register both traceon and traceoff to this callback */
1041 0 : if (strcmp(cmd, "traceon") == 0)
1042 0 : ops = param ? &traceon_count_trigger_ops :
1043 : &traceon_trigger_ops;
1044 : else
1045 0 : ops = param ? &traceoff_count_trigger_ops :
1046 : &traceoff_trigger_ops;
1047 :
1048 0 : return ops;
1049 : }
1050 :
1051 : static struct event_command trigger_traceon_cmd = {
1052 : .name = "traceon",
1053 : .trigger_type = ETT_TRACE_ONOFF,
1054 : .func = event_trigger_callback,
1055 : .reg = register_trigger,
1056 : .unreg = unregister_trigger,
1057 : .get_trigger_ops = onoff_get_trigger_ops,
1058 : .set_filter = set_trigger_filter,
1059 : };
1060 :
1061 : static struct event_command trigger_traceoff_cmd = {
1062 : .name = "traceoff",
1063 : .trigger_type = ETT_TRACE_ONOFF,
1064 : .flags = EVENT_CMD_FL_POST_TRIGGER,
1065 : .func = event_trigger_callback,
1066 : .reg = register_trigger,
1067 : .unreg = unregister_trigger,
1068 : .get_trigger_ops = onoff_get_trigger_ops,
1069 : .set_filter = set_trigger_filter,
1070 : };
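/*
 * Illustrative sketch: a new trigger command would be wired up the same
 * way as the two commands above - fill in a struct event_command that
 * reuses the generic helpers and register it at init time.  The name
 * "example" and the command itself are hypothetical; ETT_TRACE_ONOFF is
 * reused here only for illustration.
 */
static struct event_command trigger_example_cmd __maybe_unused = {
	.name			= "example",
	.trigger_type		= ETT_TRACE_ONOFF,
	.func			= event_trigger_callback,
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.get_trigger_ops	= onoff_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};

/*
 * It would then be registered from an __init function, mirroring the
 * helpers at the bottom of this file:
 *
 *	ret = register_event_command(&trigger_example_cmd);
 */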
1071 :
1072 : #ifdef CONFIG_TRACER_SNAPSHOT
1073 : static void
1074 : snapshot_trigger(struct event_trigger_data *data, void *rec,
1075 : struct ring_buffer_event *event)
1076 : {
1077 : struct trace_event_file *file = data->private_data;
1078 :
1079 : if (file)
1080 : tracing_snapshot_instance(file->tr);
1081 : else
1082 : tracing_snapshot();
1083 : }
1084 :
1085 : static void
1086 : snapshot_count_trigger(struct event_trigger_data *data, void *rec,
1087 : struct ring_buffer_event *event)
1088 : {
1089 : if (!data->count)
1090 : return;
1091 :
1092 : if (data->count != -1)
1093 : (data->count)--;
1094 :
1095 : snapshot_trigger(data, rec, event);
1096 : }
1097 :
1098 : static int
1099 : register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1100 : struct event_trigger_data *data,
1101 : struct trace_event_file *file)
1102 : {
1103 : if (tracing_alloc_snapshot_instance(file->tr) != 0)
1104 : return 0;
1105 :
1106 : return register_trigger(glob, ops, data, file);
1107 : }
1108 :
1109 : static int
1110 : snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1111 : struct event_trigger_data *data)
1112 : {
1113 : return event_trigger_print("snapshot", m, (void *)data->count,
1114 : data->filter_str);
1115 : }
1116 :
1117 : static struct event_trigger_ops snapshot_trigger_ops = {
1118 : .func = snapshot_trigger,
1119 : .print = snapshot_trigger_print,
1120 : .init = event_trigger_init,
1121 : .free = event_trigger_free,
1122 : };
1123 :
1124 : static struct event_trigger_ops snapshot_count_trigger_ops = {
1125 : .func = snapshot_count_trigger,
1126 : .print = snapshot_trigger_print,
1127 : .init = event_trigger_init,
1128 : .free = event_trigger_free,
1129 : };
1130 :
1131 : static struct event_trigger_ops *
1132 : snapshot_get_trigger_ops(char *cmd, char *param)
1133 : {
1134 : return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1135 : }
1136 :
1137 : static struct event_command trigger_snapshot_cmd = {
1138 : .name = "snapshot",
1139 : .trigger_type = ETT_SNAPSHOT,
1140 : .func = event_trigger_callback,
1141 : .reg = register_snapshot_trigger,
1142 : .unreg = unregister_trigger,
1143 : .get_trigger_ops = snapshot_get_trigger_ops,
1144 : .set_filter = set_trigger_filter,
1145 : };
1146 :
1147 : static __init int register_trigger_snapshot_cmd(void)
1148 : {
1149 : int ret;
1150 :
1151 : ret = register_event_command(&trigger_snapshot_cmd);
1152 : WARN_ON(ret < 0);
1153 :
1154 : return ret;
1155 : }
1156 : #else
1157 1 : static __init int register_trigger_snapshot_cmd(void) { return 0; }
1158 : #endif /* CONFIG_TRACER_SNAPSHOT */
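/*
 * Usage note (illustrative): when CONFIG_TRACER_SNAPSHOT is enabled,
 * writing 'snapshot' (or 'snapshot:N') to an event's trigger file takes
 * a snapshot of the instance's ring buffer each time the event fires;
 * register_snapshot_trigger() above pre-allocates the snapshot buffer
 * before handing off to the generic register_trigger().
 */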
1159 :
1160 : #ifdef CONFIG_STACKTRACE
1161 : #ifdef CONFIG_UNWINDER_ORC
1162 : /* Skip 2:
1163 : * event_triggers_post_call()
1164 : * trace_event_raw_event_xxx()
1165 : */
1166 : # define STACK_SKIP 2
1167 : #else
1168 : /*
1169 : * Skip 4:
1170 : * stacktrace_trigger()
1171 : * event_triggers_post_call()
1172 : * trace_event_buffer_commit()
1173 : * trace_event_raw_event_xxx()
1174 : */
1175 : #define STACK_SKIP 4
1176 : #endif
1177 :
1178 : static void
1179 0 : stacktrace_trigger(struct event_trigger_data *data, void *rec,
1180 : struct ring_buffer_event *event)
1181 : {
1182 0 : trace_dump_stack(STACK_SKIP);
1183 0 : }
1184 :
1185 : static void
1186 0 : stacktrace_count_trigger(struct event_trigger_data *data, void *rec,
1187 : struct ring_buffer_event *event)
1188 : {
1189 0 : if (!data->count)
1190 : return;
1191 :
1192 0 : if (data->count != -1)
1193 0 : (data->count)--;
1194 :
1195 0 : stacktrace_trigger(data, rec, event);
1196 : }
1197 :
1198 : static int
1199 0 : stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1200 : struct event_trigger_data *data)
1201 : {
1202 0 : return event_trigger_print("stacktrace", m, (void *)data->count,
1203 : data->filter_str);
1204 : }
1205 :
1206 : static struct event_trigger_ops stacktrace_trigger_ops = {
1207 : .func = stacktrace_trigger,
1208 : .print = stacktrace_trigger_print,
1209 : .init = event_trigger_init,
1210 : .free = event_trigger_free,
1211 : };
1212 :
1213 : static struct event_trigger_ops stacktrace_count_trigger_ops = {
1214 : .func = stacktrace_count_trigger,
1215 : .print = stacktrace_trigger_print,
1216 : .init = event_trigger_init,
1217 : .free = event_trigger_free,
1218 : };
1219 :
1220 : static struct event_trigger_ops *
1221 0 : stacktrace_get_trigger_ops(char *cmd, char *param)
1222 : {
1223 0 : return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1224 : }
1225 :
1226 : static struct event_command trigger_stacktrace_cmd = {
1227 : .name = "stacktrace",
1228 : .trigger_type = ETT_STACKTRACE,
1229 : .flags = EVENT_CMD_FL_POST_TRIGGER,
1230 : .func = event_trigger_callback,
1231 : .reg = register_trigger,
1232 : .unreg = unregister_trigger,
1233 : .get_trigger_ops = stacktrace_get_trigger_ops,
1234 : .set_filter = set_trigger_filter,
1235 : };
1236 :
1237 1 : static __init int register_trigger_stacktrace_cmd(void)
1238 : {
1239 1 : int ret;
1240 :
1241 1 : ret = register_event_command(&trigger_stacktrace_cmd);
1242 1 : WARN_ON(ret < 0);
1243 :
1244 1 : return ret;
1245 : }
1246 : #else
1247 : static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1248 : #endif /* CONFIG_STACKTRACE */
1249 :
1250 0 : static __init void unregister_trigger_traceon_traceoff_cmds(void)
1251 : {
1252 0 : unregister_event_command(&trigger_traceon_cmd);
1253 0 : unregister_event_command(&trigger_traceoff_cmd);
1254 0 : }
1255 :
1256 : static void
1257 0 : event_enable_trigger(struct event_trigger_data *data, void *rec,
1258 : struct ring_buffer_event *event)
1259 : {
1260 0 : struct enable_trigger_data *enable_data = data->private_data;
1261 :
1262 0 : if (enable_data->enable)
1263 0 : clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1264 : else
1265 0 : set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1266 0 : }
1267 :
1268 : static void
1269 0 : event_enable_count_trigger(struct event_trigger_data *data, void *rec,
1270 : struct ring_buffer_event *event)
1271 : {
1272 0 : struct enable_trigger_data *enable_data = data->private_data;
1273 :
1274 0 : if (!data->count)
1275 : return;
1276 :
1277 : /* Skip if the event is in a state we want to switch to */
1278 0 : if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1279 : return;
1280 :
1281 0 : if (data->count != -1)
1282 0 : (data->count)--;
1283 :
1284 0 : event_enable_trigger(data, rec, event);
1285 : }
1286 :
1287 0 : int event_enable_trigger_print(struct seq_file *m,
1288 : struct event_trigger_ops *ops,
1289 : struct event_trigger_data *data)
1290 : {
1291 0 : struct enable_trigger_data *enable_data = data->private_data;
1292 :
1293 0 : seq_printf(m, "%s:%s:%s",
1294 0 : enable_data->hist ?
1295 0 : (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1296 0 : (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1297 0 : enable_data->file->event_call->class->system,
1298 0 : trace_event_name(enable_data->file->event_call));
1299 :
1300 0 : if (data->count == -1)
1301 0 : seq_puts(m, ":unlimited");
1302 : else
1303 0 : seq_printf(m, ":count=%ld", data->count);
1304 :
1305 0 : if (data->filter_str)
1306 0 : seq_printf(m, " if %s\n", data->filter_str);
1307 : else
1308 0 : seq_putc(m, '\n');
1309 :
1310 0 : return 0;
1311 : }
1312 :
1313 0 : void event_enable_trigger_free(struct event_trigger_ops *ops,
1314 : struct event_trigger_data *data)
1315 : {
1316 0 : struct enable_trigger_data *enable_data = data->private_data;
1317 :
1318 0 : if (WARN_ON_ONCE(data->ref <= 0))
1319 : return;
1320 :
1321 0 : data->ref--;
1322 0 : if (!data->ref) {
1323 : /* Remove the SOFT_MODE flag */
1324 0 : trace_event_enable_disable(enable_data->file, 0, 1);
1325 0 : module_put(enable_data->file->event_call->mod);
1326 0 : trigger_data_free(data);
1327 0 : kfree(enable_data);
1328 : }
1329 : }
1330 :
1331 : static struct event_trigger_ops event_enable_trigger_ops = {
1332 : .func = event_enable_trigger,
1333 : .print = event_enable_trigger_print,
1334 : .init = event_trigger_init,
1335 : .free = event_enable_trigger_free,
1336 : };
1337 :
1338 : static struct event_trigger_ops event_enable_count_trigger_ops = {
1339 : .func = event_enable_count_trigger,
1340 : .print = event_enable_trigger_print,
1341 : .init = event_trigger_init,
1342 : .free = event_enable_trigger_free,
1343 : };
1344 :
1345 : static struct event_trigger_ops event_disable_trigger_ops = {
1346 : .func = event_enable_trigger,
1347 : .print = event_enable_trigger_print,
1348 : .init = event_trigger_init,
1349 : .free = event_enable_trigger_free,
1350 : };
1351 :
1352 : static struct event_trigger_ops event_disable_count_trigger_ops = {
1353 : .func = event_enable_count_trigger,
1354 : .print = event_enable_trigger_print,
1355 : .init = event_trigger_init,
1356 : .free = event_enable_trigger_free,
1357 : };
1358 :
1359 0 : int event_enable_trigger_func(struct event_command *cmd_ops,
1360 : struct trace_event_file *file,
1361 : char *glob, char *cmd, char *param)
1362 : {
1363 0 : struct trace_event_file *event_enable_file;
1364 0 : struct enable_trigger_data *enable_data;
1365 0 : struct event_trigger_data *trigger_data;
1366 0 : struct event_trigger_ops *trigger_ops;
1367 0 : struct trace_array *tr = file->tr;
1368 0 : const char *system;
1369 0 : const char *event;
1370 0 : bool hist = false;
1371 0 : char *trigger;
1372 0 : char *number;
1373 0 : bool enable;
1374 0 : int ret;
1375 :
1376 0 : if (!param)
1377 : return -EINVAL;
1378 :
1379 : /* separate the trigger from the filter (s:e:n [if filter]) */
1380 0 : trigger = strsep(¶m, " \t");
1381 0 : if (!trigger)
1382 : return -EINVAL;
1383 0 : if (param) {
1384 0 : param = skip_spaces(param);
1385 0 : if (!*param)
1386 0 : param = NULL;
1387 : }
1388 :
1389 0 : system = strsep(&trigger, ":");
1390 0 : if (!trigger)
1391 : return -EINVAL;
1392 :
1393 0 : event = strsep(&trigger, ":");
1394 :
1395 0 : ret = -EINVAL;
1396 0 : event_enable_file = find_event_file(tr, system, event);
1397 0 : if (!event_enable_file)
1398 0 : goto out;
1399 :
1400 : #ifdef CONFIG_HIST_TRIGGERS
1401 : hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1402 : (strcmp(cmd, DISABLE_HIST_STR) == 0));
1403 :
1404 : enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1405 : (strcmp(cmd, ENABLE_HIST_STR) == 0));
1406 : #else
1407 0 : enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1408 : #endif
1409 0 : trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1410 :
1411 0 : ret = -ENOMEM;
1412 0 : trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1413 0 : if (!trigger_data)
1414 0 : goto out;
1415 :
1416 0 : enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1417 0 : if (!enable_data) {
1418 0 : kfree(trigger_data);
1419 0 : goto out;
1420 : }
1421 :
1422 0 : trigger_data->count = -1;
1423 0 : trigger_data->ops = trigger_ops;
1424 0 : trigger_data->cmd_ops = cmd_ops;
1425 0 : INIT_LIST_HEAD(&trigger_data->list);
1426 0 : RCU_INIT_POINTER(trigger_data->filter, NULL);
1427 :
1428 0 : enable_data->hist = hist;
1429 0 : enable_data->enable = enable;
1430 0 : enable_data->file = event_enable_file;
1431 0 : trigger_data->private_data = enable_data;
1432 :
1433 0 : if (glob[0] == '!') {
1434 0 : cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1435 0 : kfree(trigger_data);
1436 0 : kfree(enable_data);
1437 0 : ret = 0;
1438 0 : goto out;
1439 : }
1440 :
1441 : /* Up the trigger_data count to make sure nothing frees it on failure */
1442 0 : event_trigger_init(trigger_ops, trigger_data);
1443 :
1444 0 : if (trigger) {
1445 0 : number = strsep(&trigger, ":");
1446 :
1447 0 : ret = -EINVAL;
1448 0 : if (!strlen(number))
1449 0 : goto out_free;
1450 :
1451 : /*
1452 : * Parse the optional trigger count into
1453 : * trigger_data->count.
1454 : */
1455 0 : ret = kstrtoul(number, 0, &trigger_data->count);
1456 0 : if (ret)
1457 0 : goto out_free;
1458 : }
1459 :
1460 0 : if (!param) /* if param is non-empty, it's supposed to be a filter */
1461 0 : goto out_reg;
1462 :
1463 0 : if (!cmd_ops->set_filter)
1464 0 : goto out_reg;
1465 :
1466 0 : ret = cmd_ops->set_filter(param, trigger_data, file);
1467 0 : if (ret < 0)
1468 0 : goto out_free;
1469 :
1470 0 : out_reg:
1471 : /* Don't let event modules unload while probe registered */
1472 0 : ret = try_module_get(event_enable_file->event_call->mod);
1473 0 : if (!ret) {
1474 : ret = -EBUSY;
1475 : goto out_free;
1476 : }
1477 :
1478 0 : ret = trace_event_enable_disable(event_enable_file, 1, 1);
1479 0 : if (ret < 0)
1480 0 : goto out_put;
1481 0 : ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1482 : /*
1483 : * The above returns on success the # of functions enabled,
1484 : * but if it didn't find any functions it returns zero.
1485 : * Consider no functions a failure too.
1486 : */
1487 0 : if (!ret) {
1488 0 : ret = -ENOENT;
1489 0 : goto out_disable;
1490 0 : } else if (ret < 0)
1491 0 : goto out_disable;
1492 : /* Just return zero, not the number of enabled functions */
1493 0 : ret = 0;
1494 0 : event_trigger_free(trigger_ops, trigger_data);
1495 : out:
1496 : return ret;
1497 :
1498 0 : out_disable:
1499 0 : trace_event_enable_disable(event_enable_file, 0, 1);
1500 0 : out_put:
1501 0 : module_put(event_enable_file->event_call->mod);
1502 0 : out_free:
1503 0 : if (cmd_ops->set_filter)
1504 0 : cmd_ops->set_filter(NULL, trigger_data, NULL);
1505 0 : event_trigger_free(trigger_ops, trigger_data);
1506 0 : kfree(enable_data);
1507 0 : goto out;
1508 : }
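/*
 * Usage note (illustrative): the enable_event/disable_event commands
 * parsed above take a 'system:event[:count] [if filter]' argument, e.g.
 * (example taken from the trace event documentation):
 *
 *   echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \
 *       /sys/kernel/tracing/events/block/block_unplug/trigger
 *
 * which soft-enables the kmem:kmalloc event for the next three
 * block_unplug events whose nr_rq field is greater than 1.
 */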
1509 :
1510 0 : int event_enable_register_trigger(char *glob,
1511 : struct event_trigger_ops *ops,
1512 : struct event_trigger_data *data,
1513 : struct trace_event_file *file)
1514 : {
1515 0 : struct enable_trigger_data *enable_data = data->private_data;
1516 0 : struct enable_trigger_data *test_enable_data;
1517 0 : struct event_trigger_data *test;
1518 0 : int ret = 0;
1519 :
1520 0 : lockdep_assert_held(&event_mutex);
1521 :
1522 0 : list_for_each_entry(test, &file->triggers, list) {
1523 0 : test_enable_data = test->private_data;
1524 0 : if (test_enable_data &&
1525 0 : (test->cmd_ops->trigger_type ==
1526 0 : data->cmd_ops->trigger_type) &&
1527 0 : (test_enable_data->file == enable_data->file)) {
1528 0 : ret = -EEXIST;
1529 0 : goto out;
1530 : }
1531 : }
1532 :
1533 0 : if (data->ops->init) {
1534 0 : ret = data->ops->init(data->ops, data);
1535 0 : if (ret < 0)
1536 0 : goto out;
1537 : }
1538 :
1539 0 : list_add_rcu(&data->list, &file->triggers);
1540 0 : ret++;
1541 :
1542 0 : update_cond_flag(file);
1543 0 : if (trace_event_trigger_enable_disable(file, 1) < 0) {
1544 0 : list_del_rcu(&data->list);
1545 0 : update_cond_flag(file);
1546 0 : ret--;
1547 : }
1548 0 : out:
1549 0 : return ret;
1550 : }
1551 :
1552 0 : void event_enable_unregister_trigger(char *glob,
1553 : struct event_trigger_ops *ops,
1554 : struct event_trigger_data *test,
1555 : struct trace_event_file *file)
1556 : {
1557 0 : struct enable_trigger_data *test_enable_data = test->private_data;
1558 0 : struct enable_trigger_data *enable_data;
1559 0 : struct event_trigger_data *data;
1560 0 : bool unregistered = false;
1561 :
1562 0 : lockdep_assert_held(&event_mutex);
1563 :
1564 0 : list_for_each_entry(data, &file->triggers, list) {
1565 0 : enable_data = data->private_data;
1566 0 : if (enable_data &&
1567 0 : (data->cmd_ops->trigger_type ==
1568 0 : test->cmd_ops->trigger_type) &&
1569 0 : (enable_data->file == test_enable_data->file)) {
1570 0 : unregistered = true;
1571 0 : list_del_rcu(&data->list);
1572 0 : trace_event_trigger_enable_disable(file, 0);
1573 0 : update_cond_flag(file);
1574 0 : break;
1575 : }
1576 : }
1577 :
1578 0 : if (unregistered && data->ops->free)
1579 0 : data->ops->free(data->ops, data);
1580 0 : }
1581 :
1582 : static struct event_trigger_ops *
1583 0 : event_enable_get_trigger_ops(char *cmd, char *param)
1584 : {
1585 0 : struct event_trigger_ops *ops;
1586 0 : bool enable;
1587 :
1588 : #ifdef CONFIG_HIST_TRIGGERS
1589 : enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1590 : (strcmp(cmd, ENABLE_HIST_STR) == 0));
1591 : #else
1592 0 : enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1593 : #endif
1594 0 : if (enable)
1595 0 : ops = param ? &event_enable_count_trigger_ops :
1596 : &event_enable_trigger_ops;
1597 : else
1598 0 : ops = param ? &event_disable_count_trigger_ops :
1599 : &event_disable_trigger_ops;
1600 :
1601 0 : return ops;
1602 : }
1603 :
1604 : static struct event_command trigger_enable_cmd = {
1605 : .name = ENABLE_EVENT_STR,
1606 : .trigger_type = ETT_EVENT_ENABLE,
1607 : .func = event_enable_trigger_func,
1608 : .reg = event_enable_register_trigger,
1609 : .unreg = event_enable_unregister_trigger,
1610 : .get_trigger_ops = event_enable_get_trigger_ops,
1611 : .set_filter = set_trigger_filter,
1612 : };
1613 :
1614 : static struct event_command trigger_disable_cmd = {
1615 : .name = DISABLE_EVENT_STR,
1616 : .trigger_type = ETT_EVENT_ENABLE,
1617 : .func = event_enable_trigger_func,
1618 : .reg = event_enable_register_trigger,
1619 : .unreg = event_enable_unregister_trigger,
1620 : .get_trigger_ops = event_enable_get_trigger_ops,
1621 : .set_filter = set_trigger_filter,
1622 : };
1623 :
1624 0 : static __init void unregister_trigger_enable_disable_cmds(void)
1625 : {
1626 0 : unregister_event_command(&trigger_enable_cmd);
1627 0 : unregister_event_command(&trigger_disable_cmd);
1628 0 : }
1629 :
1630 1 : static __init int register_trigger_enable_disable_cmds(void)
1631 : {
1632 1 : int ret;
1633 :
1634 1 : ret = register_event_command(&trigger_enable_cmd);
1635 1 : if (WARN_ON(ret < 0))
1636 : return ret;
1637 1 : ret = register_event_command(&trigger_disable_cmd);
1638 1 : if (WARN_ON(ret < 0))
1639 0 : unregister_trigger_enable_disable_cmds();
1640 :
1641 : return ret;
1642 : }
1643 :
1644 1 : static __init int register_trigger_traceon_traceoff_cmds(void)
1645 : {
1646 1 : int ret;
1647 :
1648 1 : ret = register_event_command(&trigger_traceon_cmd);
1649 1 : if (WARN_ON(ret < 0))
1650 : return ret;
1651 1 : ret = register_event_command(&trigger_traceoff_cmd);
1652 1 : if (WARN_ON(ret < 0))
1653 0 : unregister_trigger_traceon_traceoff_cmds();
1654 :
1655 : return ret;
1656 : }
1657 :
1658 1 : __init int register_trigger_cmds(void)
1659 : {
1660 1 : register_trigger_traceon_traceoff_cmds();
1661 1 : register_trigger_snapshot_cmd();
1662 1 : register_trigger_stacktrace_cmd();
1663 1 : register_trigger_enable_disable_cmds();
1664 1 : register_trigger_hist_enable_disable_cmds();
1665 1 : register_trigger_hist_cmd();
1666 :
1667 1 : return 0;
1668 : }