Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Copyright (C) 2007 Alan Stern
4 : * Copyright (C) IBM Corporation, 2009
5 : * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
6 : *
7 : * Thanks to Ingo Molnar for his many suggestions.
8 : *
9 : * Authors: Alan Stern <stern@rowland.harvard.edu>
10 : * K.Prasad <prasad@linux.vnet.ibm.com>
11 : * Frederic Weisbecker <fweisbec@gmail.com>
12 : */
13 :
14 : /*
15 : * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
16 : * using the CPU's debug registers.
17 : * This file contains the arch-independent routines.
18 : */
19 :
20 : #include <linux/irqflags.h>
21 : #include <linux/kallsyms.h>
22 : #include <linux/notifier.h>
23 : #include <linux/kprobes.h>
24 : #include <linux/kdebug.h>
25 : #include <linux/kernel.h>
26 : #include <linux/module.h>
27 : #include <linux/percpu.h>
28 : #include <linux/sched.h>
29 : #include <linux/init.h>
30 : #include <linux/slab.h>
31 : #include <linux/list.h>
32 : #include <linux/cpu.h>
33 : #include <linux/smp.h>
34 : #include <linux/bug.h>
35 :
36 : #include <linux/hw_breakpoint.h>
37 : /*
38 : * Constraints data
39 : */
40 : struct bp_cpuinfo {
41 : /* Number of pinned cpu breakpoints in a cpu */
42 : unsigned int cpu_pinned;
43 : /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
44 : unsigned int *tsk_pinned;
45 : /* Number of non-pinned cpu/task breakpoints in a cpu */
46 : unsigned int flexible; /* XXX: placeholder, see fetch_this_slot() */
47 : };
48 :
49 : static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
50 : static int nr_slots[TYPE_MAX];
51 :
52 4 : static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
53 : {
54 4 : return per_cpu_ptr(bp_cpuinfo + type, cpu);
55 : }
56 :
57 : /* Keep track of the breakpoints attached to tasks */
58 : static LIST_HEAD(bp_task_head);
59 :
60 : static int constraints_initialized;
61 :
62 : /* Gather the total number of pinned and un-pinned bp in a cpuset */
63 : struct bp_busy_slots {
64 : unsigned int pinned;
65 : unsigned int flexible;
66 : };
67 :
68 : /* Serialize accesses to the above constraints */
69 : static DEFINE_MUTEX(nr_bp_mutex);
70 :
71 0 : __weak int hw_breakpoint_weight(struct perf_event *bp)
72 : {
73 0 : return 1;
74 : }
75 :
76 0 : static inline enum bp_type_idx find_slot_idx(u64 bp_type)
77 : {
78 0 : if (bp_type & HW_BREAKPOINT_RW)
79 : return TYPE_DATA;
80 :
81 : return TYPE_INST;
82 : }
83 :
84 : /*
85 : * Report the maximum number of pinned breakpoints a task
86 : * has in this cpu
87 : */
88 0 : static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
89 : {
90 0 : unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
91 0 : int i;
92 :
93 0 : for (i = nr_slots[type] - 1; i >= 0; i--) {
94 0 : if (tsk_pinned[i] > 0)
95 0 : return i + 1;
96 : }
97 :
98 : return 0;
99 : }
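/*
 * Illustrative sketch, not part of the original file: tsk_pinned[] is a
 * histogram in which slot n counts the tasks owning exactly n+1 pinned
 * breakpoints on this cpu.  With nr_slots[type] == 4, two tasks owning
 * one breakpoint each and one task owning three give
 * tsk_pinned = {2, 0, 1, 0}, and max_task_bp_pinned() above returns 3.
 * The hypothetical helper below sums the histogram back into a task
 * count purely to make that encoding explicit.
 */
static unsigned int __maybe_unused bp_example_nr_bp_tasks(int cpu,
							   enum bp_type_idx type)
{
	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
	unsigned int nr_tasks = 0;
	int i;

	/* Slot i counts the tasks that own exactly i+1 breakpoints. */
	for (i = 0; i < nr_slots[type]; i++)
		nr_tasks += tsk_pinned[i];

	return nr_tasks;
}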
100 :
101 : /*
102 : * Count the number of breakpoints of the same type and same task.
103 : * The given event must not be on the list.
104 : */
105 0 : static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
106 : {
107 0 : struct task_struct *tsk = bp->hw.target;
108 0 : struct perf_event *iter;
109 0 : int count = 0;
110 :
111 0 : list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
112 0 : if (iter->hw.target == tsk &&
113 0 : find_slot_idx(iter->attr.bp_type) == type &&
114 0 : (iter->cpu < 0 || cpu == iter->cpu))
115 0 : count += hw_breakpoint_weight(iter);
116 : }
117 :
118 0 : return count;
119 : }
120 :
121 0 : static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
122 : {
123 0 : if (bp->cpu >= 0)
124 0 : return cpumask_of(bp->cpu);
125 : return cpu_possible_mask;
126 : }
127 :
128 : /*
129 : * Report the number of pinned/un-pinned breakpoints we have in
130 : * a given cpu (cpu > -1) or in all of them (cpu = -1).
131 : */
132 : static void
133 0 : fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
134 : enum bp_type_idx type)
135 : {
136 0 : const struct cpumask *cpumask = cpumask_of_bp(bp);
137 0 : int cpu;
138 :
139 0 : for_each_cpu(cpu, cpumask) {
140 0 : struct bp_cpuinfo *info = get_bp_info(cpu, type);
141 0 : int nr;
142 :
143 0 : nr = info->cpu_pinned;
144 0 : if (!bp->hw.target)
145 0 : nr += max_task_bp_pinned(cpu, type);
146 : else
147 0 : nr += task_bp_pinned(cpu, bp, type);
148 :
149 0 : if (nr > slots->pinned)
150 0 : slots->pinned = nr;
151 :
152 0 : nr = info->flexible;
153 0 : if (nr > slots->flexible)
154 0 : slots->flexible = nr;
155 : }
156 0 : }
157 :
158 : /*
159 : * For now, continue to consider flexible as pinned, until we can
160 : * ensure no flexible event can ever be scheduled before a pinned event
161 : * on the same cpu.
162 : */
163 : static void
164 0 : fetch_this_slot(struct bp_busy_slots *slots, int weight)
165 : {
166 0 : slots->pinned += weight;
167 : }
168 :
169 : /*
170 : * Add/remove a pinned breakpoint for the given task in our constraint table
171 : */
172 0 : static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
173 : enum bp_type_idx type, int weight)
174 : {
175 0 : unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
176 0 : int old_idx, new_idx;
177 :
178 0 : old_idx = task_bp_pinned(cpu, bp, type) - 1;
179 0 : new_idx = old_idx + weight;
180 :
181 0 : if (old_idx >= 0)
182 0 : tsk_pinned[old_idx]--;
183 0 : if (new_idx >= 0)
184 0 : tsk_pinned[new_idx]++;
185 0 : }
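/*
 * Worked example (illustrative, not part of the original file): suppose
 * the task already owns two pinned breakpoints on this cpu, so
 * task_bp_pinned() returns 2 and old_idx == 1.  Adding one more
 * breakpoint of weight 1 gives new_idx == 2: tsk_pinned[1] is
 * decremented (the task no longer owns exactly two) and tsk_pinned[2]
 * is incremented (it now owns exactly three).  Removal runs the same
 * code with a negative weight and shifts the histogram back down.
 */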
186 :
187 : /*
188 : * Add/remove the given breakpoint in our constraint table
189 : */
190 : static void
191 0 : toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
192 : int weight)
193 : {
194 0 : const struct cpumask *cpumask = cpumask_of_bp(bp);
195 0 : int cpu;
196 :
197 0 : if (!enable)
198 0 : weight = -weight;
199 :
200 : /* Pinned counter cpu profiling */
201 0 : if (!bp->hw.target) {
202 0 : get_bp_info(bp->cpu, type)->cpu_pinned += weight;
203 0 : return;
204 : }
205 :
206 : /* Pinned counter task profiling */
207 0 : for_each_cpu(cpu, cpumask)
208 0 : toggle_bp_task_slot(bp, cpu, type, weight);
209 :
210 0 : if (enable)
211 0 : list_add_tail(&bp->hw.bp_list, &bp_task_head);
212 : else
213 0 : list_del(&bp->hw.bp_list);
214 : }
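/*
 * Descriptive note (added for clarity, not in the original file): a
 * cpu-bound counter (bp->hw.target == NULL) only touches the cpu_pinned
 * count of bp->cpu, whereas a task-bound counter updates the
 * tsk_pinned[] histogram of every cpu it may run on and is linked
 * into/removed from bp_task_head so task_bp_pinned() can find its
 * siblings.
 */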
215 :
216 0 : __weak int arch_reserve_bp_slot(struct perf_event *bp)
217 : {
218 0 : return 0;
219 : }
220 :
221 0 : __weak void arch_release_bp_slot(struct perf_event *bp)
222 : {
223 0 : }
224 :
225 : /*
226 : * Function to perform processor-specific cleanup during unregistration
227 : */
228 0 : __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
229 : {
230 : /*
231 : * A weak stub function here for those archs that don't define
232 : * it inside arch/.../kernel/hw_breakpoint.c
233 : */
234 0 : }
235 :
236 : /*
237 : * Constraints to check before allowing this new breakpoint counter:
238 : *
239 : * == Non-pinned counter == (Considered as pinned for now)
240 : *
241 : * - If attached to a single cpu, check:
242 : *
243 : * (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
244 : * + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
245 : *
246 : * -> If there are already non-pinned counters in this cpu, it means
247 : * there is already a free slot for them.
248 : * Otherwise, we check that the maximum number of per task
249 : * breakpoints (for this cpu) plus the number of per cpu breakpoints
250 : * (for this cpu) doesn't use up every register.
251 : *
252 : * - If attached to every cpu, check:
253 : *
254 : * (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
255 : * + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
256 : *
257 : * -> This is roughly the same, except we check the number of per cpu
258 : * bp for every cpu and we keep the max one. Same for the per task
259 : * breakpoints.
260 : *
261 : *
262 : * == Pinned counter ==
263 : *
264 : * - If attached to a single cpu, check:
265 : *
266 : * ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
267 : * + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
268 : *
269 : * -> Same checks as before. But now the info->flexible, if any, must keep
270 : * at least one register (or they will never be scheduled).
271 : *
272 : * - If attached to every cpu, check:
273 : *
274 : * ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
275 : * + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
276 : */
277 0 : static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
278 : {
279 0 : struct bp_busy_slots slots = {0};
280 0 : enum bp_type_idx type;
281 0 : int weight;
282 0 : int ret;
283 :
284 : /* We couldn't initialize breakpoint constraints on boot */
285 0 : if (!constraints_initialized)
286 : return -ENOMEM;
287 :
288 : /* Basic checks */
289 0 : if (bp_type == HW_BREAKPOINT_EMPTY ||
290 0 : bp_type == HW_BREAKPOINT_INVALID)
291 : return -EINVAL;
292 :
293 0 : type = find_slot_idx(bp_type);
294 0 : weight = hw_breakpoint_weight(bp);
295 :
296 0 : fetch_bp_busy_slots(&slots, bp, type);
297 : /*
298 : * Simulate the addition of this breakpoint to the constraints
299 : * and see the result.
300 : */
301 0 : fetch_this_slot(&slots, weight);
302 :
303 : /* Flexible counters need to keep at least one slot */
304 0 : if (slots.pinned + (!!slots.flexible) > nr_slots[type])
305 : return -ENOSPC;
306 :
307 0 : ret = arch_reserve_bp_slot(bp);
308 0 : if (ret)
309 : return ret;
310 :
311 0 : toggle_bp_slot(bp, true, type, weight);
312 :
313 0 : return 0;
314 : }
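/*
 * Worked example (illustrative, not part of the original file), assuming
 * nr_slots[TYPE_DATA] == 4: a cpu carries one cpu-pinned data breakpoint
 * and the target task already owns two there, so fetch_bp_busy_slots()
 * reports slots.pinned == 3.  Simulating the new weight-1 breakpoint
 * raises that to 4; with no flexible events, 4 <= nr_slots[TYPE_DATA]
 * and the slot is granted.  A further request for the same task would
 * simulate 1 + 3 + 1 == 5 > 4 and fail with -ENOSPC.
 */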
315 :
316 0 : int reserve_bp_slot(struct perf_event *bp)
317 : {
318 0 : int ret;
319 :
320 0 : mutex_lock(&nr_bp_mutex);
321 :
322 0 : ret = __reserve_bp_slot(bp, bp->attr.bp_type);
323 :
324 0 : mutex_unlock(&nr_bp_mutex);
325 :
326 0 : return ret;
327 : }
328 :
329 0 : static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
330 : {
331 0 : enum bp_type_idx type;
332 0 : int weight;
333 :
334 0 : arch_release_bp_slot(bp);
335 :
336 0 : type = find_slot_idx(bp_type);
337 0 : weight = hw_breakpoint_weight(bp);
338 0 : toggle_bp_slot(bp, false, type, weight);
339 0 : }
340 :
341 0 : void release_bp_slot(struct perf_event *bp)
342 : {
343 0 : mutex_lock(&nr_bp_mutex);
344 :
345 0 : arch_unregister_hw_breakpoint(bp);
346 0 : __release_bp_slot(bp, bp->attr.bp_type);
347 :
348 0 : mutex_unlock(&nr_bp_mutex);
349 0 : }
350 :
351 0 : static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
352 : {
353 0 : int err;
354 :
355 0 : __release_bp_slot(bp, old_type);
356 :
357 0 : err = __reserve_bp_slot(bp, new_type);
358 0 : if (err) {
359 : /*
360 : * Reserve the old_type slot back in case
361 : * there's no space for the new type.
362 : *
363 : * This must succeed, because we just released
364 : * the old_type slot in the __release_bp_slot
365 : * call above. If not, something is broken.
366 : */
367 0 : WARN_ON(__reserve_bp_slot(bp, old_type));
368 : }
369 :
370 0 : return err;
371 : }
372 :
373 0 : static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
374 : {
375 0 : int ret;
376 :
377 0 : mutex_lock(&nr_bp_mutex);
378 0 : ret = __modify_bp_slot(bp, old_type, new_type);
379 0 : mutex_unlock(&nr_bp_mutex);
380 0 : return ret;
381 : }
382 :
383 : /*
384 : * Allow the kernel debugger to reserve breakpoint slots without
385 : * taking a lock, using the dbg_* variants of the reserve and
386 : * release breakpoint slot functions.
387 : */
388 0 : int dbg_reserve_bp_slot(struct perf_event *bp)
389 : {
390 0 : if (mutex_is_locked(&nr_bp_mutex))
391 : return -1;
392 :
393 0 : return __reserve_bp_slot(bp, bp->attr.bp_type);
394 : }
395 :
396 0 : int dbg_release_bp_slot(struct perf_event *bp)
397 : {
398 0 : if (mutex_is_locked(&nr_bp_mutex))
399 : return -1;
400 :
401 0 : __release_bp_slot(bp, bp->attr.bp_type);
402 :
403 0 : return 0;
404 : }
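/*
 * Illustrative sketch, not part of the original file: a non-sleeping
 * caller (such as a kernel-debugger path) cannot block on nr_bp_mutex,
 * so it would use the dbg_* variants and treat -1 as "try again later"
 * instead of waiting.  The helper name is hypothetical.
 */
static int __maybe_unused bp_example_try_reserve(struct perf_event *bp)
{
	int ret = dbg_reserve_bp_slot(bp);

	/* -1 means somebody currently holds nr_bp_mutex. */
	if (ret == -1)
		return -EBUSY;

	return ret;
}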
405 :
406 0 : static int hw_breakpoint_parse(struct perf_event *bp,
407 : const struct perf_event_attr *attr,
408 : struct arch_hw_breakpoint *hw)
409 : {
410 0 : int err;
411 :
412 0 : err = hw_breakpoint_arch_parse(bp, attr, hw);
413 0 : if (err)
414 : return err;
415 :
416 0 : if (arch_check_bp_in_kernelspace(hw)) {
417 0 : if (attr->exclude_kernel)
418 : return -EINVAL;
419 : /*
420 : * Don't let unprivileged users set a breakpoint in the trap
421 : * path to avoid trap recursion attacks.
422 : */
423 0 : if (!capable(CAP_SYS_ADMIN))
424 0 : return -EPERM;
425 : }
426 :
427 : return 0;
428 : }
429 :
430 0 : int register_perf_hw_breakpoint(struct perf_event *bp)
431 : {
432 0 : struct arch_hw_breakpoint hw = { };
433 0 : int err;
434 :
435 0 : err = reserve_bp_slot(bp);
436 0 : if (err)
437 : return err;
438 :
439 0 : err = hw_breakpoint_parse(bp, &bp->attr, &hw);
440 0 : if (err) {
441 0 : release_bp_slot(bp);
442 0 : return err;
443 : }
444 :
445 0 : bp->hw.info = hw;
446 :
447 0 : return 0;
448 : }
449 :
450 : /**
451 : * register_user_hw_breakpoint - register a hardware breakpoint for user space
452 : * @attr: breakpoint attributes
453 : * @triggered: callback to trigger when we hit the breakpoint
454 : * @tsk: pointer to 'task_struct' of the process to which the address belongs
455 : */
456 : struct perf_event *
457 0 : register_user_hw_breakpoint(struct perf_event_attr *attr,
458 : perf_overflow_handler_t triggered,
459 : void *context,
460 : struct task_struct *tsk)
461 : {
462 0 : return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
463 : context);
464 : }
465 : EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
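/*
 * Illustrative usage sketch, not part of the original file: registering
 * a 4-byte write watchpoint on behalf of a traced task.  The address,
 * handler and task are placeholders; the third argument (@context) is
 * handed back to @triggered on every hit.
 */
static __maybe_unused struct perf_event *
bp_example_user_watchpoint(unsigned long addr,
			   perf_overflow_handler_t triggered,
			   struct task_struct *tsk)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* PERF_TYPE_BREAKPOINT, pinned */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, triggered, NULL, tsk);
}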
466 :
467 0 : static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
468 : struct perf_event_attr *from)
469 : {
470 0 : to->bp_addr = from->bp_addr;
471 0 : to->bp_type = from->bp_type;
472 0 : to->bp_len = from->bp_len;
473 0 : to->disabled = from->disabled;
474 : }
475 :
476 : int
477 0 : modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
478 : bool check)
479 : {
480 0 : struct arch_hw_breakpoint hw = { };
481 0 : int err;
482 :
483 0 : err = hw_breakpoint_parse(bp, attr, &hw);
484 0 : if (err)
485 : return err;
486 :
487 0 : if (check) {
488 0 : struct perf_event_attr old_attr;
489 :
490 0 : old_attr = bp->attr;
491 0 : hw_breakpoint_copy_attr(&old_attr, attr);
492 0 : if (memcmp(&old_attr, attr, sizeof(*attr)))
493 0 : return -EINVAL;
494 : }
495 :
496 0 : if (bp->attr.bp_type != attr->bp_type) {
497 0 : err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
498 0 : if (err)
499 : return err;
500 : }
501 :
502 0 : hw_breakpoint_copy_attr(&bp->attr, attr);
503 0 : bp->hw.info = hw;
504 :
505 0 : return 0;
506 : }
507 :
508 : /**
509 : * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
510 : * @bp: the breakpoint structure to modify
511 : * @attr: new breakpoint attributes
512 : */
513 0 : int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
514 : {
515 0 : int err;
516 :
517 : /*
518 : * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
519 : * will not be possible to raise IPIs that invoke __perf_event_disable.
520 : * So call the function directly after making sure we are targeting the
521 : * current task.
522 : */
523 0 : if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
524 0 : perf_event_disable_local(bp);
525 : else
526 0 : perf_event_disable(bp);
527 :
528 0 : err = modify_user_hw_breakpoint_check(bp, attr, false);
529 :
530 0 : if (!bp->attr.disabled)
531 0 : perf_event_enable(bp);
532 :
533 0 : return err;
534 : }
535 : EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
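/*
 * Illustrative sketch, not part of the original file: moving an existing
 * user breakpoint to a new address.  Only the bp_* fields and "disabled"
 * of the new attributes are copied back by the modification path above;
 * the helper name is hypothetical.
 */
static int __maybe_unused bp_example_move_watchpoint(struct perf_event *bp,
						     unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;

	attr.bp_addr = new_addr;

	return modify_user_hw_breakpoint(bp, &attr);
}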
536 :
537 : /**
538 : * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
539 : * @bp: the breakpoint structure to unregister
540 : */
541 8148 : void unregister_hw_breakpoint(struct perf_event *bp)
542 : {
543 8148 : if (!bp)
544 : return;
545 0 : perf_event_release_kernel(bp);
546 : }
547 : EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
548 :
549 : /**
550 : * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
551 : * @attr: breakpoint attributes
552 : * @triggered: callback to trigger when we hit the breakpoint
553 : *
554 : * @return a set of per_cpu pointers to perf events
555 : */
556 : struct perf_event * __percpu *
557 0 : register_wide_hw_breakpoint(struct perf_event_attr *attr,
558 : perf_overflow_handler_t triggered,
559 : void *context)
560 : {
561 0 : struct perf_event * __percpu *cpu_events, *bp;
562 0 : long err = 0;
563 0 : int cpu;
564 :
565 0 : cpu_events = alloc_percpu(typeof(*cpu_events));
566 0 : if (!cpu_events)
567 0 : return (void __percpu __force *)ERR_PTR(-ENOMEM);
568 :
569 0 : get_online_cpus();
570 0 : for_each_online_cpu(cpu) {
571 0 : bp = perf_event_create_kernel_counter(attr, cpu, NULL,
572 : triggered, context);
573 0 : if (IS_ERR(bp)) {
574 0 : err = PTR_ERR(bp);
575 0 : break;
576 : }
577 :
578 0 : per_cpu(*cpu_events, cpu) = bp;
579 : }
580 0 : put_online_cpus();
581 :
582 0 : if (likely(!err))
583 : return cpu_events;
584 :
585 0 : unregister_wide_hw_breakpoint(cpu_events);
586 0 : return (void __percpu __force *)ERR_PTR(err);
587 : }
588 : EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
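/*
 * Illustrative usage sketch, not part of the original file: a
 * system-wide write watchpoint on a kernel variable, in the spirit of
 * samples/hw_breakpoint/data_breakpoint.c.  The symbol name and handler
 * are placeholders and the symbol is assumed to resolve.
 */
static __maybe_unused struct perf_event * __percpu *
bp_example_watch_ksym(const char *ksym, perf_overflow_handler_t handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name(ksym);
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	/* One pinned event per online cpu; undo with unregister_wide_hw_breakpoint(). */
	return register_wide_hw_breakpoint(&attr, handler, NULL);
}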
589 :
590 : /**
591 : * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
592 : * @cpu_events: the per cpu set of events to unregister
593 : */
594 0 : void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
595 : {
596 0 : int cpu;
597 :
598 0 : for_each_possible_cpu(cpu)
599 0 : unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));
600 :
601 0 : free_percpu(cpu_events);
602 0 : }
603 : EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
604 :
605 : static struct notifier_block hw_breakpoint_exceptions_nb = {
606 : .notifier_call = hw_breakpoint_exceptions_notify,
607 : /* we need to be notified first */
608 : .priority = 0x7fffffff
609 : };
610 :
611 0 : static void bp_perf_event_destroy(struct perf_event *event)
612 : {
613 0 : release_bp_slot(event);
614 0 : }
615 :
616 0 : static int hw_breakpoint_event_init(struct perf_event *bp)
617 : {
618 0 : int err;
619 :
620 0 : if (bp->attr.type != PERF_TYPE_BREAKPOINT)
621 : return -ENOENT;
622 :
623 : /*
624 : * no branch sampling for breakpoint events
625 : */
626 0 : if (has_branch_stack(bp))
627 : return -EOPNOTSUPP;
628 :
629 0 : err = register_perf_hw_breakpoint(bp);
630 0 : if (err)
631 : return err;
632 :
633 0 : bp->destroy = bp_perf_event_destroy;
634 :
635 0 : return 0;
636 : }
637 :
638 0 : static int hw_breakpoint_add(struct perf_event *bp, int flags)
639 : {
640 0 : if (!(flags & PERF_EF_START))
641 0 : bp->hw.state = PERF_HES_STOPPED;
642 :
643 0 : if (is_sampling_event(bp)) {
644 0 : bp->hw.last_period = bp->hw.sample_period;
645 0 : perf_swevent_set_period(bp);
646 : }
647 :
648 0 : return arch_install_hw_breakpoint(bp);
649 : }
650 :
651 0 : static void hw_breakpoint_del(struct perf_event *bp, int flags)
652 : {
653 0 : arch_uninstall_hw_breakpoint(bp);
654 0 : }
655 :
656 0 : static void hw_breakpoint_start(struct perf_event *bp, int flags)
657 : {
658 0 : bp->hw.state = 0;
659 0 : }
660 :
661 0 : static void hw_breakpoint_stop(struct perf_event *bp, int flags)
662 : {
663 0 : bp->hw.state = PERF_HES_STOPPED;
664 0 : }
665 :
666 : static struct pmu perf_breakpoint = {
667 : .task_ctx_nr = perf_sw_context, /* could eventually get its own */
668 :
669 : .event_init = hw_breakpoint_event_init,
670 : .add = hw_breakpoint_add,
671 : .del = hw_breakpoint_del,
672 : .start = hw_breakpoint_start,
673 : .stop = hw_breakpoint_stop,
674 : .read = hw_breakpoint_pmu_read,
675 : };
676 :
677 1 : int __init init_hw_breakpoint(void)
678 : {
679 1 : int cpu, err_cpu;
680 1 : int i;
681 :
682 2 : for (i = 0; i < TYPE_MAX; i++)
683 1 : nr_slots[i] = hw_breakpoint_slots(i);
684 :
685 5 : for_each_possible_cpu(cpu) {
686 8 : for (i = 0; i < TYPE_MAX; i++) {
687 4 : struct bp_cpuinfo *info = get_bp_info(cpu, i);
688 :
689 4 : info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
690 : GFP_KERNEL);
691 4 : if (!info->tsk_pinned)
692 0 : goto err_alloc;
693 : }
694 : }
695 :
696 1 : constraints_initialized = 1;
697 :
698 1 : perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);
699 :
700 1 : return register_die_notifier(&hw_breakpoint_exceptions_nb);
701 :
702 0 : err_alloc:
703 0 : for_each_possible_cpu(err_cpu) {
704 0 : for (i = 0; i < TYPE_MAX; i++)
705 0 : kfree(get_bp_info(err_cpu, i)->tsk_pinned);
706 0 : if (err_cpu == cpu)
707 : break;
708 : }
709 :
710 : return -ENOMEM;
711 : }
712 :
713 :