Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * kernel/lockdep.c
4 : *
5 : * Runtime locking correctness validator
6 : *
7 : * Started by Ingo Molnar:
8 : *
9 : * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 : * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
11 : *
12 : * this code maps all the lock dependencies as they occur in a live kernel
13 : * and will warn about the following classes of locking bugs:
14 : *
15 : * - lock inversion scenarios
16 : * - circular lock dependencies
17 : * - hardirq/softirq safe/unsafe locking bugs
18 : *
19 : * Bugs are reported even if the current locking scenario does not cause
20 : * any deadlock at this point.
21 : *
22 : * I.e. if anytime in the past two locks were taken in a different order,
23 : * even if it happened for another task, even if those were different
24 : * locks (but of the same class as this lock), this code will detect it.
25 : *
26 : * Thanks to Arjan van de Ven for coming up with the initial idea of
27 : * mapping lock dependencies runtime.
28 : */
29 : #define DISABLE_BRANCH_PROFILING
30 : #include <linux/mutex.h>
31 : #include <linux/sched.h>
32 : #include <linux/sched/clock.h>
33 : #include <linux/sched/task.h>
34 : #include <linux/sched/mm.h>
35 : #include <linux/delay.h>
36 : #include <linux/module.h>
37 : #include <linux/proc_fs.h>
38 : #include <linux/seq_file.h>
39 : #include <linux/spinlock.h>
40 : #include <linux/kallsyms.h>
41 : #include <linux/interrupt.h>
42 : #include <linux/stacktrace.h>
43 : #include <linux/debug_locks.h>
44 : #include <linux/irqflags.h>
45 : #include <linux/utsname.h>
46 : #include <linux/hash.h>
47 : #include <linux/ftrace.h>
48 : #include <linux/stringify.h>
49 : #include <linux/bitmap.h>
50 : #include <linux/bitops.h>
51 : #include <linux/gfp.h>
52 : #include <linux/random.h>
53 : #include <linux/jhash.h>
54 : #include <linux/nmi.h>
55 : #include <linux/rcupdate.h>
56 : #include <linux/kprobes.h>
57 :
58 : #include <asm/sections.h>
59 :
60 : #include "lockdep_internals.h"
61 :
62 : #define CREATE_TRACE_POINTS
63 : #include <trace/events/lock.h>
64 :
65 : #ifdef CONFIG_PROVE_LOCKING
66 : int prove_locking = 1;
67 : module_param(prove_locking, int, 0644);
68 : #else
69 : #define prove_locking 0
70 : #endif
71 :
72 : #ifdef CONFIG_LOCK_STAT
73 : int lock_stat = 1;
74 : module_param(lock_stat, int, 0644);
75 : #else
76 : #define lock_stat 0
77 : #endif
78 :
79 : DEFINE_PER_CPU(unsigned int, lockdep_recursion);
80 : EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
81 :
82 73807467 : static __always_inline bool lockdep_enabled(void)
83 : {
84 1 : if (!debug_locks)
85 : return false;
86 :
87 73806535 : if (this_cpu_read(lockdep_recursion))
88 : return false;
89 :
90 73810140 : if (current->lockdep_recursion)
91 2 : return false;
92 :
93 : return true;
94 : }
95 :
96 : /*
97 : * lockdep_lock: protects the lockdep graph, the hashes and the
98 : * class/list/hash allocators.
99 : *
100 : * This is one of the rare exceptions where it's justified
101 : * to use a raw spinlock - we really dont want the spinlock
102 : * code to recurse back into the lockdep code...
103 : */
104 : static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
105 : static struct task_struct *__owner;
106 :
107 10312 : static inline void lockdep_lock(void)
108 : {
109 10312 : DEBUG_LOCKS_WARN_ON(!irqs_disabled());
110 :
111 10312 : __this_cpu_inc(lockdep_recursion);
112 10312 : arch_spin_lock(&__lock);
113 10314 : __owner = current;
114 10314 : }
115 :
116 10314 : static inline void lockdep_unlock(void)
117 : {
118 10314 : DEBUG_LOCKS_WARN_ON(!irqs_disabled());
119 :
120 10314 : if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
121 0 : return;
122 :
123 10314 : __owner = NULL;
124 10314 : arch_spin_unlock(&__lock);
125 10314 : __this_cpu_dec(lockdep_recursion);
126 : }
127 :
128 23767 : static inline bool lockdep_assert_locked(void)
129 : {
130 23767 : return DEBUG_LOCKS_WARN_ON(__owner != current);
131 : }
132 :
133 : static struct task_struct *lockdep_selftest_task_struct;
134 :
135 :
136 10311 : static int graph_lock(void)
137 : {
138 10311 : lockdep_lock();
139 : /*
140 : * Make sure that if another CPU detected a bug while
141 : * walking the graph we dont change it (while the other
142 : * CPU is busy printing out stuff with the graph lock
143 : * dropped already)
144 : */
145 10313 : if (!debug_locks) {
146 0 : lockdep_unlock();
147 0 : return 0;
148 : }
149 : return 1;
150 : }
151 :
152 10313 : static inline void graph_unlock(void)
153 : {
154 10313 : lockdep_unlock();
155 8284 : }
156 :
157 : /*
158 : * Turn lock debugging off and return with 0 if it was off already,
159 : * and also release the graph lock:
160 : */
161 0 : static inline int debug_locks_off_graph_unlock(void)
162 : {
163 0 : int ret = debug_locks_off();
164 :
165 0 : lockdep_unlock();
166 :
167 0 : return ret;
168 : }
169 :
170 : unsigned long nr_list_entries;
171 : static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
172 : static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
173 :
174 : /*
175 : * All data structures here are protected by the global debug_lock.
176 : *
177 : * nr_lock_classes is the number of elements of lock_classes[] that is
178 : * in use.
179 : */
180 : #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
181 : #define KEYHASH_SIZE (1UL << KEYHASH_BITS)
182 : static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
183 : unsigned long nr_lock_classes;
184 : unsigned long nr_zapped_classes;
185 : #ifndef CONFIG_DEBUG_LOCKDEP
186 : static
187 : #endif
188 : struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
189 : static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
190 :
191 98479032 : static inline struct lock_class *hlock_class(struct held_lock *hlock)
192 : {
193 98479032 : unsigned int class_idx = hlock->class_idx;
194 :
195 : /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
196 98479032 : barrier();
197 :
198 98725741 : if (!test_bit(class_idx, lock_classes_in_use)) {
199 : /*
200 : * Someone passed in garbage, we give up.
201 : */
202 0 : DEBUG_LOCKS_WARN_ON(1);
203 0 : return NULL;
204 : }
205 :
206 : /*
207 : * At this point, if the passed hlock->class_idx is still garbage,
208 : * we just have to live with it
209 : */
210 99206021 : return lock_classes + class_idx;
211 : }
212 :
213 : #ifdef CONFIG_LOCK_STAT
214 : static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
215 :
216 : static inline u64 lockstat_clock(void)
217 : {
218 : return local_clock();
219 : }
220 :
221 : static int lock_point(unsigned long points[], unsigned long ip)
222 : {
223 : int i;
224 :
225 : for (i = 0; i < LOCKSTAT_POINTS; i++) {
226 : if (points[i] == 0) {
227 : points[i] = ip;
228 : break;
229 : }
230 : if (points[i] == ip)
231 : break;
232 : }
233 :
234 : return i;
235 : }
236 :
237 : static void lock_time_inc(struct lock_time *lt, u64 time)
238 : {
239 : if (time > lt->max)
240 : lt->max = time;
241 :
242 : if (time < lt->min || !lt->nr)
243 : lt->min = time;
244 :
245 : lt->total += time;
246 : lt->nr++;
247 : }
248 :
249 : static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
250 : {
251 : if (!src->nr)
252 : return;
253 :
254 : if (src->max > dst->max)
255 : dst->max = src->max;
256 :
257 : if (src->min < dst->min || !dst->nr)
258 : dst->min = src->min;
259 :
260 : dst->total += src->total;
261 : dst->nr += src->nr;
262 : }
263 :
264 : struct lock_class_stats lock_stats(struct lock_class *class)
265 : {
266 : struct lock_class_stats stats;
267 : int cpu, i;
268 :
269 : memset(&stats, 0, sizeof(struct lock_class_stats));
270 : for_each_possible_cpu(cpu) {
271 : struct lock_class_stats *pcs =
272 : &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
273 :
274 : for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
275 : stats.contention_point[i] += pcs->contention_point[i];
276 :
277 : for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
278 : stats.contending_point[i] += pcs->contending_point[i];
279 :
280 : lock_time_add(&pcs->read_waittime, &stats.read_waittime);
281 : lock_time_add(&pcs->write_waittime, &stats.write_waittime);
282 :
283 : lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
284 : lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
285 :
286 : for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
287 : stats.bounces[i] += pcs->bounces[i];
288 : }
289 :
290 : return stats;
291 : }
292 :
293 : void clear_lock_stats(struct lock_class *class)
294 : {
295 : int cpu;
296 :
297 : for_each_possible_cpu(cpu) {
298 : struct lock_class_stats *cpu_stats =
299 : &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
300 :
301 : memset(cpu_stats, 0, sizeof(struct lock_class_stats));
302 : }
303 : memset(class->contention_point, 0, sizeof(class->contention_point));
304 : memset(class->contending_point, 0, sizeof(class->contending_point));
305 : }
306 :
307 : static struct lock_class_stats *get_lock_stats(struct lock_class *class)
308 : {
309 : return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
310 : }
311 :
312 : static void lock_release_holdtime(struct held_lock *hlock)
313 : {
314 : struct lock_class_stats *stats;
315 : u64 holdtime;
316 :
317 : if (!lock_stat)
318 : return;
319 :
320 : holdtime = lockstat_clock() - hlock->holdtime_stamp;
321 :
322 : stats = get_lock_stats(hlock_class(hlock));
323 : if (hlock->read)
324 : lock_time_inc(&stats->read_holdtime, holdtime);
325 : else
326 : lock_time_inc(&stats->write_holdtime, holdtime);
327 : }
328 : #else
329 : static inline void lock_release_holdtime(struct held_lock *hlock)
330 : {
331 : }
332 : #endif
333 :
334 : /*
335 : * We keep a global list of all lock classes. The list is only accessed with
336 : * the lockdep spinlock lock held. free_lock_classes is a list with free
337 : * elements. These elements are linked together by the lock_entry member in
338 : * struct lock_class.
339 : */
340 : LIST_HEAD(all_lock_classes);
341 : static LIST_HEAD(free_lock_classes);
342 :
343 : /**
344 : * struct pending_free - information about data structures about to be freed
345 : * @zapped: Head of a list with struct lock_class elements.
346 : * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
347 : * are about to be freed.
348 : */
349 : struct pending_free {
350 : struct list_head zapped;
351 : DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
352 : };
353 :
354 : /**
355 : * struct delayed_free - data structures used for delayed freeing
356 : *
357 : * A data structure for delayed freeing of data structures that may be
358 : * accessed by RCU readers at the time these were freed.
359 : *
360 : * @rcu_head: Used to schedule an RCU callback for freeing data structures.
361 : * @index: Index of @pf to which freed data structures are added.
362 : * @scheduled: Whether or not an RCU callback has been scheduled.
363 : * @pf: Array with information about data structures about to be freed.
364 : */
365 : static struct delayed_free {
366 : struct rcu_head rcu_head;
367 : int index;
368 : int scheduled;
369 : struct pending_free pf[2];
370 : } delayed_free;
371 :
372 : /*
373 : * The lockdep classes are in a hash-table as well, for fast lookup:
374 : */
375 : #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
376 : #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
377 : #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
378 : #define classhashentry(key) (classhash_table + __classhashfn((key)))
379 :
380 : static struct hlist_head classhash_table[CLASSHASH_SIZE];
381 :
382 : /*
383 : * We put the lock dependency chains into a hash-table as well, to cache
384 : * their existence:
385 : */
386 : #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
387 : #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
388 : #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
389 : #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
390 :
391 : static struct hlist_head chainhash_table[CHAINHASH_SIZE];
392 :
393 : /*
394 : * the id of held_lock
395 : */
396 58779452 : static inline u16 hlock_id(struct held_lock *hlock)
397 : {
398 58779452 : BUILD_BUG_ON(MAX_LOCKDEP_KEYS_BITS + 2 > 16);
399 :
400 58779452 : return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
401 : }
402 :
403 4606 : static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
404 : {
405 4606 : return hlock_id & (MAX_LOCKDEP_KEYS - 1);
406 : }
407 :
408 : /*
409 : * The hash key of the lock dependency chains is a hash itself too:
410 : * it's a hash of all locks taken up to that lock, including that lock.
411 : * It's a 64-bit hash, because it's important for the keys to be
412 : * unique.
413 : */
414 48474739 : static inline u64 iterate_chain_key(u64 key, u32 idx)
415 : {
416 48474739 : u32 k0 = key, k1 = key >> 32;
417 :
418 48474739 : __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
419 :
420 48474739 : return k0 | (u64)k1 << 32;
421 : }
422 :
423 1029 : void lockdep_init_task(struct task_struct *task)
424 : {
425 1029 : task->lockdep_depth = 0; /* no locks held yet */
426 1029 : task->curr_chain_key = INITIAL_CHAIN_KEY;
427 1029 : task->lockdep_recursion = 0;
428 1029 : }
429 :
430 85614869 : static __always_inline void lockdep_recursion_inc(void)
431 : {
432 85614869 : __this_cpu_inc(lockdep_recursion);
433 : }
434 :
435 85496021 : static __always_inline void lockdep_recursion_finish(void)
436 : {
437 85496021 : if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
438 85496021 : __this_cpu_write(lockdep_recursion, 0);
439 : }
440 :
441 0 : void lockdep_set_selftest_task(struct task_struct *task)
442 : {
443 0 : lockdep_selftest_task_struct = task;
444 0 : }
445 :
446 : /*
447 : * Debugging switches:
448 : */
449 :
450 : #define VERBOSE 0
451 : #define VERY_VERBOSE 0
452 :
453 : #if VERBOSE
454 : # define HARDIRQ_VERBOSE 1
455 : # define SOFTIRQ_VERBOSE 1
456 : #else
457 : # define HARDIRQ_VERBOSE 0
458 : # define SOFTIRQ_VERBOSE 0
459 : #endif
460 :
461 : #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
462 : /*
463 : * Quick filtering for interesting events:
464 : */
465 : static int class_filter(struct lock_class *class)
466 : {
467 : #if 0
468 : /* Example */
469 : if (class->name_version == 1 &&
470 : !strcmp(class->name, "lockname"))
471 : return 1;
472 : if (class->name_version == 1 &&
473 : !strcmp(class->name, "&struct->lockfield"))
474 : return 1;
475 : #endif
476 : /* Filter everything else. 1 would be to allow everything else */
477 : return 0;
478 : }
479 : #endif
480 :
481 747 : static int verbose(struct lock_class *class)
482 : {
483 : #if VERBOSE
484 : return class_filter(class);
485 : #endif
486 747 : return 0;
487 : }
488 :
489 0 : static void print_lockdep_off(const char *bug_msg)
490 : {
491 0 : printk(KERN_DEBUG "%s\n", bug_msg);
492 0 : printk(KERN_DEBUG "turning off the locking correctness validator.\n");
493 : #ifdef CONFIG_LOCK_STAT
494 : printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
495 : #endif
496 0 : }
497 :
498 : unsigned long nr_stack_trace_entries;
499 :
500 : #ifdef CONFIG_PROVE_LOCKING
501 : /**
502 : * struct lock_trace - single stack backtrace
503 : * @hash_entry: Entry in a stack_trace_hash[] list.
504 : * @hash: jhash() of @entries.
505 : * @nr_entries: Number of entries in @entries.
506 : * @entries: Actual stack backtrace.
507 : */
508 : struct lock_trace {
509 : struct hlist_node hash_entry;
510 : u32 hash;
511 : u32 nr_entries;
512 : unsigned long entries[] __aligned(sizeof(unsigned long));
513 : };
514 : #define LOCK_TRACE_SIZE_IN_LONGS \
515 : (sizeof(struct lock_trace) / sizeof(unsigned long))
516 : /*
517 : * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
518 : */
519 : static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
520 : static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
521 :
522 1883 : static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
523 : {
524 1883 : return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
525 1499 : memcmp(t1->entries, t2->entries,
526 1499 : t1->nr_entries * sizeof(t1->entries[0])) == 0;
527 : }
528 :
529 5168 : static struct lock_trace *save_trace(void)
530 : {
531 5168 : struct lock_trace *trace, *t2;
532 5168 : struct hlist_head *hash_head;
533 5168 : u32 hash;
534 5168 : int max_entries;
535 :
536 5168 : BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
537 5168 : BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
538 :
539 5168 : trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
540 5168 : max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
541 : LOCK_TRACE_SIZE_IN_LONGS;
542 :
543 5168 : if (max_entries <= 0) {
544 0 : if (!debug_locks_off_graph_unlock())
545 : return NULL;
546 :
547 0 : print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
548 0 : dump_stack();
549 :
550 0 : return NULL;
551 : }
552 5168 : trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
553 :
554 5168 : hash = jhash(trace->entries, trace->nr_entries *
555 : sizeof(trace->entries[0]), 0);
556 5168 : trace->hash = hash;
557 5168 : hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
558 10720 : hlist_for_each_entry(t2, hash_head, hash_entry) {
559 1883 : if (traces_identical(trace, t2))
560 1499 : return t2;
561 : }
562 3669 : nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
563 3669 : hlist_add_head(&trace->hash_entry, hash_head);
564 :
565 3669 : return trace;
566 : }
567 :
568 : /* Return the number of stack traces in the stack_trace[] array. */
569 0 : u64 lockdep_stack_trace_count(void)
570 : {
571 0 : struct lock_trace *trace;
572 0 : u64 c = 0;
573 0 : int i;
574 :
575 0 : for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
576 0 : hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
577 0 : c++;
578 : }
579 : }
580 :
581 0 : return c;
582 : }
583 :
584 : /* Return the number of stack hash chains that have at least one stack trace. */
585 0 : u64 lockdep_stack_hash_count(void)
586 : {
587 0 : u64 c = 0;
588 0 : int i;
589 :
590 0 : for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
591 0 : if (!hlist_empty(&stack_trace_hash[i]))
592 0 : c++;
593 :
594 0 : return c;
595 : }
596 : #endif
597 :
598 : unsigned int nr_hardirq_chains;
599 : unsigned int nr_softirq_chains;
600 : unsigned int nr_process_chains;
601 : unsigned int max_lockdep_depth;
602 :
603 : #ifdef CONFIG_DEBUG_LOCKDEP
604 : /*
605 : * Various lockdep statistics:
606 : */
607 : DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
608 : #endif
609 :
610 : #ifdef CONFIG_PROVE_LOCKING
611 : /*
612 : * Locking printouts:
613 : */
614 :
615 : #define __USAGE(__STATE) \
616 : [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
617 : [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
618 : [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
619 : [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
620 :
621 : static const char *usage_str[] =
622 : {
623 : #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
624 : #include "lockdep_states.h"
625 : #undef LOCKDEP_STATE
626 : [LOCK_USED] = "INITIAL USE",
627 : [LOCK_USED_READ] = "INITIAL READ USE",
628 : /* abused as string storage for verify_lock_unused() */
629 : [LOCK_USAGE_STATES] = "IN-NMI",
630 : };
631 : #endif
632 :
633 0 : const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
634 : {
635 0 : return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
636 : }
637 :
638 1216 : static inline unsigned long lock_flag(enum lock_usage_bit bit)
639 : {
640 1216 : return 1UL << bit;
641 : }
642 :
643 0 : static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
644 : {
645 : /*
646 : * The usage character defaults to '.' (i.e., irqs disabled and not in
647 : * irq context), which is the safest usage category.
648 : */
649 0 : char c = '.';
650 :
651 : /*
652 : * The order of the following usage checks matters, which will
653 : * result in the outcome character as follows:
654 : *
655 : * - '+': irq is enabled and not in irq context
656 : * - '-': in irq context and irq is disabled
657 : * - '?': in irq context and irq is enabled
658 : */
659 0 : if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
660 0 : c = '+';
661 0 : if (class->usage_mask & lock_flag(bit))
662 0 : c = '?';
663 0 : } else if (class->usage_mask & lock_flag(bit))
664 0 : c = '-';
665 :
666 0 : return c;
667 : }
668 :
669 0 : void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
670 : {
671 0 : int i = 0;
672 :
673 : #define LOCKDEP_STATE(__STATE) \
674 : usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
675 : usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
676 : #include "lockdep_states.h"
677 : #undef LOCKDEP_STATE
678 :
679 0 : usage[i] = '\0';
680 0 : }
681 :
682 0 : static void __print_lock_name(struct lock_class *class)
683 : {
684 0 : char str[KSYM_NAME_LEN];
685 0 : const char *name;
686 :
687 0 : name = class->name;
688 0 : if (!name) {
689 0 : name = __get_key_name(class->key, str);
690 0 : printk(KERN_CONT "%s", name);
691 : } else {
692 0 : printk(KERN_CONT "%s", name);
693 0 : if (class->name_version > 1)
694 0 : printk(KERN_CONT "#%d", class->name_version);
695 0 : if (class->subclass)
696 0 : printk(KERN_CONT "/%d", class->subclass);
697 : }
698 0 : }
699 :
700 0 : static void print_lock_name(struct lock_class *class)
701 : {
702 0 : char usage[LOCK_USAGE_CHARS];
703 :
704 0 : get_usage_chars(class, usage);
705 :
706 0 : printk(KERN_CONT " (");
707 0 : __print_lock_name(class);
708 0 : printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
709 0 : class->wait_type_outer ?: class->wait_type_inner,
710 0 : class->wait_type_inner);
711 0 : }
712 :
713 0 : static void print_lockdep_cache(struct lockdep_map *lock)
714 : {
715 0 : const char *name;
716 0 : char str[KSYM_NAME_LEN];
717 :
718 0 : name = lock->name;
719 0 : if (!name)
720 0 : name = __get_key_name(lock->key->subkeys, str);
721 :
722 0 : printk(KERN_CONT "%s", name);
723 0 : }
724 :
725 0 : static void print_lock(struct held_lock *hlock)
726 : {
727 : /*
728 : * We can be called locklessly through debug_show_all_locks() so be
729 : * extra careful, the hlock might have been released and cleared.
730 : *
731 : * If this indeed happens, lets pretend it does not hurt to continue
732 : * to print the lock unless the hlock class_idx does not point to a
733 : * registered class. The rationale here is: since we don't attempt
734 : * to distinguish whether we are in this situation, if it just
735 : * happened we can't count on class_idx to tell either.
736 : */
737 0 : struct lock_class *lock = hlock_class(hlock);
738 :
739 0 : if (!lock) {
740 0 : printk(KERN_CONT "<RELEASED>\n");
741 0 : return;
742 : }
743 :
744 0 : printk(KERN_CONT "%px", hlock->instance);
745 0 : print_lock_name(lock);
746 0 : printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
747 : }
748 :
749 0 : static void lockdep_print_held_locks(struct task_struct *p)
750 : {
751 0 : int i, depth = READ_ONCE(p->lockdep_depth);
752 :
753 0 : if (!depth)
754 0 : printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
755 : else
756 0 : printk("%d lock%s held by %s/%d:\n", depth,
757 0 : depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
758 : /*
759 : * It's not reliable to print a task's held locks if it's not sleeping
760 : * and it's not the current task.
761 : */
762 0 : if (p->state == TASK_RUNNING && p != current)
763 : return;
764 0 : for (i = 0; i < depth; i++) {
765 0 : printk(" #%d: ", i);
766 0 : print_lock(p->held_locks + i);
767 : }
768 : }
769 :
770 0 : static void print_kernel_ident(void)
771 : {
772 0 : printk("%s %.*s %s\n", init_utsname()->release,
773 0 : (int)strcspn(init_utsname()->version, " "),
774 : init_utsname()->version,
775 : print_tainted());
776 0 : }
777 :
778 13581558 : static int very_verbose(struct lock_class *class)
779 : {
780 : #if VERY_VERBOSE
781 : return class_filter(class);
782 : #endif
783 13581558 : return 0;
784 : }
785 :
786 : /*
787 : * Is this the address of a static object:
788 : */
789 : #ifdef __KERNEL__
790 1360757 : static int static_obj(const void *obj)
791 : {
792 1360757 : unsigned long start = (unsigned long) &_stext,
793 1360757 : end = (unsigned long) &_end,
794 1360757 : addr = (unsigned long) obj;
795 :
796 2705109 : if (arch_is_kernel_initmem_freed(addr))
797 : return 0;
798 :
799 : /*
800 : * static variable?
801 : */
802 1360757 : if ((addr >= start) && (addr < end))
803 : return 1;
804 :
805 85 : if (arch_is_kernel_data(addr))
806 : return 1;
807 :
808 : /*
809 : * in-kernel percpu var?
810 : */
811 85 : if (is_kernel_percpu_address(addr))
812 0 : return 1;
813 :
814 : /*
815 : * module static or percpu var?
816 : */
817 1360757 : return is_module_address(addr) || is_module_percpu_address(addr);
818 : }
819 : #endif
820 :
821 : /*
822 : * To make lock name printouts unique, we calculate a unique
823 : * class->name_version generation counter. The caller must hold the graph
824 : * lock.
825 : */
826 747 : static int count_matching_names(struct lock_class *new_class)
827 : {
828 747 : struct lock_class *class;
829 747 : int count = 0;
830 :
831 747 : if (!new_class->name)
832 : return 0;
833 :
834 275807 : list_for_each_entry(class, &all_lock_classes, lock_entry) {
835 275081 : if (new_class->key - new_class->subclass == class->key)
836 21 : return class->name_version;
837 275060 : if (class->name && !strcmp(class->name, new_class->name))
838 1678 : count = max(count, class->name_version);
839 : }
840 :
841 726 : return count + 1;
842 : }
843 :
844 : /* used from NMI context -- must be lockless */
845 : static __always_inline struct lock_class *
846 691682 : look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
847 : {
848 691682 : struct lockdep_subclass_key *key;
849 691682 : struct hlist_head *hash_head;
850 691682 : struct lock_class *class;
851 :
852 691682 : if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
853 0 : debug_locks_off();
854 0 : printk(KERN_ERR
855 : "BUG: looking up invalid subclass: %u\n", subclass);
856 0 : printk(KERN_ERR
857 : "turning off the locking correctness validator.\n");
858 0 : dump_stack();
859 0 : return NULL;
860 : }
861 :
862 : /*
863 : * If it is not initialised then it has never been locked,
864 : * so it won't be present in the hash table.
865 : */
866 691682 : if (unlikely(!lock->key))
867 : return NULL;
868 :
869 : /*
870 : * NOTE: the class-key must be unique. For dynamic locks, a static
871 : * lock_class_key variable is passed in through the mutex_init()
872 : * (or spin_lock_init()) call - which acts as the key. For static
873 : * locks we use the lock object itself as the key.
874 : */
875 691434 : BUILD_BUG_ON(sizeof(struct lock_class_key) >
876 : sizeof(struct lockdep_map));
877 :
878 691434 : key = lock->key->subkeys + subclass;
879 :
880 691434 : hash_head = classhashentry(key);
881 :
882 : /*
883 : * We do an RCU walk of the hash, see lockdep_free_key_range().
884 : */
885 691434 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
886 : return NULL;
887 :
888 1399821 : hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
889 707846 : if (class->key == key) {
890 : /*
891 : * Huh! same key, different name? Did someone trample
892 : * on some memory? We're most confused.
893 : */
894 1381846 : WARN_ON_ONCE(class->name != lock->name &&
895 : lock->key != &__lockdep_no_validate__);
896 : return class;
897 : }
898 : }
899 :
900 : return NULL;
901 : }
902 :
903 : /*
904 : * Static locks do not have their class-keys yet - for them the key is
905 : * the lock object itself. If the lock is in the per cpu area, the
906 : * canonical address of the lock (per cpu offset removed) is used.
907 : */
908 248 : static bool assign_lock_key(struct lockdep_map *lock)
909 : {
910 248 : unsigned long can_addr, addr = (unsigned long)lock;
911 :
912 : #ifdef __KERNEL__
913 : /*
914 : * lockdep_free_key_range() assumes that struct lock_class_key
915 : * objects do not overlap. Since we use the address of lock
916 : * objects as class key for static objects, check whether the
917 : * size of lock_class_key objects does not exceed the size of
918 : * the smallest lock object.
919 : */
920 248 : BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
921 : #endif
922 :
923 248 : if (__is_kernel_percpu_address(addr, &can_addr))
924 24 : lock->key = (void *)can_addr;
925 224 : else if (__is_module_percpu_address(addr, &can_addr))
926 : lock->key = (void *)can_addr;
927 224 : else if (static_obj(lock))
928 224 : lock->key = (void *)lock;
929 : else {
930 : /* Debug-check: all keys must be persistent! */
931 0 : debug_locks_off();
932 0 : pr_err("INFO: trying to register non-static key.\n");
933 0 : pr_err("the code is fine but needs lockdep annotation.\n");
934 0 : pr_err("turning off the locking correctness validator.\n");
935 0 : dump_stack();
936 0 : return false;
937 : }
938 :
939 : return true;
940 : }
941 :
942 : #ifdef CONFIG_DEBUG_LOCKDEP
943 :
944 : /* Check whether element @e occurs in list @h */
945 0 : static bool in_list(struct list_head *e, struct list_head *h)
946 : {
947 0 : struct list_head *f;
948 :
949 0 : list_for_each(f, h) {
950 0 : if (e == f)
951 : return true;
952 : }
953 :
954 : return false;
955 : }
956 :
957 : /*
958 : * Check whether entry @e occurs in any of the locks_after or locks_before
959 : * lists.
960 : */
961 0 : static bool in_any_class_list(struct list_head *e)
962 : {
963 0 : struct lock_class *class;
964 0 : int i;
965 :
966 0 : for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
967 0 : class = &lock_classes[i];
968 0 : if (in_list(e, &class->locks_after) ||
969 0 : in_list(e, &class->locks_before))
970 : return true;
971 : }
972 : return false;
973 : }
974 :
975 0 : static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
976 : {
977 0 : struct lock_list *e;
978 :
979 0 : list_for_each_entry(e, h, entry) {
980 0 : if (e->links_to != c) {
981 0 : printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
982 0 : c->name ? : "(?)",
983 0 : (unsigned long)(e - list_entries),
984 0 : e->links_to && e->links_to->name ?
985 : e->links_to->name : "(?)",
986 0 : e->class && e->class->name ? e->class->name :
987 : "(?)");
988 0 : return false;
989 : }
990 : }
991 : return true;
992 : }
993 :
994 : #ifdef CONFIG_PROVE_LOCKING
995 : static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
996 : #endif
997 :
998 0 : static bool check_lock_chain_key(struct lock_chain *chain)
999 : {
1000 : #ifdef CONFIG_PROVE_LOCKING
1001 0 : u64 chain_key = INITIAL_CHAIN_KEY;
1002 0 : int i;
1003 :
1004 0 : for (i = chain->base; i < chain->base + chain->depth; i++)
1005 0 : chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
1006 : /*
1007 : * The 'unsigned long long' casts avoid that a compiler warning
1008 : * is reported when building tools/lib/lockdep.
1009 : */
1010 0 : if (chain->chain_key != chain_key) {
1011 0 : printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1012 0 : (unsigned long long)(chain - lock_chains),
1013 : (unsigned long long)chain->chain_key,
1014 : (unsigned long long)chain_key);
1015 0 : return false;
1016 : }
1017 : #endif
1018 : return true;
1019 : }
1020 :
1021 0 : static bool in_any_zapped_class_list(struct lock_class *class)
1022 : {
1023 0 : struct pending_free *pf;
1024 0 : int i;
1025 :
1026 0 : for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
1027 0 : if (in_list(&class->lock_entry, &pf->zapped))
1028 : return true;
1029 : }
1030 :
1031 : return false;
1032 : }
1033 :
1034 0 : static bool __check_data_structures(void)
1035 : {
1036 0 : struct lock_class *class;
1037 0 : struct lock_chain *chain;
1038 0 : struct hlist_head *head;
1039 0 : struct lock_list *e;
1040 0 : int i;
1041 :
1042 : /* Check whether all classes occur in a lock list. */
1043 0 : for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1044 0 : class = &lock_classes[i];
1045 0 : if (!in_list(&class->lock_entry, &all_lock_classes) &&
1046 0 : !in_list(&class->lock_entry, &free_lock_classes) &&
1047 0 : !in_any_zapped_class_list(class)) {
1048 0 : printk(KERN_INFO "class %px/%s is not in any class list\n",
1049 0 : class, class->name ? : "(?)");
1050 0 : return false;
1051 : }
1052 : }
1053 :
1054 : /* Check whether all classes have valid lock lists. */
1055 0 : for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1056 0 : class = &lock_classes[i];
1057 0 : if (!class_lock_list_valid(class, &class->locks_before))
1058 : return false;
1059 0 : if (!class_lock_list_valid(class, &class->locks_after))
1060 : return false;
1061 : }
1062 :
1063 : /* Check the chain_key of all lock chains. */
1064 0 : for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1065 0 : head = chainhash_table + i;
1066 0 : hlist_for_each_entry_rcu(chain, head, entry) {
1067 0 : if (!check_lock_chain_key(chain))
1068 : return false;
1069 : }
1070 : }
1071 :
1072 : /*
1073 : * Check whether all list entries that are in use occur in a class
1074 : * lock list.
1075 : */
1076 0 : for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1077 0 : e = list_entries + i;
1078 0 : if (!in_any_class_list(&e->entry)) {
1079 0 : printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1080 0 : (unsigned int)(e - list_entries),
1081 0 : e->class->name ? : "(?)",
1082 0 : e->links_to->name ? : "(?)");
1083 0 : return false;
1084 : }
1085 : }
1086 :
1087 : /*
1088 : * Check whether all list entries that are not in use do not occur in
1089 : * a class lock list.
1090 : */
1091 0 : for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1092 0 : e = list_entries + i;
1093 0 : if (in_any_class_list(&e->entry)) {
1094 0 : printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1095 0 : (unsigned int)(e - list_entries),
1096 0 : e->class && e->class->name ? e->class->name :
1097 : "(?)",
1098 0 : e->links_to && e->links_to->name ?
1099 : e->links_to->name : "(?)");
1100 0 : return false;
1101 : }
1102 : }
1103 :
1104 : return true;
1105 : }
1106 :
1107 : int check_consistency = 0;
1108 : module_param(check_consistency, int, 0644);
1109 :
1110 1 : static void check_data_structures(void)
1111 : {
1112 1 : static bool once = false;
1113 :
1114 1 : if (check_consistency && !once) {
1115 0 : if (!__check_data_structures()) {
1116 0 : once = true;
1117 0 : WARN_ON(once);
1118 : }
1119 : }
1120 1 : }
1121 :
1122 : #else /* CONFIG_DEBUG_LOCKDEP */
1123 :
1124 : static inline void check_data_structures(void) { }
1125 :
1126 : #endif /* CONFIG_DEBUG_LOCKDEP */
1127 :
1128 : static void init_chain_block_buckets(void);
1129 :
1130 : /*
1131 : * Initialize the lock_classes[] array elements, the free_lock_classes list
1132 : * and also the delayed_free structure.
1133 : */
1134 8243 : static void init_data_structures_once(void)
1135 : {
1136 8243 : static bool __read_mostly ds_initialized, rcu_head_initialized;
1137 8243 : int i;
1138 :
1139 8243 : if (likely(rcu_head_initialized))
1140 : return;
1141 :
1142 347 : if (system_state >= SYSTEM_SCHEDULING) {
1143 1 : init_rcu_head(&delayed_free.rcu_head);
1144 1 : rcu_head_initialized = true;
1145 : }
1146 :
1147 347 : if (ds_initialized)
1148 : return;
1149 :
1150 1 : ds_initialized = true;
1151 :
1152 1 : INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1153 1 : INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1154 :
1155 8193 : for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1156 8192 : list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1157 8192 : INIT_LIST_HEAD(&lock_classes[i].locks_after);
1158 8192 : INIT_LIST_HEAD(&lock_classes[i].locks_before);
1159 : }
1160 1 : init_chain_block_buckets();
1161 : }
1162 :
1163 53 : static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1164 : {
1165 53 : unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1166 :
1167 53 : return lock_keys_hash + hash;
1168 : }
1169 :
1170 : /* Register a dynamically allocated key. */
1171 20 : void lockdep_register_key(struct lock_class_key *key)
1172 : {
1173 20 : struct hlist_head *hash_head;
1174 20 : struct lock_class_key *k;
1175 20 : unsigned long flags;
1176 :
1177 20 : if (WARN_ON_ONCE(static_obj(key)))
1178 : return;
1179 20 : hash_head = keyhashentry(key);
1180 :
1181 20 : raw_local_irq_save(flags);
1182 20 : if (!graph_lock())
1183 0 : goto restore_irqs;
1184 40 : hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1185 0 : if (WARN_ON_ONCE(k == key))
1186 0 : goto out_unlock;
1187 : }
1188 20 : hlist_add_head_rcu(&key->hash_entry, hash_head);
1189 20 : out_unlock:
1190 20 : graph_unlock();
1191 20 : restore_irqs:
1192 20 : raw_local_irq_restore(flags);
1193 : }
1194 : EXPORT_SYMBOL_GPL(lockdep_register_key);
1195 :
1196 : /* Check whether a key has been registered as a dynamic key. */
1197 32 : static bool is_dynamic_key(const struct lock_class_key *key)
1198 : {
1199 32 : struct hlist_head *hash_head;
1200 32 : struct lock_class_key *k;
1201 32 : bool found = false;
1202 :
1203 32 : if (WARN_ON_ONCE(static_obj(key)))
1204 : return false;
1205 :
1206 : /*
1207 : * If lock debugging is disabled lock_keys_hash[] may contain
1208 : * pointers to memory that has already been freed. Avoid triggering
1209 : * a use-after-free in that case by returning early.
1210 : */
1211 32 : if (!debug_locks)
1212 : return true;
1213 :
1214 32 : hash_head = keyhashentry(key);
1215 :
1216 32 : rcu_read_lock();
1217 64 : hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1218 32 : if (k == key) {
1219 : found = true;
1220 : break;
1221 : }
1222 : }
1223 32 : rcu_read_unlock();
1224 :
1225 32 : return found;
1226 : }
1227 :
1228 : /*
1229 : * Register a lock's class in the hash-table, if the class is not present
1230 : * yet. Otherwise we look it up. We cache the result in the lock object
1231 : * itself, so actual lookup of the hash should be once per lock object.
1232 : */
1233 : static struct lock_class *
1234 691675 : register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1235 : {
1236 691675 : struct lockdep_subclass_key *key;
1237 691675 : struct hlist_head *hash_head;
1238 691675 : struct lock_class *class;
1239 :
1240 691675 : DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1241 :
1242 691681 : class = look_up_lock_class(lock, subclass);
1243 691696 : if (likely(class))
1244 690930 : goto out_set_class_cache;
1245 :
1246 766 : if (!lock->key) {
1247 248 : if (!assign_lock_key(lock))
1248 : return NULL;
1249 518 : } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1250 : return NULL;
1251 : }
1252 :
1253 766 : key = lock->key->subkeys + subclass;
1254 766 : hash_head = classhashentry(key);
1255 :
1256 766 : if (!graph_lock()) {
1257 : return NULL;
1258 : }
1259 : /*
1260 : * We have to do the hash-walk again, to avoid races
1261 : * with another CPU:
1262 : */
1263 1609 : hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1264 95 : if (class->key == key)
1265 20 : goto out_unlock_set;
1266 : }
1267 :
1268 747 : init_data_structures_once();
1269 :
1270 : /* Allocate a new lock class and add it to the hash. */
1271 747 : class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1272 : lock_entry);
1273 747 : if (!class) {
1274 0 : if (!debug_locks_off_graph_unlock()) {
1275 : return NULL;
1276 : }
1277 :
1278 0 : print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1279 0 : dump_stack();
1280 0 : return NULL;
1281 : }
1282 747 : nr_lock_classes++;
1283 747 : __set_bit(class - lock_classes, lock_classes_in_use);
1284 747 : debug_atomic_inc(nr_unused_locks);
1285 747 : class->key = key;
1286 747 : class->name = lock->name;
1287 747 : class->subclass = subclass;
1288 747 : WARN_ON_ONCE(!list_empty(&class->locks_before));
1289 747 : WARN_ON_ONCE(!list_empty(&class->locks_after));
1290 747 : class->name_version = count_matching_names(class);
1291 747 : class->wait_type_inner = lock->wait_type_inner;
1292 747 : class->wait_type_outer = lock->wait_type_outer;
1293 747 : class->lock_type = lock->lock_type;
1294 : /*
1295 : * We use RCU's safe list-add method to make
1296 : * parallel walking of the hash-list safe:
1297 : */
1298 747 : hlist_add_head_rcu(&class->hash_entry, hash_head);
1299 : /*
1300 : * Remove the class from the free list and add it to the global list
1301 : * of classes.
1302 : */
1303 747 : list_move_tail(&class->lock_entry, &all_lock_classes);
1304 :
1305 747 : if (verbose(class)) {
1306 : graph_unlock();
1307 :
1308 : printk("\nnew class %px: %s", class->key, class->name);
1309 : if (class->name_version > 1)
1310 : printk(KERN_CONT "#%d", class->name_version);
1311 : printk(KERN_CONT "\n");
1312 : dump_stack();
1313 :
1314 : if (!graph_lock()) {
1315 : return NULL;
1316 : }
1317 : }
1318 747 : out_unlock_set:
1319 767 : graph_unlock();
1320 :
1321 691697 : out_set_class_cache:
1322 691697 : if (!subclass || force)
1323 679389 : lock->class_cache[0] = class;
1324 12308 : else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1325 10309 : lock->class_cache[subclass] = class;
1326 :
1327 : /*
1328 : * Hash collision, did we smoke some? We found a class with a matching
1329 : * hash but the subclass -- which is hashed in -- didn't match.
1330 : */
1331 691697 : if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1332 0 : return NULL;
1333 :
1334 : return class;
1335 : }
1336 :
1337 : #ifdef CONFIG_PROVE_LOCKING
1338 : /*
1339 : * Allocate a lockdep entry. (assumes the graph_lock held, returns
1340 : * with NULL on failure)
1341 : */
1342 6376 : static struct lock_list *alloc_list_entry(void)
1343 : {
1344 6376 : int idx = find_first_zero_bit(list_entries_in_use,
1345 : ARRAY_SIZE(list_entries));
1346 :
1347 6376 : if (idx >= ARRAY_SIZE(list_entries)) {
1348 0 : if (!debug_locks_off_graph_unlock())
1349 : return NULL;
1350 :
1351 0 : print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1352 0 : dump_stack();
1353 0 : return NULL;
1354 : }
1355 6376 : nr_list_entries++;
1356 6376 : __set_bit(idx, list_entries_in_use);
1357 6376 : return list_entries + idx;
1358 : }
1359 :
1360 : /*
1361 : * Add a new dependency to the head of the list:
1362 : */
1363 6376 : static int add_lock_to_list(struct lock_class *this,
1364 : struct lock_class *links_to, struct list_head *head,
1365 : unsigned long ip, u16 distance, u8 dep,
1366 : const struct lock_trace *trace)
1367 : {
1368 6376 : struct lock_list *entry;
1369 : /*
1370 : * Lock not present yet - get a new dependency struct and
1371 : * add it to the list:
1372 : */
1373 6376 : entry = alloc_list_entry();
1374 6376 : if (!entry)
1375 : return 0;
1376 :
1377 6376 : entry->class = this;
1378 6376 : entry->links_to = links_to;
1379 6376 : entry->dep = dep;
1380 6376 : entry->distance = distance;
1381 6376 : entry->trace = trace;
1382 : /*
1383 : * Both allocation and removal are done under the graph lock; but
1384 : * iteration is under RCU-sched; see look_up_lock_class() and
1385 : * lockdep_free_key_range().
1386 : */
1387 6376 : list_add_tail_rcu(&entry->entry, head);
1388 :
1389 6376 : return 1;
1390 : }
1391 :
1392 : /*
1393 : * For good efficiency of modular, we use power of 2
1394 : */
1395 : #define MAX_CIRCULAR_QUEUE_SIZE 4096UL
1396 : #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1397 :
1398 : /*
1399 : * The circular_queue and helpers are used to implement graph
1400 : * breadth-first search (BFS) algorithm, by which we can determine
1401 : * whether there is a path from a lock to another. In deadlock checks,
1402 : * a path from the next lock to be acquired to a previous held lock
1403 : * indicates that adding the <prev> -> <next> lock dependency will
1404 : * produce a circle in the graph. Breadth-first search instead of
1405 : * depth-first search is used in order to find the shortest (circular)
1406 : * path.
1407 : */
1408 : struct circular_queue {
1409 : struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1410 : unsigned int front, rear;
1411 : };
1412 :
1413 : static struct circular_queue lock_cq;
1414 :
1415 : unsigned int max_bfs_queue_depth;
1416 :
1417 : static unsigned int lockdep_dependency_gen_id;
1418 :
1419 16271 : static inline void __cq_init(struct circular_queue *cq)
1420 : {
1421 16271 : cq->front = cq->rear = 0;
1422 16271 : lockdep_dependency_gen_id++;
1423 : }
1424 :
1425 144081 : static inline int __cq_empty(struct circular_queue *cq)
1426 : {
1427 144081 : return (cq->front == cq->rear);
1428 : }
1429 :
1430 127810 : static inline int __cq_full(struct circular_queue *cq)
1431 : {
1432 127810 : return ((cq->rear + 1) & CQ_MASK) == cq->front;
1433 : }
1434 :
1435 127810 : static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1436 : {
1437 127810 : if (__cq_full(cq))
1438 : return -1;
1439 :
1440 127810 : cq->element[cq->rear] = elem;
1441 127810 : cq->rear = (cq->rear + 1) & CQ_MASK;
1442 127810 : return 0;
1443 : }
1444 :
1445 : /*
1446 : * Dequeue an element from the circular_queue, return a lock_list if
1447 : * the queue is not empty, or NULL if otherwise.
1448 : */
1449 144081 : static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1450 : {
1451 144081 : struct lock_list * lock;
1452 :
1453 144081 : if (__cq_empty(cq))
1454 : return NULL;
1455 :
1456 127810 : lock = cq->element[cq->front];
1457 127810 : cq->front = (cq->front + 1) & CQ_MASK;
1458 :
1459 127810 : return lock;
1460 : }
1461 :
1462 111539 : static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1463 : {
1464 111539 : return (cq->rear - cq->front) & CQ_MASK;
1465 : }
1466 :
1467 217143 : static inline void mark_lock_accessed(struct lock_list *lock)
1468 : {
1469 217143 : lock->class->dep_gen_id = lockdep_dependency_gen_id;
1470 : }
1471 :
1472 392494 : static inline void visit_lock_entry(struct lock_list *lock,
1473 : struct lock_list *parent)
1474 : {
1475 392494 : lock->parent = parent;
1476 : }
1477 :
1478 408765 : static inline unsigned long lock_accessed(struct lock_list *lock)
1479 : {
1480 408765 : return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1481 : }
1482 :
1483 0 : static inline struct lock_list *get_lock_parent(struct lock_list *child)
1484 : {
1485 0 : return child->parent;
1486 : }
1487 :
1488 0 : static inline int get_lock_depth(struct lock_list *child)
1489 : {
1490 0 : int depth = 0;
1491 0 : struct lock_list *parent;
1492 :
1493 0 : while ((parent = get_lock_parent(child))) {
1494 0 : child = parent;
1495 0 : depth++;
1496 : }
1497 0 : return depth;
1498 : }
1499 :
1500 : /*
1501 : * Return the forward or backward dependency list.
1502 : *
1503 : * @lock: the lock_list to get its class's dependency list
1504 : * @offset: the offset to struct lock_class to determine whether it is
1505 : * locks_after or locks_before
1506 : */
1507 609006 : static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1508 : {
1509 609006 : void *lock_class = lock->class;
1510 :
1511 609006 : return lock_class + offset;
1512 : }
1513 : /*
1514 : * Return values of a bfs search:
1515 : *
1516 : * BFS_E* indicates an error
1517 : * BFS_R* indicates a result (match or not)
1518 : *
1519 : * BFS_EINVALIDNODE: Find a invalid node in the graph.
1520 : *
1521 : * BFS_EQUEUEFULL: The queue is full while doing the bfs.
1522 : *
1523 : * BFS_RMATCH: Find the matched node in the graph, and put that node into
1524 : * *@target_entry.
1525 : *
1526 : * BFS_RNOMATCH: Haven't found the matched node and keep *@target_entry
1527 : * _unchanged_.
1528 : */
1529 : enum bfs_result {
1530 : BFS_EINVALIDNODE = -2,
1531 : BFS_EQUEUEFULL = -1,
1532 : BFS_RMATCH = 0,
1533 : BFS_RNOMATCH = 1,
1534 : };
1535 :
1536 : /*
1537 : * bfs_result < 0 means error
1538 : */
1539 26611 : static inline bool bfs_error(enum bfs_result res)
1540 : {
1541 26611 : return res < 0;
1542 : }
1543 :
1544 : /*
1545 : * DEP_*_BIT in lock_list::dep
1546 : *
1547 : * For dependency @prev -> @next:
1548 : *
1549 : * SR: @prev is shared reader (->read != 0) and @next is recursive reader
1550 : * (->read == 2)
1551 : * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
1552 : * SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
1553 : * EN: @prev is exclusive locker and @next is non-recursive locker
1554 : *
1555 : * Note that we define the value of DEP_*_BITs so that:
1556 : * bit0 is prev->read == 0
1557 : * bit1 is next->read != 2
1558 : */
1559 : #define DEP_SR_BIT (0 + (0 << 1)) /* 0 */
1560 : #define DEP_ER_BIT (1 + (0 << 1)) /* 1 */
1561 : #define DEP_SN_BIT (0 + (1 << 1)) /* 2 */
1562 : #define DEP_EN_BIT (1 + (1 << 1)) /* 3 */
1563 :
1564 : #define DEP_SR_MASK (1U << (DEP_SR_BIT))
1565 : #define DEP_ER_MASK (1U << (DEP_ER_BIT))
1566 : #define DEP_SN_MASK (1U << (DEP_SN_BIT))
1567 : #define DEP_EN_MASK (1U << (DEP_EN_BIT))
1568 :
1569 : static inline unsigned int
1570 7152 : __calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1571 : {
1572 14304 : return (prev->read == 0) + ((next->read != 2) << 1);
1573 : }
1574 :
1575 7152 : static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1576 : {
1577 14304 : return 1U << __calc_dep_bit(prev, next);
1578 : }
1579 :
1580 : /*
1581 : * calculate the dep_bit for backwards edges. We care about whether @prev is
1582 : * shared and whether @next is recursive.
1583 : */
1584 : static inline unsigned int
1585 7152 : __calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1586 : {
1587 14304 : return (next->read != 2) + ((prev->read == 0) << 1);
1588 : }
1589 :
1590 7152 : static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1591 : {
1592 14304 : return 1U << __calc_dep_bitb(prev, next);
1593 : }
1594 :
1595 : /*
1596 : * Initialize a lock_list entry @lock belonging to @class as the root for a BFS
1597 : * search.
1598 : */
1599 16271 : static inline void __bfs_init_root(struct lock_list *lock,
1600 : struct lock_class *class)
1601 : {
1602 16271 : lock->class = class;
1603 16271 : lock->parent = NULL;
1604 16271 : lock->only_xr = 0;
1605 : }
1606 :
1607 : /*
1608 : * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
1609 : * root for a BFS search.
1610 : *
1611 : * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
1612 : * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
1613 : * and -(S*)->.
1614 : */
1615 8041 : static inline void bfs_init_root(struct lock_list *lock,
1616 : struct held_lock *hlock)
1617 : {
1618 8041 : __bfs_init_root(lock, hlock_class(hlock));
1619 8041 : lock->only_xr = (hlock->read == 2);
1620 8041 : }
1621 :
1622 : /*
1623 : * Similar to bfs_init_root() but initialize the root for backwards BFS.
1624 : *
1625 : * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
1626 : * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
1627 : * -(*S)-> and -(R*)-> (reverse order of -(*R)-> and -(S*)->).
1628 : */
1629 8230 : static inline void bfs_init_rootb(struct lock_list *lock,
1630 : struct held_lock *hlock)
1631 : {
1632 8230 : __bfs_init_root(lock, hlock_class(hlock));
1633 8230 : lock->only_xr = (hlock->read != 0);
1634 8230 : }
1635 :
1636 425036 : static inline struct lock_list *__bfs_next(struct lock_list *lock, int offset)
1637 : {
1638 425036 : if (!lock || !lock->parent)
1639 : return NULL;
1640 :
1641 392494 : return list_next_or_null_rcu(get_dep_list(lock->parent, offset),
1642 : &lock->entry, struct lock_list, entry);
1643 : }
1644 :
1645 : /*
1646 : * Breadth-First Search to find a strong path in the dependency graph.
1647 : *
1648 : * @source_entry: the source of the path we are searching for.
1649 : * @data: data used for the second parameter of @match function
1650 : * @match: match function for the search
1651 : * @target_entry: pointer to the target of a matched path
1652 : * @offset: the offset to struct lock_class to determine whether it is
1653 : * locks_after or locks_before
1654 : *
1655 : * We may have multiple edges (considering different kinds of dependencies,
1656 : * e.g. ER and SN) between two nodes in the dependency graph. But
1657 : * only the strong dependency path in the graph is relevant to deadlocks. A
1658 : * strong dependency path is a dependency path that doesn't have two adjacent
1659 : * dependencies as -(*R)-> -(S*)->, please see:
1660 : *
1661 : * Documentation/locking/lockdep-design.rst
1662 : *
1663 : * for more explanation of the definition of strong dependency paths
1664 : *
1665 : * In __bfs(), we only traverse in the strong dependency path:
1666 : *
1667 : * In lock_list::only_xr, we record whether the previous dependency only
1668 : * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
1669 : * filter out any -(S*)-> in the current dependency and after that, the
1670 : * ->only_xr is set according to whether we only have -(*R)-> left.
1671 : */
1672 16271 : static enum bfs_result __bfs(struct lock_list *source_entry,
1673 : void *data,
1674 : bool (*match)(struct lock_list *entry, void *data),
1675 : bool (*skip)(struct lock_list *entry, void *data),
1676 : struct lock_list **target_entry,
1677 : int offset)
1678 : {
1679 16271 : struct circular_queue *cq = &lock_cq;
1680 16271 : struct lock_list *lock = NULL;
1681 16271 : struct lock_list *entry;
1682 16271 : struct list_head *head;
1683 16271 : unsigned int cq_depth;
1684 16271 : bool first;
1685 :
1686 16271 : lockdep_assert_locked();
1687 :
1688 16271 : __cq_init(cq);
1689 16271 : __cq_enqueue(cq, source_entry);
1690 :
1691 552846 : while ((lock = __bfs_next(lock, offset)) || (lock = __cq_dequeue(cq))) {
1692 408765 : if (!lock->class)
1693 : return BFS_EINVALIDNODE;
1694 :
1695 : /*
1696 : * Step 1: check whether we already finish on this one.
1697 : *
1698 : * If we have visited all the dependencies from this @lock to
1699 : * others (iow, if we have visited all lock_list entries in
1700 : * @lock->class->locks_{after,before}) we skip, otherwise go
1701 : * and visit all the dependencies in the list and mark this
1702 : * list accessed.
1703 : */
1704 408765 : if (lock_accessed(lock))
1705 191622 : continue;
1706 : else
1707 217143 : mark_lock_accessed(lock);
1708 :
1709 : /*
1710 : * Step 2: check whether prev dependency and this form a strong
1711 : * dependency path.
1712 : */
1713 217143 : if (lock->parent) { /* Parent exists, check prev dependency */
1714 200872 : u8 dep = lock->dep;
1715 200872 : bool prev_only_xr = lock->parent->only_xr;
1716 :
1717 : /*
1718 : * Mask out all -(S*)-> if we only have *R in previous
1719 : * step, because -(*R)-> -(S*)-> don't make up a strong
1720 : * dependency.
1721 : */
1722 200872 : if (prev_only_xr)
1723 24166 : dep &= ~(DEP_SR_MASK | DEP_SN_MASK);
1724 :
1725 : /* If nothing left, we skip */
1726 200872 : if (!dep)
1727 212 : continue;
1728 :
1729 : /* If there are only -(*R)-> left, set that for the next step */
1730 200660 : lock->only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
1731 : }
1732 :
1733 : /*
1734 : * Step 3: we haven't visited this and there is a strong
1735 : * dependency path to this, so check with @match.
1736 : * If @skip is provide and returns true, we skip this
1737 : * lock (and any path this lock is in).
1738 : */
1739 216931 : if (skip && skip(lock, data))
1740 419 : continue;
1741 :
1742 216512 : if (match(lock, data)) {
1743 0 : *target_entry = lock;
1744 0 : return BFS_RMATCH;
1745 : }
1746 :
1747 : /*
1748 : * Step 4: if not match, expand the path by adding the
1749 : * forward or backwards dependencis in the search
1750 : *
1751 : */
1752 216512 : first = true;
1753 216512 : head = get_dep_list(lock, offset);
1754 609006 : list_for_each_entry_rcu(entry, head, entry) {
1755 392494 : visit_lock_entry(entry, lock);
1756 :
1757 : /*
1758 : * Note we only enqueue the first of the list into the
1759 : * queue, because we can always find a sibling
1760 : * dependency from one (see __bfs_next()), as a result
1761 : * the space of queue is saved.
1762 : */
1763 392494 : if (!first)
1764 280955 : continue;
1765 :
1766 111539 : first = false;
1767 :
1768 111539 : if (__cq_enqueue(cq, entry))
1769 : return BFS_EQUEUEFULL;
1770 :
1771 111539 : cq_depth = __cq_get_elem_count(cq);
1772 111539 : if (max_bfs_queue_depth < cq_depth)
1773 112 : max_bfs_queue_depth = cq_depth;
1774 : }
1775 : }
1776 :
1777 : return BFS_RNOMATCH;
1778 : }
1779 :
1780 : static inline enum bfs_result
1781 8041 : __bfs_forwards(struct lock_list *src_entry,
1782 : void *data,
1783 : bool (*match)(struct lock_list *entry, void *data),
1784 : bool (*skip)(struct lock_list *entry, void *data),
1785 : struct lock_list **target_entry)
1786 : {
1787 8041 : return __bfs(src_entry, data, match, skip, target_entry,
1788 : offsetof(struct lock_class, locks_after));
1789 :
1790 : }
1791 :
1792 : static inline enum bfs_result
1793 8230 : __bfs_backwards(struct lock_list *src_entry,
1794 : void *data,
1795 : bool (*match)(struct lock_list *entry, void *data),
1796 : bool (*skip)(struct lock_list *entry, void *data),
1797 : struct lock_list **target_entry)
1798 : {
1799 8230 : return __bfs(src_entry, data, match, skip, target_entry,
1800 : offsetof(struct lock_class, locks_before));
1801 :
1802 : }
1803 :
1804 0 : static void print_lock_trace(const struct lock_trace *trace,
1805 : unsigned int spaces)
1806 : {
1807 0 : stack_trace_print(trace->entries, trace->nr_entries, spaces);
1808 0 : }
1809 :
1810 : /*
1811 : * Print a dependency chain entry (this is only done when a deadlock
1812 : * has been detected):
1813 : */
1814 : static noinline void
1815 0 : print_circular_bug_entry(struct lock_list *target, int depth)
1816 : {
1817 0 : if (debug_locks_silent)
1818 : return;
1819 0 : printk("\n-> #%u", depth);
1820 0 : print_lock_name(target->class);
1821 0 : printk(KERN_CONT ":\n");
1822 0 : print_lock_trace(target->trace, 6);
1823 : }
1824 :
1825 : static void
1826 0 : print_circular_lock_scenario(struct held_lock *src,
1827 : struct held_lock *tgt,
1828 : struct lock_list *prt)
1829 : {
1830 0 : struct lock_class *source = hlock_class(src);
1831 0 : struct lock_class *target = hlock_class(tgt);
1832 0 : struct lock_class *parent = prt->class;
1833 :
1834 : /*
1835 : * A direct locking problem where unsafe_class lock is taken
1836 : * directly by safe_class lock, then all we need to show
1837 : * is the deadlock scenario, as it is obvious that the
1838 : * unsafe lock is taken under the safe lock.
1839 : *
1840 : * But if there is a chain instead, where the safe lock takes
1841 : * an intermediate lock (middle_class) where this lock is
1842 : * not the same as the safe lock, then the lock chain is
1843 : * used to describe the problem. Otherwise we would need
1844 : * to show a different CPU case for each link in the chain
1845 : * from the safe_class lock to the unsafe_class lock.
1846 : */
1847 0 : if (parent != source) {
1848 0 : printk("Chain exists of:\n ");
1849 0 : __print_lock_name(source);
1850 0 : printk(KERN_CONT " --> ");
1851 0 : __print_lock_name(parent);
1852 0 : printk(KERN_CONT " --> ");
1853 0 : __print_lock_name(target);
1854 0 : printk(KERN_CONT "\n\n");
1855 : }
1856 :
1857 0 : printk(" Possible unsafe locking scenario:\n\n");
1858 0 : printk(" CPU0 CPU1\n");
1859 0 : printk(" ---- ----\n");
1860 0 : printk(" lock(");
1861 0 : __print_lock_name(target);
1862 0 : printk(KERN_CONT ");\n");
1863 0 : printk(" lock(");
1864 0 : __print_lock_name(parent);
1865 0 : printk(KERN_CONT ");\n");
1866 0 : printk(" lock(");
1867 0 : __print_lock_name(target);
1868 0 : printk(KERN_CONT ");\n");
1869 0 : printk(" lock(");
1870 0 : __print_lock_name(source);
1871 0 : printk(KERN_CONT ");\n");
1872 0 : printk("\n *** DEADLOCK ***\n\n");
1873 0 : }
1874 :
1875 : /*
1876 : * When a circular dependency is detected, print the
1877 : * header first:
1878 : */
1879 : static noinline void
1880 0 : print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1881 : struct held_lock *check_src,
1882 : struct held_lock *check_tgt)
1883 : {
1884 0 : struct task_struct *curr = current;
1885 :
1886 0 : if (debug_locks_silent)
1887 : return;
1888 :
1889 0 : pr_warn("\n");
1890 0 : pr_warn("======================================================\n");
1891 0 : pr_warn("WARNING: possible circular locking dependency detected\n");
1892 0 : print_kernel_ident();
1893 0 : pr_warn("------------------------------------------------------\n");
1894 0 : pr_warn("%s/%d is trying to acquire lock:\n",
1895 : curr->comm, task_pid_nr(curr));
1896 0 : print_lock(check_src);
1897 :
1898 0 : pr_warn("\nbut task is already holding lock:\n");
1899 :
1900 0 : print_lock(check_tgt);
1901 0 : pr_warn("\nwhich lock already depends on the new lock.\n\n");
1902 0 : pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1903 :
1904 0 : print_circular_bug_entry(entry, depth);
1905 : }
1906 :
1907 : /*
1908 : * We are about to add A -> B into the dependency graph, and in __bfs() a
1909 : * strong dependency path A -> .. -> B is found: hlock_class equals
1910 : * entry->class.
1911 : *
1912 : * If A -> .. -> B can replace A -> B in any __bfs() search (means the former
1913 : * is _stronger_ than or equal to the latter), we consider A -> B as redundant.
1914 : * For example if A -> .. -> B is -(EN)-> (i.e. A -(E*)-> .. -(*N)-> B), and A
1915 : * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
1916 : * dependency graph, as any strong path ..-> A -> B ->.. we can get with
1917 : * having dependency A -> B, we could already get a equivalent path ..-> A ->
1918 : * .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant.
1919 : *
1920 : * We need to make sure both the start and the end of A -> .. -> B is not
1921 : * weaker than A -> B. For the start part, please see the comment in
1922 : * check_redundant(). For the end part, we need:
1923 : *
1924 : * Either
1925 : *
1926 : * a) A -> B is -(*R)-> (everything is not weaker than that)
1927 : *
1928 : * or
1929 : *
1930 : * b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
1931 : *
1932 : */
1933 : static inline bool hlock_equal(struct lock_list *entry, void *data)
1934 : {
1935 : struct held_lock *hlock = (struct held_lock *)data;
1936 :
1937 : return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1938 : (hlock->read == 2 || /* A -> B is -(*R)-> */
1939 : !entry->only_xr); /* A -> .. -> B is -(*N)-> */
1940 : }
1941 :
1942 : /*
1943 : * We are about to add B -> A into the dependency graph, and in __bfs() a
1944 : * strong dependency path A -> .. -> B is found: hlock_class equals
1945 : * entry->class.
1946 : *
1947 : * We will have a deadlock case (conflict) if A -> .. -> B -> A is a strong
1948 : * dependency cycle, that means:
1949 : *
1950 : * Either
1951 : *
1952 : * a) B -> A is -(E*)->
1953 : *
1954 : * or
1955 : *
1956 : * b) A -> .. -> B is -(*N)-> (i.e. A -> .. -(*N)-> B)
1957 : *
1958 : * as then we don't have -(*R)-> -(S*)-> in the cycle.
1959 : */
1960 41793 : static inline bool hlock_conflict(struct lock_list *entry, void *data)
1961 : {
1962 41793 : struct held_lock *hlock = (struct held_lock *)data;
1963 :
1964 41793 : return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1965 0 : (hlock->read == 0 || /* B -> A is -(E*)-> */
1966 0 : !entry->only_xr); /* A -> .. -> B is -(*N)-> */
1967 : }
1968 :
1969 0 : static noinline void print_circular_bug(struct lock_list *this,
1970 : struct lock_list *target,
1971 : struct held_lock *check_src,
1972 : struct held_lock *check_tgt)
1973 : {
1974 0 : struct task_struct *curr = current;
1975 0 : struct lock_list *parent;
1976 0 : struct lock_list *first_parent;
1977 0 : int depth;
1978 :
1979 0 : if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1980 : return;
1981 :
1982 0 : this->trace = save_trace();
1983 0 : if (!this->trace)
1984 : return;
1985 :
1986 0 : depth = get_lock_depth(target);
1987 :
1988 0 : print_circular_bug_header(target, depth, check_src, check_tgt);
1989 :
1990 0 : parent = get_lock_parent(target);
1991 0 : first_parent = parent;
1992 :
1993 0 : while (parent) {
1994 0 : print_circular_bug_entry(parent, --depth);
1995 0 : parent = get_lock_parent(parent);
1996 : }
1997 :
1998 0 : printk("\nother info that might help us debug this:\n\n");
1999 0 : print_circular_lock_scenario(check_src, check_tgt,
2000 : first_parent);
2001 :
2002 0 : lockdep_print_held_locks(curr);
2003 :
2004 0 : printk("\nstack backtrace:\n");
2005 0 : dump_stack();
2006 : }
2007 :
2008 0 : static noinline void print_bfs_bug(int ret)
2009 : {
2010 0 : if (!debug_locks_off_graph_unlock())
2011 : return;
2012 :
2013 : /*
2014 : * Breadth-first-search failed, graph got corrupted?
2015 : */
2016 0 : WARN(1, "lockdep bfs error:%d\n", ret);
2017 : }
2018 :
2019 0 : static bool noop_count(struct lock_list *entry, void *data)
2020 : {
2021 0 : (*(unsigned long *)data)++;
2022 0 : return false;
2023 : }
2024 :
2025 0 : static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2026 : {
2027 0 : unsigned long count = 0;
2028 0 : struct lock_list *target_entry;
2029 :
2030 0 : __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2031 :
2032 0 : return count;
2033 : }
2034 0 : unsigned long lockdep_count_forward_deps(struct lock_class *class)
2035 : {
2036 0 : unsigned long ret, flags;
2037 0 : struct lock_list this;
2038 :
2039 0 : __bfs_init_root(&this, class);
2040 :
2041 0 : raw_local_irq_save(flags);
2042 0 : lockdep_lock();
2043 0 : ret = __lockdep_count_forward_deps(&this);
2044 0 : lockdep_unlock();
2045 0 : raw_local_irq_restore(flags);
2046 :
2047 0 : return ret;
2048 : }
2049 :
2050 0 : static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2051 : {
2052 0 : unsigned long count = 0;
2053 0 : struct lock_list *target_entry;
2054 :
2055 0 : __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2056 :
2057 0 : return count;
2058 : }
2059 :
2060 0 : unsigned long lockdep_count_backward_deps(struct lock_class *class)
2061 : {
2062 0 : unsigned long ret, flags;
2063 0 : struct lock_list this;
2064 :
2065 0 : __bfs_init_root(&this, class);
2066 :
2067 0 : raw_local_irq_save(flags);
2068 0 : lockdep_lock();
2069 0 : ret = __lockdep_count_backward_deps(&this);
2070 0 : lockdep_unlock();
2071 0 : raw_local_irq_restore(flags);
2072 :
2073 0 : return ret;
2074 : }
2075 :
2076 : /*
2077 : * Check that the dependency graph starting at <src> can lead to
2078 : * <target> or not.
2079 : */
2080 : static noinline enum bfs_result
2081 7152 : check_path(struct held_lock *target, struct lock_list *src_entry,
2082 : bool (*match)(struct lock_list *entry, void *data),
2083 : bool (*skip)(struct lock_list *entry, void *data),
2084 : struct lock_list **target_entry)
2085 : {
2086 7152 : enum bfs_result ret;
2087 :
2088 7152 : ret = __bfs_forwards(src_entry, target, match, skip, target_entry);
2089 :
2090 7152 : if (unlikely(bfs_error(ret)))
2091 0 : print_bfs_bug(ret);
2092 :
2093 7152 : return ret;
2094 : }
2095 :
2096 : /*
2097 : * Prove that the dependency graph starting at <src> can not
2098 : * lead to <target>. If it can, there is a circle when adding
2099 : * <target> -> <src> dependency.
2100 : *
2101 : * Print an error and return BFS_RMATCH if it does.
2102 : */
2103 : static noinline enum bfs_result
2104 7152 : check_noncircular(struct held_lock *src, struct held_lock *target,
2105 : struct lock_trace **const trace)
2106 : {
2107 7152 : enum bfs_result ret;
2108 7152 : struct lock_list *target_entry;
2109 7152 : struct lock_list src_entry;
2110 :
2111 7152 : bfs_init_root(&src_entry, src);
2112 :
2113 7152 : debug_atomic_inc(nr_cyclic_checks);
2114 :
2115 7152 : ret = check_path(target, &src_entry, hlock_conflict, NULL, &target_entry);
2116 :
2117 7152 : if (unlikely(ret == BFS_RMATCH)) {
2118 0 : if (!*trace) {
2119 : /*
2120 : * If save_trace fails here, the printing might
2121 : * trigger a WARN but because of the !nr_entries it
2122 : * should not do bad things.
2123 : */
2124 0 : *trace = save_trace();
2125 : }
2126 :
2127 0 : print_circular_bug(&src_entry, target_entry, src, target);
2128 : }
2129 :
2130 7152 : return ret;
2131 : }
2132 :
2133 : #ifdef CONFIG_TRACE_IRQFLAGS
2134 :
2135 : /*
2136 : * Forwards and backwards subgraph searching, for the purposes of
2137 : * proving that two subgraphs can be connected by a new dependency
2138 : * without creating any illegal irq-safe -> irq-unsafe lock dependency.
2139 : *
2140 : * A irq safe->unsafe deadlock happens with the following conditions:
2141 : *
2142 : * 1) We have a strong dependency path A -> ... -> B
2143 : *
2144 : * 2) and we have ENABLED_IRQ usage of B and USED_IN_IRQ usage of A, therefore
2145 : * irq can create a new dependency B -> A (consider the case that a holder
2146 : * of B gets interrupted by an irq whose handler will try to acquire A).
2147 : *
2148 : * 3) the dependency circle A -> ... -> B -> A we get from 1) and 2) is a
2149 : * strong circle:
2150 : *
2151 : * For the usage bits of B:
2152 : * a) if A -> B is -(*N)->, then B -> A could be any type, so any
2153 : * ENABLED_IRQ usage suffices.
2154 : * b) if A -> B is -(*R)->, then B -> A must be -(E*)->, so only
2155 : * ENABLED_IRQ_*_READ usage suffices.
2156 : *
2157 : * For the usage bits of A:
2158 : * c) if A -> B is -(E*)->, then B -> A could be any type, so any
2159 : * USED_IN_IRQ usage suffices.
2160 : * d) if A -> B is -(S*)->, then B -> A must be -(*N)->, so only
2161 : * USED_IN_IRQ_*_READ usage suffices.
2162 : */
2163 :
2164 : /*
2165 : * There is a strong dependency path in the dependency graph: A -> B, and now
2166 : * we need to decide which usage bit of A should be accumulated to detect
2167 : * safe->unsafe bugs.
2168 : *
2169 : * Note that usage_accumulate() is used in backwards search, so ->only_xr
2170 : * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2171 : *
2172 : * As above, if only_xr is false, which means A -> B has -(E*)-> dependency
2173 : * path, any usage of A should be considered. Otherwise, we should only
2174 : * consider _READ usage.
2175 : */
2176 170552 : static inline bool usage_accumulate(struct lock_list *entry, void *mask)
2177 : {
2178 170552 : if (!entry->only_xr)
2179 138377 : *(unsigned long *)mask |= entry->class->usage_mask;
2180 : else /* Mask out _READ usage bits */
2181 32175 : *(unsigned long *)mask |= (entry->class->usage_mask & LOCKF_IRQ);
2182 :
2183 170552 : return false;
2184 : }
2185 :
2186 : /*
2187 : * There is a strong dependency path in the dependency graph: A -> B, and now
2188 : * we need to decide which usage bit of B conflicts with the usage bits of A,
2189 : * i.e. which usage bit of B may introduce safe->unsafe deadlocks.
2190 : *
2191 : * As above, if only_xr is false, which means A -> B has -(*N)-> dependency
2192 : * path, any usage of B should be considered. Otherwise, we should only
2193 : * consider _READ usage.
2194 : */
2195 4167 : static inline bool usage_match(struct lock_list *entry, void *mask)
2196 : {
2197 4167 : if (!entry->only_xr)
2198 3650 : return !!(entry->class->usage_mask & *(unsigned long *)mask);
2199 : else /* Mask out _READ usage bits */
2200 517 : return !!((entry->class->usage_mask & LOCKF_IRQ) & *(unsigned long *)mask);
2201 : }
2202 :
2203 175138 : static inline bool usage_skip(struct lock_list *entry, void *mask)
2204 : {
2205 : /*
2206 : * Skip local_lock() for irq inversion detection.
2207 : *
2208 : * For !RT, local_lock() is not a real lock, so it won't carry any
2209 : * dependency.
2210 : *
2211 : * For RT, an irq inversion happens when we have lock A and B, and on
2212 : * some CPU we can have:
2213 : *
2214 : * lock(A);
2215 : * <interrupted>
2216 : * lock(B);
2217 : *
2218 : * where lock(B) cannot sleep, and we have a dependency B -> ... -> A.
2219 : *
2220 : * Now we prove local_lock() cannot exist in that dependency. First we
2221 : * have the observation for any lock chain L1 -> ... -> Ln, for any
2222 : * 1 <= i <= n, Li.inner_wait_type <= L1.inner_wait_type, otherwise
2223 : * wait context check will complain. And since B is not a sleep lock,
2224 : * therefore B.inner_wait_type >= 2, and since the inner_wait_type of
2225 : * local_lock() is 3, which is greater than 2, therefore there is no
2226 : * way the local_lock() exists in the dependency B -> ... -> A.
2227 : *
2228 : * As a result, we will skip local_lock(), when we search for irq
2229 : * inversion bugs.
2230 : */
2231 175138 : if (entry->class->lock_type == LD_LOCK_PERCPU) {
2232 419 : if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
2233 0 : return false;
2234 :
2235 : return true;
2236 : }
2237 :
2238 : return false;
2239 : }
2240 :
2241 : /*
2242 : * Find a node in the forwards-direction dependency sub-graph starting
2243 : * at @root->class that matches @bit.
2244 : *
2245 : * Return BFS_MATCH if such a node exists in the subgraph, and put that node
2246 : * into *@target_entry.
2247 : */
2248 : static enum bfs_result
2249 889 : find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
2250 : struct lock_list **target_entry)
2251 : {
2252 889 : enum bfs_result result;
2253 :
2254 889 : debug_atomic_inc(nr_find_usage_forwards_checks);
2255 :
2256 889 : result = __bfs_forwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2257 :
2258 889 : return result;
2259 : }
2260 :
2261 : /*
2262 : * Find a node in the backwards-direction dependency sub-graph starting
2263 : * at @root->class that matches @bit.
2264 : */
2265 : static enum bfs_result
2266 1078 : find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
2267 : struct lock_list **target_entry)
2268 : {
2269 1078 : enum bfs_result result;
2270 :
2271 1078 : debug_atomic_inc(nr_find_usage_backwards_checks);
2272 :
2273 1078 : result = __bfs_backwards(root, &usage_mask, usage_match, usage_skip, target_entry);
2274 :
2275 1078 : return result;
2276 : }
2277 :
2278 0 : static void print_lock_class_header(struct lock_class *class, int depth)
2279 : {
2280 0 : int bit;
2281 :
2282 0 : printk("%*s->", depth, "");
2283 0 : print_lock_name(class);
2284 : #ifdef CONFIG_DEBUG_LOCKDEP
2285 0 : printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
2286 : #endif
2287 0 : printk(KERN_CONT " {\n");
2288 :
2289 0 : for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {
2290 0 : if (class->usage_mask & (1 << bit)) {
2291 0 : int len = depth;
2292 :
2293 0 : len += printk("%*s %s", depth, "", usage_str[bit]);
2294 0 : len += printk(KERN_CONT " at:\n");
2295 0 : print_lock_trace(class->usage_traces[bit], len);
2296 : }
2297 : }
2298 0 : printk("%*s }\n", depth, "");
2299 :
2300 0 : printk("%*s ... key at: [<%px>] %pS\n",
2301 : depth, "", class->key, class->key);
2302 0 : }
2303 :
2304 : /*
2305 : * printk the shortest lock dependencies from @start to @end in reverse order:
2306 : */
2307 : static void __used
2308 0 : print_shortest_lock_dependencies(struct lock_list *leaf,
2309 : struct lock_list *root)
2310 : {
2311 0 : struct lock_list *entry = leaf;
2312 0 : int depth;
2313 :
2314 : /*compute depth from generated tree by BFS*/
2315 0 : depth = get_lock_depth(leaf);
2316 :
2317 0 : do {
2318 0 : print_lock_class_header(entry->class, depth);
2319 0 : printk("%*s ... acquired at:\n", depth, "");
2320 0 : print_lock_trace(entry->trace, 2);
2321 0 : printk("\n");
2322 :
2323 0 : if (depth == 0 && (entry != root)) {
2324 0 : printk("lockdep:%s bad path found in chain graph\n", __func__);
2325 0 : break;
2326 : }
2327 :
2328 0 : entry = get_lock_parent(entry);
2329 0 : depth--;
2330 0 : } while (entry && (depth >= 0));
2331 0 : }
2332 :
2333 : static void
2334 0 : print_irq_lock_scenario(struct lock_list *safe_entry,
2335 : struct lock_list *unsafe_entry,
2336 : struct lock_class *prev_class,
2337 : struct lock_class *next_class)
2338 : {
2339 0 : struct lock_class *safe_class = safe_entry->class;
2340 0 : struct lock_class *unsafe_class = unsafe_entry->class;
2341 0 : struct lock_class *middle_class = prev_class;
2342 :
2343 0 : if (middle_class == safe_class)
2344 0 : middle_class = next_class;
2345 :
2346 : /*
2347 : * A direct locking problem where unsafe_class lock is taken
2348 : * directly by safe_class lock, then all we need to show
2349 : * is the deadlock scenario, as it is obvious that the
2350 : * unsafe lock is taken under the safe lock.
2351 : *
2352 : * But if there is a chain instead, where the safe lock takes
2353 : * an intermediate lock (middle_class) where this lock is
2354 : * not the same as the safe lock, then the lock chain is
2355 : * used to describe the problem. Otherwise we would need
2356 : * to show a different CPU case for each link in the chain
2357 : * from the safe_class lock to the unsafe_class lock.
2358 : */
2359 0 : if (middle_class != unsafe_class) {
2360 0 : printk("Chain exists of:\n ");
2361 0 : __print_lock_name(safe_class);
2362 0 : printk(KERN_CONT " --> ");
2363 0 : __print_lock_name(middle_class);
2364 0 : printk(KERN_CONT " --> ");
2365 0 : __print_lock_name(unsafe_class);
2366 0 : printk(KERN_CONT "\n\n");
2367 : }
2368 :
2369 0 : printk(" Possible interrupt unsafe locking scenario:\n\n");
2370 0 : printk(" CPU0 CPU1\n");
2371 0 : printk(" ---- ----\n");
2372 0 : printk(" lock(");
2373 0 : __print_lock_name(unsafe_class);
2374 0 : printk(KERN_CONT ");\n");
2375 0 : printk(" local_irq_disable();\n");
2376 0 : printk(" lock(");
2377 0 : __print_lock_name(safe_class);
2378 0 : printk(KERN_CONT ");\n");
2379 0 : printk(" lock(");
2380 0 : __print_lock_name(middle_class);
2381 0 : printk(KERN_CONT ");\n");
2382 0 : printk(" <Interrupt>\n");
2383 0 : printk(" lock(");
2384 0 : __print_lock_name(safe_class);
2385 0 : printk(KERN_CONT ");\n");
2386 0 : printk("\n *** DEADLOCK ***\n\n");
2387 0 : }
2388 :
2389 : static void
2390 0 : print_bad_irq_dependency(struct task_struct *curr,
2391 : struct lock_list *prev_root,
2392 : struct lock_list *next_root,
2393 : struct lock_list *backwards_entry,
2394 : struct lock_list *forwards_entry,
2395 : struct held_lock *prev,
2396 : struct held_lock *next,
2397 : enum lock_usage_bit bit1,
2398 : enum lock_usage_bit bit2,
2399 : const char *irqclass)
2400 : {
2401 0 : if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2402 : return;
2403 :
2404 0 : pr_warn("\n");
2405 0 : pr_warn("=====================================================\n");
2406 0 : pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2407 : irqclass, irqclass);
2408 0 : print_kernel_ident();
2409 0 : pr_warn("-----------------------------------------------------\n");
2410 0 : pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2411 : curr->comm, task_pid_nr(curr),
2412 : lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2413 : curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2414 : lockdep_hardirqs_enabled(),
2415 : curr->softirqs_enabled);
2416 0 : print_lock(next);
2417 :
2418 0 : pr_warn("\nand this task is already holding:\n");
2419 0 : print_lock(prev);
2420 0 : pr_warn("which would create a new lock dependency:\n");
2421 0 : print_lock_name(hlock_class(prev));
2422 0 : pr_cont(" ->");
2423 0 : print_lock_name(hlock_class(next));
2424 0 : pr_cont("\n");
2425 :
2426 0 : pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2427 : irqclass);
2428 0 : print_lock_name(backwards_entry->class);
2429 0 : pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2430 :
2431 0 : print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2432 :
2433 0 : pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2434 0 : print_lock_name(forwards_entry->class);
2435 0 : pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2436 0 : pr_warn("...");
2437 :
2438 0 : print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2439 :
2440 0 : pr_warn("\nother info that might help us debug this:\n\n");
2441 0 : print_irq_lock_scenario(backwards_entry, forwards_entry,
2442 : hlock_class(prev), hlock_class(next));
2443 :
2444 0 : lockdep_print_held_locks(curr);
2445 :
2446 0 : pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2447 0 : prev_root->trace = save_trace();
2448 0 : if (!prev_root->trace)
2449 : return;
2450 0 : print_shortest_lock_dependencies(backwards_entry, prev_root);
2451 :
2452 0 : pr_warn("\nthe dependencies between the lock to be acquired");
2453 0 : pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2454 0 : next_root->trace = save_trace();
2455 0 : if (!next_root->trace)
2456 : return;
2457 0 : print_shortest_lock_dependencies(forwards_entry, next_root);
2458 :
2459 0 : pr_warn("\nstack backtrace:\n");
2460 0 : dump_stack();
2461 : }
2462 :
2463 : static const char *state_names[] = {
2464 : #define LOCKDEP_STATE(__STATE) \
2465 : __stringify(__STATE),
2466 : #include "lockdep_states.h"
2467 : #undef LOCKDEP_STATE
2468 : };
2469 :
2470 : static const char *state_rnames[] = {
2471 : #define LOCKDEP_STATE(__STATE) \
2472 : __stringify(__STATE)"-READ",
2473 : #include "lockdep_states.h"
2474 : #undef LOCKDEP_STATE
2475 : };
2476 :
2477 0 : static inline const char *state_name(enum lock_usage_bit bit)
2478 : {
2479 0 : if (bit & LOCK_USAGE_READ_MASK)
2480 0 : return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2481 : else
2482 0 : return state_names[bit >> LOCK_USAGE_DIR_MASK];
2483 : }
2484 :
2485 : /*
2486 : * The bit number is encoded like:
2487 : *
2488 : * bit0: 0 exclusive, 1 read lock
2489 : * bit1: 0 used in irq, 1 irq enabled
2490 : * bit2-n: state
2491 : */
2492 1216 : static int exclusive_bit(int new_bit)
2493 : {
2494 1216 : int state = new_bit & LOCK_USAGE_STATE_MASK;
2495 1216 : int dir = new_bit & LOCK_USAGE_DIR_MASK;
2496 :
2497 : /*
2498 : * keep state, bit flip the direction and strip read.
2499 : */
2500 1216 : return state | (dir ^ LOCK_USAGE_DIR_MASK);
2501 : }
2502 :
2503 : /*
2504 : * Observe that when given a bitmask where each bitnr is encoded as above, a
2505 : * right shift of the mask transforms the individual bitnrs as -1 and
2506 : * conversely, a left shift transforms into +1 for the individual bitnrs.
2507 : *
2508 : * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can
2509 : * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2510 : * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2511 : *
2512 : * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2513 : *
2514 : * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2515 : * all bits set) and recompose with bitnr1 flipped.
2516 : */
2517 751 : static unsigned long invert_dir_mask(unsigned long mask)
2518 : {
2519 751 : unsigned long excl = 0;
2520 :
2521 : /* Invert dir */
2522 751 : excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2523 751 : excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2524 :
2525 751 : return excl;
2526 : }
2527 :
2528 : /*
2529 : * Note that a LOCK_ENABLED_IRQ_*_READ usage and a LOCK_USED_IN_IRQ_*_READ
2530 : * usage may cause deadlock too, for example:
2531 : *
2532 : * P1 P2
2533 : * <irq disabled>
2534 : * write_lock(l1); <irq enabled>
2535 : * read_lock(l2);
2536 : * write_lock(l2);
2537 : * <in irq>
2538 : * read_lock(l1);
2539 : *
2540 : * , in above case, l1 will be marked as LOCK_USED_IN_IRQ_HARDIRQ_READ and l2
2541 : * will marked as LOCK_ENABLE_IRQ_HARDIRQ_READ, and this is a possible
2542 : * deadlock.
2543 : *
2544 : * In fact, all of the following cases may cause deadlocks:
2545 : *
2546 : * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*
2547 : * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*
2548 : * LOCK_USED_IN_IRQ_* -> LOCK_ENABLED_IRQ_*_READ
2549 : * LOCK_USED_IN_IRQ_*_READ -> LOCK_ENABLED_IRQ_*_READ
2550 : *
2551 : * As a result, to calculate the "exclusive mask", first we invert the
2552 : * direction (USED_IN/ENABLED) of the original mask, and 1) for all bits with
2553 : * bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 2) for all
2554 : * bits with bitnr0 cleared (LOCK_*_READ), add those with bitnr0 set (LOCK_*).
2555 : */
2556 751 : static unsigned long exclusive_mask(unsigned long mask)
2557 : {
2558 751 : unsigned long excl = invert_dir_mask(mask);
2559 :
2560 751 : excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2561 751 : excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2562 :
2563 751 : return excl;
2564 : }
2565 :
2566 : /*
2567 : * Retrieve the _possible_ original mask to which @mask is
2568 : * exclusive. Ie: this is the opposite of exclusive_mask().
2569 : * Note that 2 possible original bits can match an exclusive
2570 : * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2571 : * cleared. So both are returned for each exclusive bit.
2572 : */
2573 0 : static unsigned long original_mask(unsigned long mask)
2574 : {
2575 0 : unsigned long excl = invert_dir_mask(mask);
2576 :
2577 : /* Include read in existing usages */
2578 0 : excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2579 0 : excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2580 :
2581 0 : return excl;
2582 : }
2583 :
2584 : /*
2585 : * Find the first pair of bit match between an original
2586 : * usage mask and an exclusive usage mask.
2587 : */
2588 0 : static int find_exclusive_match(unsigned long mask,
2589 : unsigned long excl_mask,
2590 : enum lock_usage_bit *bitp,
2591 : enum lock_usage_bit *excl_bitp)
2592 : {
2593 0 : int bit, excl, excl_read;
2594 :
2595 0 : for_each_set_bit(bit, &mask, LOCK_USED) {
2596 : /*
2597 : * exclusive_bit() strips the read bit, however,
2598 : * LOCK_ENABLED_IRQ_*_READ may cause deadlocks too, so we need
2599 : * to search excl | LOCK_USAGE_READ_MASK as well.
2600 : */
2601 0 : excl = exclusive_bit(bit);
2602 0 : excl_read = excl | LOCK_USAGE_READ_MASK;
2603 0 : if (excl_mask & lock_flag(excl)) {
2604 0 : *bitp = bit;
2605 0 : *excl_bitp = excl;
2606 0 : return 0;
2607 0 : } else if (excl_mask & lock_flag(excl_read)) {
2608 0 : *bitp = bit;
2609 0 : *excl_bitp = excl_read;
2610 0 : return 0;
2611 : }
2612 : }
2613 : return -1;
2614 : }
2615 :
2616 : /*
2617 : * Prove that the new dependency does not connect a hardirq-safe(-read)
2618 : * lock with a hardirq-unsafe lock - to achieve this we search
2619 : * the backwards-subgraph starting at <prev>, and the
2620 : * forwards-subgraph starting at <next>:
2621 : */
2622 7152 : static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2623 : struct held_lock *next)
2624 : {
2625 7152 : unsigned long usage_mask = 0, forward_mask, backward_mask;
2626 7152 : enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2627 7152 : struct lock_list *target_entry1;
2628 7152 : struct lock_list *target_entry;
2629 7152 : struct lock_list this, that;
2630 7152 : enum bfs_result ret;
2631 :
2632 : /*
2633 : * Step 1: gather all hard/soft IRQs usages backward in an
2634 : * accumulated usage mask.
2635 : */
2636 7152 : bfs_init_rootb(&this, prev);
2637 :
2638 7152 : ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2639 7152 : if (bfs_error(ret)) {
2640 0 : print_bfs_bug(ret);
2641 0 : return 0;
2642 : }
2643 :
2644 7152 : usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2645 7152 : if (!usage_mask)
2646 : return 1;
2647 :
2648 : /*
2649 : * Step 2: find exclusive uses forward that match the previous
2650 : * backward accumulated mask.
2651 : */
2652 751 : forward_mask = exclusive_mask(usage_mask);
2653 :
2654 751 : bfs_init_root(&that, next);
2655 :
2656 751 : ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2657 751 : if (bfs_error(ret)) {
2658 0 : print_bfs_bug(ret);
2659 0 : return 0;
2660 : }
2661 751 : if (ret == BFS_RNOMATCH)
2662 : return 1;
2663 :
2664 : /*
2665 : * Step 3: we found a bad match! Now retrieve a lock from the backward
2666 : * list whose usage mask matches the exclusive usage mask from the
2667 : * lock found on the forward list.
2668 : */
2669 0 : backward_mask = original_mask(target_entry1->class->usage_mask);
2670 :
2671 0 : ret = find_usage_backwards(&this, backward_mask, &target_entry);
2672 0 : if (bfs_error(ret)) {
2673 0 : print_bfs_bug(ret);
2674 0 : return 0;
2675 : }
2676 0 : if (DEBUG_LOCKS_WARN_ON(ret == BFS_RNOMATCH))
2677 0 : return 1;
2678 :
2679 : /*
2680 : * Step 4: narrow down to a pair of incompatible usage bits
2681 : * and report it.
2682 : */
2683 0 : ret = find_exclusive_match(target_entry->class->usage_mask,
2684 0 : target_entry1->class->usage_mask,
2685 : &backward_bit, &forward_bit);
2686 0 : if (DEBUG_LOCKS_WARN_ON(ret == -1))
2687 0 : return 1;
2688 :
2689 0 : print_bad_irq_dependency(curr, &this, &that,
2690 : target_entry, target_entry1,
2691 : prev, next,
2692 : backward_bit, forward_bit,
2693 : state_name(backward_bit));
2694 :
2695 0 : return 0;
2696 : }
2697 :
2698 : #else
2699 :
2700 : static inline int check_irq_usage(struct task_struct *curr,
2701 : struct held_lock *prev, struct held_lock *next)
2702 : {
2703 : return 1;
2704 : }
2705 :
2706 : static inline bool usage_skip(struct lock_list *entry, void *mask)
2707 : {
2708 : return false;
2709 : }
2710 :
2711 : #endif /* CONFIG_TRACE_IRQFLAGS */
2712 :
2713 : #ifdef CONFIG_LOCKDEP_SMALL
2714 : /*
2715 : * Check that the dependency graph starting at <src> can lead to
2716 : * <target> or not. If it can, <src> -> <target> dependency is already
2717 : * in the graph.
2718 : *
2719 : * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
2720 : * any error appears in the bfs search.
2721 : */
2722 : static noinline enum bfs_result
2723 : check_redundant(struct held_lock *src, struct held_lock *target)
2724 : {
2725 : enum bfs_result ret;
2726 : struct lock_list *target_entry;
2727 : struct lock_list src_entry;
2728 :
2729 : bfs_init_root(&src_entry, src);
2730 : /*
2731 : * Special setup for check_redundant().
2732 : *
2733 : * To report redundant, we need to find a strong dependency path that
2734 : * is equal to or stronger than <src> -> <target>. So if <src> is E,
2735 : * we need to let __bfs() only search for a path starting at a -(E*)->,
2736 : * we achieve this by setting the initial node's ->only_xr to true in
2737 : * that case. And if <prev> is S, we set initial ->only_xr to false
2738 : * because both -(S*)-> (equal) and -(E*)-> (stronger) are redundant.
2739 : */
2740 : src_entry.only_xr = src->read == 0;
2741 :
2742 : debug_atomic_inc(nr_redundant_checks);
2743 :
2744 : /*
2745 : * Note: we skip local_lock() for redundant check, because as the
2746 : * comment in usage_skip(), A -> local_lock() -> B and A -> B are not
2747 : * the same.
2748 : */
2749 : ret = check_path(target, &src_entry, hlock_equal, usage_skip, &target_entry);
2750 :
2751 : if (ret == BFS_RMATCH)
2752 : debug_atomic_inc(nr_redundant);
2753 :
2754 : return ret;
2755 : }
2756 :
2757 : #else
2758 :
2759 : static inline enum bfs_result
2760 3188 : check_redundant(struct held_lock *src, struct held_lock *target)
2761 : {
2762 3188 : return BFS_RNOMATCH;
2763 : }
2764 :
2765 : #endif
2766 :
2767 7496 : static void inc_chains(int irq_context)
2768 : {
2769 7496 : if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2770 36 : nr_hardirq_chains++;
2771 7460 : else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2772 323 : nr_softirq_chains++;
2773 : else
2774 7137 : nr_process_chains++;
2775 7496 : }
2776 :
2777 3 : static void dec_chains(int irq_context)
2778 : {
2779 3 : if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2780 0 : nr_hardirq_chains--;
2781 3 : else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2782 0 : nr_softirq_chains--;
2783 : else
2784 3 : nr_process_chains--;
2785 3 : }
2786 :
2787 : static void
2788 0 : print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2789 : {
2790 0 : struct lock_class *next = hlock_class(nxt);
2791 0 : struct lock_class *prev = hlock_class(prv);
2792 :
2793 0 : printk(" Possible unsafe locking scenario:\n\n");
2794 0 : printk(" CPU0\n");
2795 0 : printk(" ----\n");
2796 0 : printk(" lock(");
2797 0 : __print_lock_name(prev);
2798 0 : printk(KERN_CONT ");\n");
2799 0 : printk(" lock(");
2800 0 : __print_lock_name(next);
2801 0 : printk(KERN_CONT ");\n");
2802 0 : printk("\n *** DEADLOCK ***\n\n");
2803 0 : printk(" May be due to missing lock nesting notation\n\n");
2804 0 : }
2805 :
2806 : static void
2807 0 : print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2808 : struct held_lock *next)
2809 : {
2810 0 : if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2811 : return;
2812 :
2813 0 : pr_warn("\n");
2814 0 : pr_warn("============================================\n");
2815 0 : pr_warn("WARNING: possible recursive locking detected\n");
2816 0 : print_kernel_ident();
2817 0 : pr_warn("--------------------------------------------\n");
2818 0 : pr_warn("%s/%d is trying to acquire lock:\n",
2819 : curr->comm, task_pid_nr(curr));
2820 0 : print_lock(next);
2821 0 : pr_warn("\nbut task is already holding lock:\n");
2822 0 : print_lock(prev);
2823 :
2824 0 : pr_warn("\nother info that might help us debug this:\n");
2825 0 : print_deadlock_scenario(next, prev);
2826 0 : lockdep_print_held_locks(curr);
2827 :
2828 0 : pr_warn("\nstack backtrace:\n");
2829 0 : dump_stack();
2830 : }
2831 :
2832 : /*
2833 : * Check whether we are holding such a class already.
2834 : *
2835 : * (Note that this has to be done separately, because the graph cannot
2836 : * detect such classes of deadlocks.)
2837 : *
2838 : * Returns: 0 on deadlock detected, 1 on OK, 2 if another lock with the same
2839 : * lock class is held but nest_lock is also held, i.e. we rely on the
2840 : * nest_lock to avoid the deadlock.
2841 : */
2842 : static int
2843 7496 : check_deadlock(struct task_struct *curr, struct held_lock *next)
2844 : {
2845 7496 : struct held_lock *prev;
2846 7496 : struct held_lock *nest = NULL;
2847 7496 : int i;
2848 :
2849 25430 : for (i = 0; i < curr->lockdep_depth; i++) {
2850 17934 : prev = curr->held_locks + i;
2851 :
2852 17934 : if (prev->instance == next->nest_lock)
2853 0 : nest = prev;
2854 :
2855 17934 : if (hlock_class(prev) != hlock_class(next))
2856 17934 : continue;
2857 :
2858 : /*
2859 : * Allow read-after-read recursion of the same
2860 : * lock class (i.e. read_lock(lock)+read_lock(lock)):
2861 : */
2862 0 : if ((next->read == 2) && prev->read)
2863 0 : continue;
2864 :
2865 : /*
2866 : * We're holding the nest_lock, which serializes this lock's
2867 : * nesting behaviour.
2868 : */
2869 0 : if (nest)
2870 : return 2;
2871 :
2872 0 : print_deadlock_bug(curr, prev, next);
2873 0 : return 0;
2874 : }
2875 : return 1;
2876 : }
2877 :
2878 : /*
2879 : * There was a chain-cache miss, and we are about to add a new dependency
2880 : * to a previous lock. We validate the following rules:
2881 : *
2882 : * - would the adding of the <prev> -> <next> dependency create a
2883 : * circular dependency in the graph? [== circular deadlock]
2884 : *
2885 : * - does the new prev->next dependency connect any hardirq-safe lock
2886 : * (in the full backwards-subgraph starting at <prev>) with any
2887 : * hardirq-unsafe lock (in the full forwards-subgraph starting at
2888 : * <next>)? [== illegal lock inversion with hardirq contexts]
2889 : *
2890 : * - does the new prev->next dependency connect any softirq-safe lock
2891 : * (in the full backwards-subgraph starting at <prev>) with any
2892 : * softirq-unsafe lock (in the full forwards-subgraph starting at
2893 : * <next>)? [== illegal lock inversion with softirq contexts]
2894 : *
2895 : * any of these scenarios could lead to a deadlock.
2896 : *
2897 : * Then if all the validations pass, we add the forwards and backwards
2898 : * dependency.
2899 : */
2900 : static int
2901 7152 : check_prev_add(struct task_struct *curr, struct held_lock *prev,
2902 : struct held_lock *next, u16 distance,
2903 : struct lock_trace **const trace)
2904 : {
2905 7152 : struct lock_list *entry;
2906 7152 : enum bfs_result ret;
2907 :
2908 7152 : if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2909 : /*
2910 : * The warning statements below may trigger a use-after-free
2911 : * of the class name. It is better to trigger a use-after free
2912 : * and to have the class name most of the time instead of not
2913 : * having the class name available.
2914 : */
2915 0 : WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2916 : "Detected use-after-free of lock class %px/%s\n",
2917 : hlock_class(prev),
2918 : hlock_class(prev)->name);
2919 0 : WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2920 : "Detected use-after-free of lock class %px/%s\n",
2921 : hlock_class(next),
2922 : hlock_class(next)->name);
2923 0 : return 2;
2924 : }
2925 :
2926 : /*
2927 : * Prove that the new <prev> -> <next> dependency would not
2928 : * create a circular dependency in the graph. (We do this by
2929 : * a breadth-first search into the graph starting at <next>,
2930 : * and check whether we can reach <prev>.)
2931 : *
2932 : * The search is limited by the size of the circular queue (i.e.,
2933 : * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2934 : * in the graph whose neighbours are to be checked.
2935 : */
2936 7152 : ret = check_noncircular(next, prev, trace);
2937 7152 : if (unlikely(bfs_error(ret) || ret == BFS_RMATCH))
2938 : return 0;
2939 :
2940 7152 : if (!check_irq_usage(curr, prev, next))
2941 : return 0;
2942 :
2943 : /*
2944 : * Is the <prev> -> <next> dependency already present?
2945 : *
2946 : * (this may occur even though this is a new chain: consider
2947 : * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2948 : * chains - the second one will be new, but L1 already has
2949 : * L2 added to its dependency list, due to the first chain.)
2950 : */
2951 52553 : list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2952 49365 : if (entry->class == hlock_class(next)) {
2953 3964 : if (distance == 1)
2954 3409 : entry->distance = 1;
2955 3964 : entry->dep |= calc_dep(prev, next);
2956 :
2957 : /*
2958 : * Also, update the reverse dependency in @next's
2959 : * ->locks_before list.
2960 : *
2961 : * Here we reuse @entry as the cursor, which is fine
2962 : * because we won't go to the next iteration of the
2963 : * outer loop:
2964 : *
2965 : * For normal cases, we return in the inner loop.
2966 : *
2967 : * If we fail to return, we have inconsistency, i.e.
2968 : * <prev>::locks_after contains <next> while
2969 : * <next>::locks_before doesn't contain <prev>. In
2970 : * that case, we return after the inner and indicate
2971 : * something is wrong.
2972 : */
2973 120330 : list_for_each_entry(entry, &hlock_class(next)->locks_before, entry) {
2974 120330 : if (entry->class == hlock_class(prev)) {
2975 3964 : if (distance == 1)
2976 3409 : entry->distance = 1;
2977 3964 : entry->dep |= calc_depb(prev, next);
2978 3964 : return 1;
2979 : }
2980 : }
2981 :
2982 : /* <prev> is not found in <next>::locks_before */
2983 : return 0;
2984 : }
2985 : }
2986 :
2987 : /*
2988 : * Is the <prev> -> <next> link redundant?
2989 : */
2990 3188 : ret = check_redundant(prev, next);
2991 3188 : if (bfs_error(ret))
2992 : return 0;
2993 3188 : else if (ret == BFS_RMATCH)
2994 : return 2;
2995 :
2996 3188 : if (!*trace) {
2997 3149 : *trace = save_trace();
2998 3149 : if (!*trace)
2999 : return 0;
3000 : }
3001 :
3002 : /*
3003 : * Ok, all validations passed, add the new lock
3004 : * to the previous lock's dependency list:
3005 : */
3006 6376 : ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3007 3188 : &hlock_class(prev)->locks_after,
3008 : next->acquire_ip, distance,
3009 3188 : calc_dep(prev, next),
3010 : *trace);
3011 :
3012 3188 : if (!ret)
3013 : return 0;
3014 :
3015 6376 : ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3016 3188 : &hlock_class(next)->locks_before,
3017 : next->acquire_ip, distance,
3018 3188 : calc_depb(prev, next),
3019 : *trace);
3020 3188 : if (!ret)
3021 0 : return 0;
3022 :
3023 : return 2;
3024 : }
3025 :
3026 : /*
3027 : * Add the dependency to all directly-previous locks that are 'relevant'.
3028 : * The ones that are relevant are (in increasing distance from curr):
3029 : * all consecutive trylock entries and the final non-trylock entry - or
3030 : * the end of this context's lock-chain - whichever comes first.
3031 : */
3032 : static int
3033 7044 : check_prevs_add(struct task_struct *curr, struct held_lock *next)
3034 : {
3035 7044 : struct lock_trace *trace = NULL;
3036 7044 : int depth = curr->lockdep_depth;
3037 7044 : struct held_lock *hlock;
3038 :
3039 : /*
3040 : * Debugging checks.
3041 : *
3042 : * Depth must not be zero for a non-head lock:
3043 : */
3044 7044 : if (!depth)
3045 0 : goto out_bug;
3046 : /*
3047 : * At least two relevant locks must exist for this
3048 : * to be a head:
3049 : */
3050 7044 : if (curr->held_locks[depth].irq_context !=
3051 7044 : curr->held_locks[depth-1].irq_context)
3052 0 : goto out_bug;
3053 :
3054 8054 : for (;;) {
3055 8054 : u16 distance = curr->lockdep_depth - depth + 1;
3056 8054 : hlock = curr->held_locks + depth - 1;
3057 :
3058 8054 : if (hlock->check) {
3059 7152 : int ret = check_prev_add(curr, hlock, next, distance, &trace);
3060 7152 : if (!ret)
3061 : return 0;
3062 :
3063 : /*
3064 : * Stop after the first non-trylock entry,
3065 : * as non-trylock entries have added their
3066 : * own direct dependencies already, so this
3067 : * lock is connected to them indirectly:
3068 : */
3069 7152 : if (!hlock->trylock)
3070 : break;
3071 : }
3072 :
3073 1371 : depth--;
3074 : /*
3075 : * End of lock-stack?
3076 : */
3077 1371 : if (!depth)
3078 : break;
3079 : /*
3080 : * Stop the search if we cross into another context:
3081 : */
3082 1023 : if (curr->held_locks[depth].irq_context !=
3083 1023 : curr->held_locks[depth-1].irq_context)
3084 : break;
3085 : }
3086 : return 1;
3087 0 : out_bug:
3088 0 : if (!debug_locks_off_graph_unlock())
3089 : return 0;
3090 :
3091 : /*
3092 : * Clearly we all shouldn't be here, but since we made it we
3093 : * can reliable say we messed up our state. See the above two
3094 : * gotos for reasons why we could possibly end up here.
3095 : */
3096 0 : WARN_ON(1);
3097 :
3098 0 : return 0;
3099 : }
3100 :
3101 : struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
3102 : static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
3103 : static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
3104 : unsigned long nr_zapped_lock_chains;
3105 : unsigned int nr_free_chain_hlocks; /* Free chain_hlocks in buckets */
3106 : unsigned int nr_lost_chain_hlocks; /* Lost chain_hlocks */
3107 : unsigned int nr_large_chain_blocks; /* size > MAX_CHAIN_BUCKETS */
3108 :
3109 : /*
3110 : * The first 2 chain_hlocks entries in the chain block in the bucket
3111 : * list contains the following meta data:
3112 : *
3113 : * entry[0]:
3114 : * Bit 15 - always set to 1 (it is not a class index)
3115 : * Bits 0-14 - upper 15 bits of the next block index
3116 : * entry[1] - lower 16 bits of next block index
3117 : *
3118 : * A next block index of all 1 bits means it is the end of the list.
3119 : *
3120 : * On the unsized bucket (bucket-0), the 3rd and 4th entries contain
3121 : * the chain block size:
3122 : *
3123 : * entry[2] - upper 16 bits of the chain block size
3124 : * entry[3] - lower 16 bits of the chain block size
3125 : */
3126 : #define MAX_CHAIN_BUCKETS 16
3127 : #define CHAIN_BLK_FLAG (1U << 15)
3128 : #define CHAIN_BLK_LIST_END 0xFFFFU
3129 :
3130 : static int chain_block_buckets[MAX_CHAIN_BUCKETS];
3131 :
3132 14993 : static inline int size_to_bucket(int size)
3133 : {
3134 14993 : if (size > MAX_CHAIN_BUCKETS)
3135 : return 0;
3136 :
3137 3 : return size - 1;
3138 : }
3139 :
3140 : /*
3141 : * Iterate all the chain blocks in a bucket.
3142 : */
3143 : #define for_each_chain_block(bucket, prev, curr) \
3144 : for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3145 : (curr) >= 0; \
3146 : (prev) = (curr), (curr) = chain_block_next(curr))
3147 :
3148 : /*
3149 : * next block or -1
3150 : */
3151 7496 : static inline int chain_block_next(int offset)
3152 : {
3153 7496 : int next = chain_hlocks[offset];
3154 :
3155 7496 : WARN_ON_ONCE(!(next & CHAIN_BLK_FLAG));
3156 :
3157 7496 : if (next == CHAIN_BLK_LIST_END)
3158 : return -1;
3159 :
3160 1 : next &= ~CHAIN_BLK_FLAG;
3161 1 : next <<= 16;
3162 1 : next |= chain_hlocks[offset + 1];
3163 :
3164 1 : return next;
3165 : }
3166 :
3167 : /*
3168 : * bucket-0 only
3169 : */
3170 7493 : static inline int chain_block_size(int offset)
3171 : {
3172 7493 : return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
3173 : }
3174 :
3175 7497 : static inline void init_chain_block(int offset, int next, int bucket, int size)
3176 : {
3177 7497 : chain_hlocks[offset] = (next >> 16) | CHAIN_BLK_FLAG;
3178 7497 : chain_hlocks[offset + 1] = (u16)next;
3179 :
3180 7497 : if (size && !bucket) {
3181 7494 : chain_hlocks[offset + 2] = size >> 16;
3182 7494 : chain_hlocks[offset + 3] = (u16)size;
3183 : }
3184 7497 : }
3185 :
3186 7497 : static inline void add_chain_block(int offset, int size)
3187 : {
3188 7497 : int bucket = size_to_bucket(size);
3189 7497 : int next = chain_block_buckets[bucket];
3190 7497 : int prev, curr;
3191 :
3192 7497 : if (unlikely(size < 2)) {
3193 : /*
3194 : * We can't store single entries on the freelist. Leak them.
3195 : *
3196 : * One possible way out would be to uniquely mark them, other
3197 : * than with CHAIN_BLK_FLAG, such that we can recover them when
3198 : * the block before it is re-added.
3199 : */
3200 0 : if (size)
3201 0 : nr_lost_chain_hlocks++;
3202 0 : return;
3203 : }
3204 :
3205 7497 : nr_free_chain_hlocks += size;
3206 7497 : if (!bucket) {
3207 7494 : nr_large_chain_blocks++;
3208 :
3209 : /*
3210 : * Variable sized, sort large to small.
3211 : */
3212 7494 : for_each_chain_block(0, prev, curr) {
3213 0 : if (size >= chain_block_size(curr))
3214 : break;
3215 : }
3216 7494 : init_chain_block(offset, curr, 0, size);
3217 7494 : if (prev < 0)
3218 7494 : chain_block_buckets[0] = offset;
3219 : else
3220 0 : init_chain_block(prev, offset, 0, 0);
3221 7494 : return;
3222 : }
3223 : /*
3224 : * Fixed size, add to head.
3225 : */
3226 3 : init_chain_block(offset, next, bucket, size);
3227 3 : chain_block_buckets[bucket] = offset;
3228 : }
3229 :
3230 : /*
3231 : * Only the first block in the list can be deleted.
3232 : *
3233 : * For the variable size bucket[0], the first block (the largest one) is
3234 : * returned, broken up and put back into the pool. So if a chain block of
3235 : * length > MAX_CHAIN_BUCKETS is ever used and zapped, it will just be
3236 : * queued up after the primordial chain block and never be used until the
3237 : * hlock entries in the primordial chain block is almost used up. That
3238 : * causes fragmentation and reduce allocation efficiency. That can be
3239 : * monitored by looking at the "large chain blocks" number in lockdep_stats.
3240 : */
3241 7496 : static inline void del_chain_block(int bucket, int size, int next)
3242 : {
3243 7496 : nr_free_chain_hlocks -= size;
3244 7496 : chain_block_buckets[bucket] = next;
3245 :
3246 7496 : if (!bucket)
3247 7493 : nr_large_chain_blocks--;
3248 : }
3249 :
3250 1 : static void init_chain_block_buckets(void)
3251 : {
3252 1 : int i;
3253 :
3254 17 : for (i = 0; i < MAX_CHAIN_BUCKETS; i++)
3255 16 : chain_block_buckets[i] = -1;
3256 :
3257 1 : add_chain_block(0, ARRAY_SIZE(chain_hlocks));
3258 1 : }
3259 :
3260 : /*
3261 : * Return offset of a chain block of the right size or -1 if not found.
3262 : *
3263 : * Fairly simple worst-fit allocator with the addition of a number of size
3264 : * specific free lists.
3265 : */
3266 7496 : static int alloc_chain_hlocks(int req)
3267 : {
3268 7496 : int bucket, curr, size;
3269 :
3270 : /*
3271 : * We rely on the MSB to act as an escape bit to denote freelist
3272 : * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3273 : */
3274 7496 : BUILD_BUG_ON((MAX_LOCKDEP_KEYS-1) & CHAIN_BLK_FLAG);
3275 :
3276 7496 : init_data_structures_once();
3277 :
3278 7496 : if (nr_free_chain_hlocks < req)
3279 : return -1;
3280 :
3281 : /*
3282 : * We require a minimum of 2 (u16) entries to encode a freelist
3283 : * 'pointer'.
3284 : */
3285 7496 : req = max(req, 2);
3286 7496 : bucket = size_to_bucket(req);
3287 7496 : curr = chain_block_buckets[bucket];
3288 :
3289 0 : if (bucket) {
3290 7496 : if (curr >= 0) {
3291 3 : del_chain_block(bucket, req, chain_block_next(curr));
3292 3 : return curr;
3293 : }
3294 : /* Try bucket 0 */
3295 7493 : curr = chain_block_buckets[0];
3296 : }
3297 :
3298 : /*
3299 : * The variable sized freelist is sorted by size; the first entry is
3300 : * the largest. Use it if it fits.
3301 : */
3302 7493 : if (curr >= 0) {
3303 7493 : size = chain_block_size(curr);
3304 7493 : if (likely(size >= req)) {
3305 7493 : del_chain_block(0, size, chain_block_next(curr));
3306 7493 : add_chain_block(curr + req, size - req);
3307 7493 : return curr;
3308 : }
3309 : }
3310 :
3311 : /*
3312 : * Last resort, split a block in a larger sized bucket.
3313 : */
3314 0 : for (size = MAX_CHAIN_BUCKETS; size > req; size--) {
3315 0 : bucket = size_to_bucket(size);
3316 0 : curr = chain_block_buckets[bucket];
3317 0 : if (curr < 0)
3318 0 : continue;
3319 :
3320 0 : del_chain_block(bucket, size, chain_block_next(curr));
3321 0 : add_chain_block(curr + req, size - req);
3322 0 : return curr;
3323 : }
3324 :
3325 : return -1;
3326 : }
3327 :
3328 3 : static inline void free_chain_hlocks(int base, int size)
3329 : {
3330 3 : add_chain_block(base, max(size, 2));
3331 : }
3332 :
3333 0 : struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3334 : {
3335 0 : u16 chain_hlock = chain_hlocks[chain->base + i];
3336 0 : unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
3337 :
3338 0 : return lock_classes + class_idx - 1;
3339 : }
3340 :
3341 : /*
3342 : * Returns the index of the first held_lock of the current chain
3343 : */
3344 11545311 : static inline int get_first_held_lock(struct task_struct *curr,
3345 : struct held_lock *hlock)
3346 : {
3347 11545311 : int i;
3348 11545311 : struct held_lock *hlock_curr;
3349 :
3350 20862029 : for (i = curr->lockdep_depth - 1; i >= 0; i--) {
3351 9736488 : hlock_curr = curr->held_locks + i;
3352 9736488 : if (hlock_curr->irq_context != hlock->irq_context)
3353 : break;
3354 :
3355 : }
3356 :
3357 11545311 : return ++i;
3358 : }
3359 :
3360 : #ifdef CONFIG_DEBUG_LOCKDEP
3361 : /*
3362 : * Returns the next chain_key iteration
3363 : */
3364 0 : static u64 print_chain_key_iteration(u16 hlock_id, u64 chain_key)
3365 : {
3366 0 : u64 new_chain_key = iterate_chain_key(chain_key, hlock_id);
3367 :
3368 0 : printk(" hlock_id:%d -> chain_key:%016Lx",
3369 : (unsigned int)hlock_id,
3370 : (unsigned long long)new_chain_key);
3371 0 : return new_chain_key;
3372 : }
3373 :
3374 : static void
3375 0 : print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
3376 : {
3377 0 : struct held_lock *hlock;
3378 0 : u64 chain_key = INITIAL_CHAIN_KEY;
3379 0 : int depth = curr->lockdep_depth;
3380 0 : int i = get_first_held_lock(curr, hlock_next);
3381 :
3382 0 : printk("depth: %u (irq_context %u)\n", depth - i + 1,
3383 0 : hlock_next->irq_context);
3384 0 : for (; i < depth; i++) {
3385 0 : hlock = curr->held_locks + i;
3386 0 : chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3387 :
3388 0 : print_lock(hlock);
3389 : }
3390 :
3391 0 : print_chain_key_iteration(hlock_id(hlock_next), chain_key);
3392 0 : print_lock(hlock_next);
3393 0 : }
3394 :
3395 0 : static void print_chain_keys_chain(struct lock_chain *chain)
3396 : {
3397 0 : int i;
3398 0 : u64 chain_key = INITIAL_CHAIN_KEY;
3399 0 : u16 hlock_id;
3400 :
3401 0 : printk("depth: %u\n", chain->depth);
3402 0 : for (i = 0; i < chain->depth; i++) {
3403 0 : hlock_id = chain_hlocks[chain->base + i];
3404 0 : chain_key = print_chain_key_iteration(hlock_id, chain_key);
3405 :
3406 0 : print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
3407 0 : printk("\n");
3408 : }
3409 0 : }
3410 :
3411 0 : static void print_collision(struct task_struct *curr,
3412 : struct held_lock *hlock_next,
3413 : struct lock_chain *chain)
3414 : {
3415 0 : pr_warn("\n");
3416 0 : pr_warn("============================\n");
3417 0 : pr_warn("WARNING: chain_key collision\n");
3418 0 : print_kernel_ident();
3419 0 : pr_warn("----------------------------\n");
3420 0 : pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
3421 0 : pr_warn("Hash chain already cached but the contents don't match!\n");
3422 :
3423 0 : pr_warn("Held locks:");
3424 0 : print_chain_keys_held_locks(curr, hlock_next);
3425 :
3426 0 : pr_warn("Locks in cached chain:");
3427 0 : print_chain_keys_chain(chain);
3428 :
3429 0 : pr_warn("\nstack backtrace:\n");
3430 0 : dump_stack();
3431 0 : }
3432 : #endif
3433 :
3434 : /*
3435 : * Checks whether the chain and the current held locks are consistent
3436 : * in depth and also in content. If they are not it most likely means
3437 : * that there was a collision during the calculation of the chain_key.
3438 : * Returns: 0 not passed, 1 passed
3439 : */
3440 11547717 : static int check_no_collision(struct task_struct *curr,
3441 : struct held_lock *hlock,
3442 : struct lock_chain *chain)
3443 : {
3444 : #ifdef CONFIG_DEBUG_LOCKDEP
3445 11547717 : int i, j, id;
3446 :
3447 11547717 : i = get_first_held_lock(curr, hlock);
3448 :
3449 11547717 : if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3450 0 : print_collision(curr, hlock, chain);
3451 0 : return 0;
3452 : }
3453 :
3454 20847941 : for (j = 0; j < chain->depth - 1; j++, i++) {
3455 9300224 : id = hlock_id(&curr->held_locks[i]);
3456 :
3457 9300224 : if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3458 0 : print_collision(curr, hlock, chain);
3459 0 : return 0;
3460 : }
3461 : }
3462 : #endif
3463 : return 1;
3464 : }
3465 :
3466 : /*
3467 : * Given an index that is >= -1, return the index of the next lock chain.
3468 : * Return -2 if there is no next lock chain.
3469 : */
3470 0 : long lockdep_next_lockchain(long i)
3471 : {
3472 0 : i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
3473 0 : return i < ARRAY_SIZE(lock_chains) ? i : -2;
3474 : }
3475 :
3476 0 : unsigned long lock_chain_count(void)
3477 : {
3478 0 : return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
3479 : }
3480 :
3481 : /* Must be called with the graph lock held. */
3482 7496 : static struct lock_chain *alloc_lock_chain(void)
3483 : {
3484 7496 : int idx = find_first_zero_bit(lock_chains_in_use,
3485 : ARRAY_SIZE(lock_chains));
3486 :
3487 7496 : if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
3488 : return NULL;
3489 7496 : __set_bit(idx, lock_chains_in_use);
3490 7496 : return lock_chains + idx;
3491 : }
3492 :
3493 : /*
3494 : * Adds a dependency chain into chain hashtable. And must be called with
3495 : * graph_lock held.
3496 : *
3497 : * Return 0 if fail, and graph_lock is released.
3498 : * Return 1 if succeed, with graph_lock held.
3499 : */
3500 7496 : static inline int add_chain_cache(struct task_struct *curr,
3501 : struct held_lock *hlock,
3502 : u64 chain_key)
3503 : {
3504 7496 : struct hlist_head *hash_head = chainhashentry(chain_key);
3505 7496 : struct lock_chain *chain;
3506 7496 : int i, j;
3507 :
3508 : /*
3509 : * The caller must hold the graph lock, ensure we've got IRQs
3510 : * disabled to make this an IRQ-safe lock.. for recursion reasons
3511 : * lockdep won't complain about its own locking errors.
3512 : */
3513 7496 : if (lockdep_assert_locked())
3514 : return 0;
3515 :
3516 7496 : chain = alloc_lock_chain();
3517 7496 : if (!chain) {
3518 0 : if (!debug_locks_off_graph_unlock())
3519 : return 0;
3520 :
3521 0 : print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
3522 0 : dump_stack();
3523 0 : return 0;
3524 : }
3525 7496 : chain->chain_key = chain_key;
3526 7496 : chain->irq_context = hlock->irq_context;
3527 7496 : i = get_first_held_lock(curr, hlock);
3528 7496 : chain->depth = curr->lockdep_depth + 1 - i;
3529 :
3530 7496 : BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
3531 7496 : BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
3532 7496 : BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
3533 :
3534 7496 : j = alloc_chain_hlocks(chain->depth);
3535 7496 : if (j < 0) {
3536 0 : if (!debug_locks_off_graph_unlock())
3537 : return 0;
3538 :
3539 0 : print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
3540 0 : dump_stack();
3541 0 : return 0;
3542 : }
3543 :
3544 7496 : chain->base = j;
3545 25280 : for (j = 0; j < chain->depth - 1; j++, i++) {
3546 17784 : int lock_id = hlock_id(curr->held_locks + i);
3547 :
3548 17784 : chain_hlocks[chain->base + j] = lock_id;
3549 : }
3550 7496 : chain_hlocks[chain->base + j] = hlock_id(hlock);
3551 7496 : hlist_add_head_rcu(&chain->entry, hash_head);
3552 7496 : debug_atomic_inc(chain_lookup_misses);
3553 7496 : inc_chains(chain->irq_context);
3554 :
3555 7496 : return 1;
3556 : }
3557 :
3558 : /*
3559 : * Look up a dependency chain. Must be called with either the graph lock or
3560 : * the RCU read lock held.
3561 : */
3562 11542307 : static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
3563 : {
3564 11542307 : struct hlist_head *hash_head = chainhashentry(chain_key);
3565 11542307 : struct lock_chain *chain;
3566 :
3567 24091778 : hlist_for_each_entry_rcu(chain, hash_head, entry) {
3568 12534479 : if (READ_ONCE(chain->chain_key) == chain_key) {
3569 11527315 : debug_atomic_inc(chain_lookup_hits);
3570 11534897 : return chain;
3571 : }
3572 : }
3573 : return NULL;
3574 : }
3575 :
3576 : /*
3577 : * If the key is not present yet in dependency chain cache then
3578 : * add it and return 1 - in this case the new dependency chain is
3579 : * validated. If the key is already hashed, return 0.
3580 : * (On return with 1 graph_lock is held.)
3581 : */
3582 11530999 : static inline int lookup_chain_cache_add(struct task_struct *curr,
3583 : struct held_lock *hlock,
3584 : u64 chain_key)
3585 : {
3586 11530999 : struct lock_class *class = hlock_class(hlock);
3587 11529153 : struct lock_chain *chain = lookup_chain_cache(chain_key);
3588 :
3589 11551194 : if (chain) {
3590 11543696 : cache_hit:
3591 11543698 : if (!check_no_collision(curr, hlock, chain))
3592 : return 0;
3593 :
3594 11560156 : if (very_verbose(class)) {
3595 : printk("\nhash chain already cached, key: "
3596 : "%016Lx tail class: [%px] %s\n",
3597 : (unsigned long long)chain_key,
3598 : class->key, class->name);
3599 : }
3600 :
3601 : return 0;
3602 : }
3603 :
3604 7498 : if (very_verbose(class)) {
3605 : printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3606 : (unsigned long long)chain_key, class->key, class->name);
3607 : }
3608 :
3609 7498 : if (!graph_lock())
3610 : return 0;
3611 :
3612 : /*
3613 : * We have to walk the chain again locked - to avoid duplicates:
3614 : */
3615 7498 : chain = lookup_chain_cache(chain_key);
3616 7498 : if (chain) {
3617 2 : graph_unlock();
3618 2 : goto cache_hit;
3619 : }
3620 :
3621 7496 : if (!add_chain_cache(curr, hlock, chain_key))
3622 0 : return 0;
3623 :
3624 : return 1;
3625 : }
3626 :
3627 13458264 : static int validate_chain(struct task_struct *curr,
3628 : struct held_lock *hlock,
3629 : int chain_head, u64 chain_key)
3630 : {
3631 : /*
3632 : * Trylock needs to maintain the stack of held locks, but it
3633 : * does not add new dependencies, because trylock can be done
3634 : * in any order.
3635 : *
3636 : * We look up the chain_key and do the O(N^2) check and update of
3637 : * the dependencies only if this is a new dependency chain.
3638 : * (If lookup_chain_cache_add() return with 1 it acquires
3639 : * graph_lock for us)
3640 : */
3641 25030274 : if (!hlock->trylock && hlock->check &&
3642 11527865 : lookup_chain_cache_add(curr, hlock, chain_key)) {
3643 : /*
3644 : * Check whether last held lock:
3645 : *
3646 : * - is irq-safe, if this lock is irq-unsafe
3647 : * - is softirq-safe, if this lock is hardirq-unsafe
3648 : *
3649 : * And check whether the new lock's dependency graph
3650 : * could lead back to the previous lock:
3651 : *
3652 : * - within the current held-lock stack
3653 : * - across our accumulated lock dependency records
3654 : *
3655 : * any of these scenarios could lead to a deadlock.
3656 : */
3657 : /*
3658 : * The simple case: does the current hold the same lock
3659 : * already?
3660 : */
3661 7496 : int ret = check_deadlock(curr, hlock);
3662 :
3663 7496 : if (!ret)
3664 : return 0;
3665 : /*
3666 : * Add dependency only if this lock is not the head
3667 : * of the chain, and if the new lock introduces no more
3668 : * lock dependency (because we already hold a lock with the
3669 : * same lock class) nor deadlock (because the nest_lock
3670 : * serializes nesting locks), see the comments for
3671 : * check_deadlock().
3672 : */
3673 7496 : if (!chain_head && ret != 2) {
3674 7044 : if (!check_prevs_add(curr, hlock))
3675 : return 0;
3676 : }
3677 :
3678 7496 : graph_unlock();
3679 : } else {
3680 : /* after lookup_chain_cache_add(): */
3681 13494913 : if (unlikely(!debug_locks))
3682 0 : return 0;
3683 : }
3684 :
3685 : return 1;
3686 : }
3687 : #else
3688 : static inline int validate_chain(struct task_struct *curr,
3689 : struct held_lock *hlock,
3690 : int chain_head, u64 chain_key)
3691 : {
3692 : return 1;
3693 : }
3694 :
3695 : static void init_chain_block_buckets(void) { }
3696 : #endif /* CONFIG_PROVE_LOCKING */
3697 :
3698 : /*
3699 : * We are building curr_chain_key incrementally, so double-check
3700 : * it from scratch, to make sure that it's done correctly:
3701 : */
3702 26563605 : static void check_chain_key(struct task_struct *curr)
3703 : {
3704 : #ifdef CONFIG_DEBUG_LOCKDEP
3705 26563605 : struct held_lock *hlock, *prev_hlock = NULL;
3706 26563605 : unsigned int i;
3707 26563605 : u64 chain_key = INITIAL_CHAIN_KEY;
3708 :
3709 62525640 : for (i = 0; i < curr->lockdep_depth; i++) {
3710 35968015 : hlock = curr->held_locks + i;
3711 35968015 : if (chain_key != hlock->prev_chain_key) {
3712 0 : debug_locks_off();
3713 : /*
3714 : * We got mighty confused, our chain keys don't match
3715 : * with what we expect, someone trample on our task state?
3716 : */
3717 0 : WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3718 : curr->lockdep_depth, i,
3719 : (unsigned long long)chain_key,
3720 : (unsigned long long)hlock->prev_chain_key);
3721 0 : return;
3722 : }
3723 :
3724 : /*
3725 : * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3726 : * it registered lock class index?
3727 : */
3728 35968015 : if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3729 0 : return;
3730 :
3731 35962035 : if (prev_hlock && (prev_hlock->irq_context !=
3732 : hlock->irq_context))
3733 731778 : chain_key = INITIAL_CHAIN_KEY;
3734 35962035 : chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3735 35962035 : prev_hlock = hlock;
3736 : }
3737 26557625 : if (chain_key != curr->curr_chain_key) {
3738 0 : debug_locks_off();
3739 : /*
3740 : * More smoking hash instead of calculating it, damn see these
3741 : * numbers float.. I bet that a pink elephant stepped on my memory.
3742 : */
3743 0 : WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3744 : curr->lockdep_depth, i,
3745 : (unsigned long long)chain_key,
3746 : (unsigned long long)curr->curr_chain_key);
3747 : }
3748 : #endif
3749 : }
3750 :
3751 : #ifdef CONFIG_PROVE_LOCKING
3752 : static int mark_lock(struct task_struct *curr, struct held_lock *this,
3753 : enum lock_usage_bit new_bit);
3754 :
3755 0 : static void print_usage_bug_scenario(struct held_lock *lock)
3756 : {
3757 0 : struct lock_class *class = hlock_class(lock);
3758 :
3759 0 : printk(" Possible unsafe locking scenario:\n\n");
3760 0 : printk(" CPU0\n");
3761 0 : printk(" ----\n");
3762 0 : printk(" lock(");
3763 0 : __print_lock_name(class);
3764 0 : printk(KERN_CONT ");\n");
3765 0 : printk(" <Interrupt>\n");
3766 0 : printk(" lock(");
3767 0 : __print_lock_name(class);
3768 0 : printk(KERN_CONT ");\n");
3769 0 : printk("\n *** DEADLOCK ***\n\n");
3770 0 : }
3771 :
3772 : static void
3773 0 : print_usage_bug(struct task_struct *curr, struct held_lock *this,
3774 : enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3775 : {
3776 0 : if (!debug_locks_off() || debug_locks_silent)
3777 : return;
3778 :
3779 0 : pr_warn("\n");
3780 0 : pr_warn("================================\n");
3781 0 : pr_warn("WARNING: inconsistent lock state\n");
3782 0 : print_kernel_ident();
3783 0 : pr_warn("--------------------------------\n");
3784 :
3785 0 : pr_warn("inconsistent {%s} -> {%s} usage.\n",
3786 : usage_str[prev_bit], usage_str[new_bit]);
3787 :
3788 0 : pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3789 : curr->comm, task_pid_nr(curr),
3790 : lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3791 : lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3792 : lockdep_hardirqs_enabled(),
3793 : lockdep_softirqs_enabled(curr));
3794 0 : print_lock(this);
3795 :
3796 0 : pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3797 0 : print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3798 :
3799 0 : print_irqtrace_events(curr);
3800 0 : pr_warn("\nother info that might help us debug this:\n");
3801 0 : print_usage_bug_scenario(this);
3802 :
3803 0 : lockdep_print_held_locks(curr);
3804 :
3805 0 : pr_warn("\nstack backtrace:\n");
3806 0 : dump_stack();
3807 : }
3808 :
3809 : /*
3810 : * Print out an error if an invalid bit is set:
3811 : */
3812 : static inline int
3813 2260 : valid_state(struct task_struct *curr, struct held_lock *this,
3814 : enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3815 : {
3816 2260 : if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3817 0 : graph_unlock();
3818 0 : print_usage_bug(curr, this, bad_bit, new_bit);
3819 0 : return 0;
3820 : }
3821 : return 1;
3822 : }
3823 :
3824 :
3825 : /*
3826 : * print irq inversion bug:
3827 : */
3828 : static void
3829 0 : print_irq_inversion_bug(struct task_struct *curr,
3830 : struct lock_list *root, struct lock_list *other,
3831 : struct held_lock *this, int forwards,
3832 : const char *irqclass)
3833 : {
3834 0 : struct lock_list *entry = other;
3835 0 : struct lock_list *middle = NULL;
3836 0 : int depth;
3837 :
3838 0 : if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3839 : return;
3840 :
3841 0 : pr_warn("\n");
3842 0 : pr_warn("========================================================\n");
3843 0 : pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3844 0 : print_kernel_ident();
3845 0 : pr_warn("--------------------------------------------------------\n");
3846 0 : pr_warn("%s/%d just changed the state of lock:\n",
3847 : curr->comm, task_pid_nr(curr));
3848 0 : print_lock(this);
3849 0 : if (forwards)
3850 0 : pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3851 : else
3852 0 : pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3853 0 : print_lock_name(other->class);
3854 0 : pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3855 :
3856 0 : pr_warn("\nother info that might help us debug this:\n");
3857 :
3858 : /* Find a middle lock (if one exists) */
3859 0 : depth = get_lock_depth(other);
3860 0 : do {
3861 0 : if (depth == 0 && (entry != root)) {
3862 0 : pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3863 0 : break;
3864 : }
3865 0 : middle = entry;
3866 0 : entry = get_lock_parent(entry);
3867 0 : depth--;
3868 0 : } while (entry && entry != root && (depth >= 0));
3869 0 : if (forwards)
3870 0 : print_irq_lock_scenario(root, other,
3871 : middle ? middle->class : root->class, other->class);
3872 : else
3873 0 : print_irq_lock_scenario(other, root,
3874 : middle ? middle->class : other->class, root->class);
3875 :
3876 0 : lockdep_print_held_locks(curr);
3877 :
3878 0 : pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3879 0 : root->trace = save_trace();
3880 0 : if (!root->trace)
3881 : return;
3882 0 : print_shortest_lock_dependencies(other, root);
3883 :
3884 0 : pr_warn("\nstack backtrace:\n");
3885 0 : dump_stack();
3886 : }
3887 :
3888 : /*
3889 : * Prove that in the forwards-direction subgraph starting at <this>
3890 : * there is no lock matching <mask>:
3891 : */
3892 : static int
3893 138 : check_usage_forwards(struct task_struct *curr, struct held_lock *this,
3894 : enum lock_usage_bit bit)
3895 : {
3896 138 : enum bfs_result ret;
3897 138 : struct lock_list root;
3898 138 : struct lock_list *target_entry;
3899 138 : enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
3900 138 : unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
3901 :
3902 138 : bfs_init_root(&root, this);
3903 138 : ret = find_usage_forwards(&root, usage_mask, &target_entry);
3904 138 : if (bfs_error(ret)) {
3905 0 : print_bfs_bug(ret);
3906 0 : return 0;
3907 : }
3908 138 : if (ret == BFS_RNOMATCH)
3909 : return 1;
3910 :
3911 : /* Check whether write or read usage is the match */
3912 0 : if (target_entry->class->usage_mask & lock_flag(bit)) {
3913 0 : print_irq_inversion_bug(curr, &root, target_entry,
3914 : this, 1, state_name(bit));
3915 : } else {
3916 0 : print_irq_inversion_bug(curr, &root, target_entry,
3917 : this, 1, state_name(read_bit));
3918 : }
3919 :
3920 : return 0;
3921 : }
3922 :
3923 : /*
3924 : * Prove that in the backwards-direction subgraph starting at <this>
3925 : * there is no lock matching <mask>:
3926 : */
3927 : static int
3928 1078 : check_usage_backwards(struct task_struct *curr, struct held_lock *this,
3929 : enum lock_usage_bit bit)
3930 : {
3931 1078 : enum bfs_result ret;
3932 1078 : struct lock_list root;
3933 1078 : struct lock_list *target_entry;
3934 1078 : enum lock_usage_bit read_bit = bit + LOCK_USAGE_READ_MASK;
3935 1078 : unsigned usage_mask = lock_flag(bit) | lock_flag(read_bit);
3936 :
3937 1078 : bfs_init_rootb(&root, this);
3938 1078 : ret = find_usage_backwards(&root, usage_mask, &target_entry);
3939 1078 : if (bfs_error(ret)) {
3940 0 : print_bfs_bug(ret);
3941 0 : return 0;
3942 : }
3943 1078 : if (ret == BFS_RNOMATCH)
3944 : return 1;
3945 :
3946 : /* Check whether write or read usage is the match */
3947 0 : if (target_entry->class->usage_mask & lock_flag(bit)) {
3948 0 : print_irq_inversion_bug(curr, &root, target_entry,
3949 : this, 0, state_name(bit));
3950 : } else {
3951 0 : print_irq_inversion_bug(curr, &root, target_entry,
3952 : this, 0, state_name(read_bit));
3953 : }
3954 :
3955 : return 0;
3956 : }
3957 :
3958 1 : void print_irqtrace_events(struct task_struct *curr)
3959 : {
3960 1 : const struct irqtrace_events *trace = &curr->irqtrace;
3961 :
3962 1 : printk("irq event stamp: %u\n", trace->irq_events);
3963 1 : printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
3964 : trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
3965 1 : (void *)trace->hardirq_enable_ip);
3966 1 : printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3967 : trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
3968 1 : (void *)trace->hardirq_disable_ip);
3969 1 : printk("softirqs last enabled at (%u): [<%px>] %pS\n",
3970 : trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
3971 1 : (void *)trace->softirq_enable_ip);
3972 1 : printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3973 : trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
3974 1 : (void *)trace->softirq_disable_ip);
3975 1 : }
3976 :
3977 598 : static int HARDIRQ_verbose(struct lock_class *class)
3978 : {
3979 : #if HARDIRQ_VERBOSE
3980 : return class_filter(class);
3981 : #endif
3982 598 : return 0;
3983 : }
3984 :
3985 618 : static int SOFTIRQ_verbose(struct lock_class *class)
3986 : {
3987 : #if SOFTIRQ_VERBOSE
3988 : return class_filter(class);
3989 : #endif
3990 618 : return 0;
3991 : }
3992 :
3993 : static int (*state_verbose_f[])(struct lock_class *class) = {
3994 : #define LOCKDEP_STATE(__STATE) \
3995 : __STATE##_verbose,
3996 : #include "lockdep_states.h"
3997 : #undef LOCKDEP_STATE
3998 : };
3999 :
4000 1216 : static inline int state_verbose(enum lock_usage_bit bit,
4001 : struct lock_class *class)
4002 : {
4003 1216 : return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
4004 : }
4005 :
4006 : typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
4007 : enum lock_usage_bit bit, const char *name);
4008 :
4009 : static int
4010 1216 : mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4011 : enum lock_usage_bit new_bit)
4012 : {
4013 1216 : int excl_bit = exclusive_bit(new_bit);
4014 1216 : int read = new_bit & LOCK_USAGE_READ_MASK;
4015 1216 : int dir = new_bit & LOCK_USAGE_DIR_MASK;
4016 :
4017 : /*
4018 : * Validate that this particular lock does not have conflicting
4019 : * usage states.
4020 : */
4021 1216 : if (!valid_state(curr, this, new_bit, excl_bit))
4022 : return 0;
4023 :
4024 : /*
4025 : * Check for read in write conflicts
4026 : */
4027 2260 : if (!read && !valid_state(curr, this, new_bit,
4028 1044 : excl_bit + LOCK_USAGE_READ_MASK))
4029 : return 0;
4030 :
4031 :
4032 : /*
4033 : * Validate that the lock dependencies don't have conflicting usage
4034 : * states.
4035 : */
4036 1216 : if (dir) {
4037 : /*
4038 : * mark ENABLED has to look backwards -- to ensure no dependee
4039 : * has USED_IN state, which, again, would allow recursion deadlocks.
4040 : */
4041 1078 : if (!check_usage_backwards(curr, this, excl_bit))
4042 : return 0;
4043 : } else {
4044 : /*
4045 : * mark USED_IN has to look forwards -- to ensure no dependency
4046 : * has ENABLED state, which would allow recursion deadlocks.
4047 : */
4048 138 : if (!check_usage_forwards(curr, this, excl_bit))
4049 : return 0;
4050 : }
4051 :
4052 1216 : if (state_verbose(new_bit, hlock_class(this)))
4053 0 : return 2;
4054 :
4055 : return 1;
4056 : }
4057 :
4058 : /*
4059 : * Mark all held locks with a usage bit:
4060 : */
4061 : static int
4062 18975916 : mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
4063 : {
4064 18975916 : struct held_lock *hlock;
4065 18975916 : int i;
4066 :
4067 34265837 : for (i = 0; i < curr->lockdep_depth; i++) {
4068 15275688 : enum lock_usage_bit hlock_bit = base_bit;
4069 15275688 : hlock = curr->held_locks + i;
4070 :
4071 15275688 : if (hlock->read)
4072 5646264 : hlock_bit += LOCK_USAGE_READ_MASK;
4073 :
4074 15275688 : BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
4075 :
4076 15275688 : if (!hlock->check)
4077 3688778 : continue;
4078 :
4079 11586910 : if (!mark_lock(curr, hlock, hlock_bit))
4080 : return 0;
4081 : }
4082 :
4083 : return 1;
4084 : }
4085 :
4086 : /*
4087 : * Hardirqs will be enabled:
4088 : */
4089 11628677 : static void __trace_hardirqs_on_caller(void)
4090 : {
4091 11628677 : struct task_struct *curr = current;
4092 :
4093 : /*
4094 : * We are going to turn hardirqs on, so set the
4095 : * usage bit for all held locks:
4096 : */
4097 11628677 : if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
4098 : return;
4099 : /*
4100 : * If we have softirqs enabled, then set the usage
4101 : * bit for all held locks. (disabled hardirqs prevented
4102 : * this bit from being set before)
4103 : */
4104 11606755 : if (curr->softirqs_enabled)
4105 7464618 : mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
4106 : }
4107 :
4108 : /**
4109 : * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
4110 : * @ip: Caller address
4111 : *
4112 : * Invoked before a possible transition to RCU idle from exit to user or
4113 : * guest mode. This ensures that all RCU operations are done before RCU
4114 : * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
4115 : * invoked to set the final state.
4116 : */
4117 11611570 : void lockdep_hardirqs_on_prepare(unsigned long ip)
4118 : {
4119 11611570 : if (unlikely(!debug_locks))
4120 : return;
4121 :
4122 : /*
4123 : * NMIs do not (and cannot) track lock dependencies, nothing to do.
4124 : */
4125 11611570 : if (unlikely(in_nmi()))
4126 : return;
4127 :
4128 11611569 : if (unlikely(this_cpu_read(lockdep_recursion)))
4129 : return;
4130 :
4131 11612849 : if (unlikely(lockdep_hardirqs_enabled())) {
4132 : /*
4133 : * Neither irq nor preemption are disabled here
4134 : * so this is racy by nature but losing one hit
4135 : * in a stat is not a big deal.
4136 : */
4137 4 : __debug_atomic_inc(redundant_hardirqs_on);
4138 4 : return;
4139 : }
4140 :
4141 : /*
4142 : * We're enabling irqs and according to our state above irqs weren't
4143 : * already enabled, yet we find the hardware thinks they are in fact
4144 : * enabled.. someone messed up their IRQ state tracing.
4145 : */
4146 11636191 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4147 0 : return;
4148 :
4149 : /*
4150 : * See the fine text that goes along with this variable definition.
4151 : */
4152 11633675 : if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
4153 0 : return;
4154 :
4155 : /*
4156 : * Can't allow enabling interrupts while in an interrupt handler,
4157 : * that's general bad form and such. Recursion, limited stack etc..
4158 : */
4159 11633675 : if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
4160 0 : return;
4161 :
4162 11633675 : current->hardirq_chain_key = current->curr_chain_key;
4163 :
4164 11633675 : lockdep_recursion_inc();
4165 11633675 : __trace_hardirqs_on_caller();
4166 11615666 : lockdep_recursion_finish();
4167 : }
4168 : EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
4169 :
4170 11631351 : void noinstr lockdep_hardirqs_on(unsigned long ip)
4171 : {
4172 11631351 : struct irqtrace_events *trace = ¤t->irqtrace;
4173 :
4174 11631351 : if (unlikely(!debug_locks))
4175 : return;
4176 :
4177 : /*
4178 : * NMIs can happen in the middle of local_irq_{en,dis}able() where the
4179 : * tracking state and hardware state are out of sync.
4180 : *
4181 : * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
4182 : * and not rely on hardware state like normal interrupts.
4183 : */
4184 11631351 : if (unlikely(in_nmi())) {
4185 1 : if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4186 : return;
4187 :
4188 : /*
4189 : * Skip:
4190 : * - recursion check, because NMI can hit lockdep;
4191 : * - hardware state check, because above;
4192 : * - chain_key check, see lockdep_hardirqs_on_prepare().
4193 : */
4194 1 : goto skip_checks;
4195 : }
4196 :
4197 11631350 : if (unlikely(this_cpu_read(lockdep_recursion)))
4198 : return;
4199 :
4200 11633476 : if (lockdep_hardirqs_enabled()) {
4201 : /*
4202 : * Neither irq nor preemption are disabled here
4203 : * so this is racy by nature but losing one hit
4204 : * in a stat is not a big deal.
4205 : */
4206 4 : __debug_atomic_inc(redundant_hardirqs_on);
4207 4 : return;
4208 : }
4209 :
4210 : /*
4211 : * We're enabling irqs and according to our state above irqs weren't
4212 : * already enabled, yet we find the hardware thinks they are in fact
4213 : * enabled.. someone messed up their IRQ state tracing.
4214 : */
4215 11636423 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4216 0 : return;
4217 :
4218 : /*
4219 : * Ensure the lock stack remained unchanged between
4220 : * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
4221 : */
4222 11637099 : DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
4223 : current->curr_chain_key);
4224 :
4225 11637100 : skip_checks:
4226 : /* we'll do an OFF -> ON transition: */
4227 11637100 : __this_cpu_write(hardirqs_enabled, 1);
4228 11637100 : trace->hardirq_enable_ip = ip;
4229 11637100 : trace->hardirq_enable_event = ++trace->irq_events;
4230 11637100 : debug_atomic_inc(hardirqs_on_events);
4231 : }
4232 : EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
4233 :
4234 : /*
4235 : * Hardirqs were disabled:
4236 : */
4237 11595740 : void noinstr lockdep_hardirqs_off(unsigned long ip)
4238 : {
4239 11595740 : if (unlikely(!debug_locks))
4240 : return;
4241 :
4242 : /*
4243 : * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
4244 : * they will restore the software state. This ensures the software
4245 : * state is consistent inside NMIs as well.
4246 : */
4247 11595740 : if (in_nmi()) {
4248 : if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
4249 : return;
4250 11591375 : } else if (__this_cpu_read(lockdep_recursion))
4251 : return;
4252 :
4253 : /*
4254 : * So we're supposed to get called after you mask local IRQs, but for
4255 : * some reason the hardware doesn't quite think you did a proper job.
4256 : */
4257 11595740 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4258 0 : return;
4259 :
4260 11596310 : if (lockdep_hardirqs_enabled()) {
4261 11597074 : struct irqtrace_events *trace = ¤t->irqtrace;
4262 :
4263 : /*
4264 : * We have done an ON -> OFF transition:
4265 : */
4266 11597074 : __this_cpu_write(hardirqs_enabled, 0);
4267 11597074 : trace->hardirq_disable_ip = ip;
4268 11597074 : trace->hardirq_disable_event = ++trace->irq_events;
4269 11597074 : debug_atomic_inc(hardirqs_off_events);
4270 : } else {
4271 0 : debug_atomic_inc(redundant_hardirqs_off);
4272 : }
4273 : }
4274 : EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
4275 :
4276 : /*
4277 : * Softirqs will be enabled:
4278 : */
4279 32925 : void lockdep_softirqs_on(unsigned long ip)
4280 : {
4281 32925 : struct irqtrace_events *trace = ¤t->irqtrace;
4282 :
4283 65871 : if (unlikely(!lockdep_enabled()))
4284 : return;
4285 :
4286 : /*
4287 : * We fancy IRQs being disabled here, see softirq.c, avoids
4288 : * funny state and nesting things.
4289 : */
4290 32937 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4291 0 : return;
4292 :
4293 32957 : if (current->softirqs_enabled) {
4294 0 : debug_atomic_inc(redundant_softirqs_on);
4295 0 : return;
4296 : }
4297 :
4298 32957 : lockdep_recursion_inc();
4299 : /*
4300 : * We'll do an OFF -> ON transition:
4301 : */
4302 32957 : current->softirqs_enabled = 1;
4303 32957 : trace->softirq_enable_ip = ip;
4304 32957 : trace->softirq_enable_event = ++trace->irq_events;
4305 32957 : debug_atomic_inc(softirqs_on_events);
4306 : /*
4307 : * We are going to turn softirqs on, so set the
4308 : * usage bit for all held locks, if hardirqs are
4309 : * enabled too:
4310 : */
4311 32936 : if (lockdep_hardirqs_enabled())
4312 0 : mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
4313 32936 : lockdep_recursion_finish();
4314 : }
4315 :
4316 : /*
4317 : * Softirqs were disabled:
4318 : */
4319 32486 : void lockdep_softirqs_off(unsigned long ip)
4320 : {
4321 65076 : if (unlikely(!lockdep_enabled()))
4322 : return;
4323 :
4324 : /*
4325 : * We fancy IRQs being disabled here, see softirq.c
4326 : */
4327 32646 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4328 0 : return;
4329 :
4330 32733 : if (current->softirqs_enabled) {
4331 32733 : struct irqtrace_events *trace = ¤t->irqtrace;
4332 :
4333 : /*
4334 : * We have done an ON -> OFF transition:
4335 : */
4336 32733 : current->softirqs_enabled = 0;
4337 32733 : trace->softirq_disable_ip = ip;
4338 32733 : trace->softirq_disable_event = ++trace->irq_events;
4339 32733 : debug_atomic_inc(softirqs_off_events);
4340 : /*
4341 : * Whoops, we wanted softirqs off, so why aren't they?
4342 : */
4343 32764 : DEBUG_LOCKS_WARN_ON(!softirq_count());
4344 : } else
4345 0 : debug_atomic_inc(redundant_softirqs_off);
4346 : }
4347 :
4348 : static int
4349 13578105 : mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4350 : {
4351 13578105 : if (!check)
4352 1783573 : goto lock_used;
4353 :
4354 : /*
4355 : * If non-trylock use in a hardirq or softirq context, then
4356 : * mark the lock as used in these contexts:
4357 : */
4358 11794532 : if (!hlock->trylock) {
4359 11566002 : if (hlock->read) {
4360 831956 : if (lockdep_hardirq_context())
4361 103112 : if (!mark_lock(curr, hlock,
4362 : LOCK_USED_IN_HARDIRQ_READ))
4363 : return 0;
4364 831127 : if (curr->softirq_context)
4365 14934 : if (!mark_lock(curr, hlock,
4366 : LOCK_USED_IN_SOFTIRQ_READ))
4367 : return 0;
4368 : } else {
4369 10734046 : if (lockdep_hardirq_context())
4370 122286 : if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4371 : return 0;
4372 10733666 : if (curr->softirq_context)
4373 2627543 : if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4374 : return 0;
4375 : }
4376 : }
4377 11793297 : if (!hlock->hardirqs_off) {
4378 3637410 : if (hlock->read) {
4379 588871 : if (!mark_lock(curr, hlock,
4380 : LOCK_ENABLED_HARDIRQ_READ))
4381 : return 0;
4382 588827 : if (curr->softirqs_enabled)
4383 588800 : if (!mark_lock(curr, hlock,
4384 : LOCK_ENABLED_SOFTIRQ_READ))
4385 : return 0;
4386 : } else {
4387 3048539 : if (!mark_lock(curr, hlock,
4388 : LOCK_ENABLED_HARDIRQ))
4389 : return 0;
4390 3048015 : if (curr->softirqs_enabled)
4391 3035922 : if (!mark_lock(curr, hlock,
4392 : LOCK_ENABLED_SOFTIRQ))
4393 : return 0;
4394 : }
4395 : }
4396 :
4397 11206091 : lock_used:
4398 : /* mark it as used: */
4399 13578569 : if (!mark_lock(curr, hlock, LOCK_USED))
4400 0 : return 0;
4401 :
4402 : return 1;
4403 : }
4404 :
4405 13574060 : static inline unsigned int task_irq_context(struct task_struct *task)
4406 : {
4407 27148120 : return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
4408 13574060 : LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
4409 : }
4410 :
4411 13508003 : static int separate_irq_context(struct task_struct *curr,
4412 : struct held_lock *hlock)
4413 : {
4414 13508003 : unsigned int depth = curr->lockdep_depth;
4415 :
4416 : /*
4417 : * Keep track of points where we cross into an interrupt context:
4418 : */
4419 13508003 : if (depth) {
4420 7113697 : struct held_lock *prev_hlock;
4421 :
4422 7113697 : prev_hlock = curr->held_locks + depth-1;
4423 : /*
4424 : * If we cross into another context, reset the
4425 : * hash key (this also prevents the checking and the
4426 : * adding of the dependency to 'prev'):
4427 : */
4428 7113697 : if (prev_hlock->irq_context != hlock->irq_context)
4429 329088 : return 1;
4430 : }
4431 : return 0;
4432 : }
4433 :
4434 : /*
4435 : * Mark a lock with a usage bit, and validate the state transition:
4436 : */
4437 34756433 : static int mark_lock(struct task_struct *curr, struct held_lock *this,
4438 : enum lock_usage_bit new_bit)
4439 : {
4440 34756433 : unsigned int new_mask, ret = 1;
4441 :
4442 34756433 : if (new_bit >= LOCK_USAGE_STATES) {
4443 0 : DEBUG_LOCKS_WARN_ON(1);
4444 0 : return 0;
4445 : }
4446 :
4447 34756433 : if (new_bit == LOCK_USED && this->read)
4448 2772069 : new_bit = LOCK_USED_READ;
4449 :
4450 34756433 : new_mask = 1 << new_bit;
4451 :
4452 : /*
4453 : * If already set then do not dirty the cacheline,
4454 : * nor do any checks:
4455 : */
4456 34756433 : if (likely(hlock_class(this)->usage_mask & new_mask))
4457 : return 1;
4458 :
4459 2027 : if (!graph_lock())
4460 : return 0;
4461 : /*
4462 : * Make sure we didn't race:
4463 : */
4464 2027 : if (unlikely(hlock_class(this)->usage_mask & new_mask))
4465 8 : goto unlock;
4466 :
4467 2019 : if (!hlock_class(this)->usage_mask)
4468 2019 : debug_atomic_dec(nr_unused_locks);
4469 :
4470 2019 : hlock_class(this)->usage_mask |= new_mask;
4471 :
4472 2019 : if (new_bit < LOCK_TRACE_STATES) {
4473 2019 : if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4474 : return 0;
4475 : }
4476 :
4477 2019 : if (new_bit < LOCK_USED) {
4478 1216 : ret = mark_lock_irq(curr, this, new_bit);
4479 1216 : if (!ret)
4480 : return 0;
4481 : }
4482 :
4483 2019 : unlock:
4484 1216 : graph_unlock();
4485 :
4486 : /*
4487 : * We must printk outside of the graph_lock:
4488 : */
4489 2027 : if (ret == 2) {
4490 0 : printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
4491 0 : print_lock(this);
4492 0 : print_irqtrace_events(curr);
4493 0 : dump_stack();
4494 : }
4495 :
4496 2027 : return ret;
4497 : }
4498 :
4499 6020048 : static inline short task_wait_context(struct task_struct *curr)
4500 : {
4501 : /*
4502 : * Set appropriate wait type for the context; for IRQs we have to take
4503 : * into account force_irqthread as that is implied by PREEMPT_RT.
4504 : */
4505 6020048 : if (lockdep_hardirq_context()) {
4506 : /*
4507 : * Check if force_irqthreads will run us threaded.
4508 : */
4509 86920 : if (curr->hardirq_threaded || curr->irq_config)
4510 : return LD_WAIT_CONFIG;
4511 :
4512 : return LD_WAIT_SPIN;
4513 5933128 : } else if (curr->softirq_context) {
4514 : /*
4515 : * Softirqs are always threaded.
4516 : */
4517 1522675 : return LD_WAIT_CONFIG;
4518 : }
4519 :
4520 : return LD_WAIT_MAX;
4521 : }
4522 :
4523 : static int
4524 0 : print_lock_invalid_wait_context(struct task_struct *curr,
4525 : struct held_lock *hlock)
4526 : {
4527 0 : short curr_inner;
4528 :
4529 0 : if (!debug_locks_off())
4530 : return 0;
4531 0 : if (debug_locks_silent)
4532 : return 0;
4533 :
4534 0 : pr_warn("\n");
4535 0 : pr_warn("=============================\n");
4536 0 : pr_warn("[ BUG: Invalid wait context ]\n");
4537 0 : print_kernel_ident();
4538 0 : pr_warn("-----------------------------\n");
4539 :
4540 0 : pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4541 0 : print_lock(hlock);
4542 :
4543 0 : pr_warn("other info that might help us debug this:\n");
4544 :
4545 0 : curr_inner = task_wait_context(curr);
4546 0 : pr_warn("context-{%d:%d}\n", curr_inner, curr_inner);
4547 :
4548 0 : lockdep_print_held_locks(curr);
4549 :
4550 0 : pr_warn("stack backtrace:\n");
4551 0 : dump_stack();
4552 :
4553 0 : return 0;
4554 : }
4555 :
4556 : /*
4557 : * Verify the wait_type context.
4558 : *
4559 : * This check validates we takes locks in the right wait-type order; that is it
4560 : * ensures that we do not take mutexes inside spinlocks and do not attempt to
4561 : * acquire spinlocks inside raw_spinlocks and the sort.
4562 : *
4563 : * The entire thing is slightly more complex because of RCU, RCU is a lock that
4564 : * can be taken from (pretty much) any context but also has constraints.
4565 : * However when taken in a stricter environment the RCU lock does not loosen
4566 : * the constraints.
4567 : *
4568 : * Therefore we must look for the strictest environment in the lock stack and
4569 : * compare that to the lock we're trying to acquire.
4570 : */
4571 13574533 : static int check_wait_context(struct task_struct *curr, struct held_lock *next)
4572 : {
4573 13574533 : u8 next_inner = hlock_class(next)->wait_type_inner;
4574 13557989 : u8 next_outer = hlock_class(next)->wait_type_outer;
4575 13593973 : u8 curr_inner;
4576 13593973 : int depth;
4577 :
4578 13593973 : if (!curr->lockdep_depth || !next_inner || next->trylock)
4579 : return 0;
4580 :
4581 6020048 : if (!next_outer)
4582 5435357 : next_outer = next_inner;
4583 :
4584 : /*
4585 : * Find start of current irq_context..
4586 : */
4587 14975366 : for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) {
4588 9365658 : struct held_lock *prev = curr->held_locks + depth;
4589 9365658 : if (prev->irq_context != next->irq_context)
4590 : break;
4591 : }
4592 6020048 : depth++;
4593 :
4594 6020048 : curr_inner = task_wait_context(curr);
4595 :
4596 14956690 : for (; depth < curr->lockdep_depth; depth++) {
4597 8941652 : struct held_lock *prev = curr->held_locks + depth;
4598 8941652 : u8 prev_inner = hlock_class(prev)->wait_type_inner;
4599 :
4600 8936642 : if (prev_inner) {
4601 : /*
4602 : * We can have a bigger inner than a previous one
4603 : * when outer is smaller than inner, as with RCU.
4604 : *
4605 : * Also due to trylocks.
4606 : */
4607 6730709 : curr_inner = min(curr_inner, prev_inner);
4608 : }
4609 : }
4610 :
4611 6015038 : if (next_outer > curr_inner)
4612 0 : return print_lock_invalid_wait_context(curr, next);
4613 :
4614 : return 0;
4615 : }
4616 :
4617 : #else /* CONFIG_PROVE_LOCKING */
4618 :
4619 : static inline int
4620 : mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4621 : {
4622 : return 1;
4623 : }
4624 :
4625 : static inline unsigned int task_irq_context(struct task_struct *task)
4626 : {
4627 : return 0;
4628 : }
4629 :
4630 : static inline int separate_irq_context(struct task_struct *curr,
4631 : struct held_lock *hlock)
4632 : {
4633 : return 0;
4634 : }
4635 :
4636 : static inline int check_wait_context(struct task_struct *curr,
4637 : struct held_lock *next)
4638 : {
4639 : return 0;
4640 : }
4641 :
4642 : #endif /* CONFIG_PROVE_LOCKING */
4643 :
4644 : /*
4645 : * Initialize a lock instance's lock-class mapping info:
4646 : */
4647 1359542 : void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
4648 : struct lock_class_key *key, int subclass,
4649 : u8 inner, u8 outer, u8 lock_type)
4650 : {
4651 1359542 : int i;
4652 :
4653 4078074 : for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
4654 2718532 : lock->class_cache[i] = NULL;
4655 :
4656 : #ifdef CONFIG_LOCK_STAT
4657 : lock->cpu = raw_smp_processor_id();
4658 : #endif
4659 :
4660 : /*
4661 : * Can't be having no nameless bastards around this place!
4662 : */
4663 1359542 : if (DEBUG_LOCKS_WARN_ON(!name)) {
4664 0 : lock->name = "NULL";
4665 0 : return;
4666 : }
4667 :
4668 1359542 : lock->name = name;
4669 :
4670 1359542 : lock->wait_type_outer = outer;
4671 1359542 : lock->wait_type_inner = inner;
4672 1359542 : lock->lock_type = lock_type;
4673 :
4674 : /*
4675 : * No key, no joy, we need to hash something.
4676 : */
4677 1359542 : if (DEBUG_LOCKS_WARN_ON(!key))
4678 0 : return;
4679 : /*
4680 : * Sanity check, the lock-class key must either have been allocated
4681 : * statically or must have been registered as a dynamic key.
4682 : */
4683 1359542 : if (!static_obj(key) && !is_dynamic_key(key)) {
4684 0 : if (debug_locks)
4685 0 : printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
4686 0 : DEBUG_LOCKS_WARN_ON(1);
4687 0 : return;
4688 : }
4689 1359470 : lock->key = key;
4690 :
4691 1359470 : if (unlikely(!debug_locks))
4692 : return;
4693 :
4694 1359470 : if (subclass) {
4695 1 : unsigned long flags;
4696 :
4697 2 : if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
4698 0 : return;
4699 :
4700 1 : raw_local_irq_save(flags);
4701 1 : lockdep_recursion_inc();
4702 1 : register_lock_class(lock, subclass, 1);
4703 1 : lockdep_recursion_finish();
4704 1 : raw_local_irq_restore(flags);
4705 : }
4706 : }
4707 : EXPORT_SYMBOL_GPL(lockdep_init_map_type);
4708 :
4709 : struct lock_class_key __lockdep_no_validate__;
4710 : EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
4711 :
4712 : static void
4713 0 : print_lock_nested_lock_not_held(struct task_struct *curr,
4714 : struct held_lock *hlock,
4715 : unsigned long ip)
4716 : {
4717 0 : if (!debug_locks_off())
4718 : return;
4719 0 : if (debug_locks_silent)
4720 : return;
4721 :
4722 0 : pr_warn("\n");
4723 0 : pr_warn("==================================\n");
4724 0 : pr_warn("WARNING: Nested lock was not taken\n");
4725 0 : print_kernel_ident();
4726 0 : pr_warn("----------------------------------\n");
4727 :
4728 0 : pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
4729 0 : print_lock(hlock);
4730 :
4731 0 : pr_warn("\nbut this task is not holding:\n");
4732 0 : pr_warn("%s\n", hlock->nest_lock->name);
4733 :
4734 0 : pr_warn("\nstack backtrace:\n");
4735 0 : dump_stack();
4736 :
4737 0 : pr_warn("\nother info that might help us debug this:\n");
4738 0 : lockdep_print_held_locks(curr);
4739 :
4740 0 : pr_warn("\nstack backtrace:\n");
4741 0 : dump_stack();
4742 : }
4743 :
4744 : static int __lock_is_held(const struct lockdep_map *lock, int read);
4745 :
4746 : /*
4747 : * This gets called for every mutex_lock*()/spin_lock*() operation.
4748 : * We maintain the dependency maps and validate the locking attempt:
4749 : *
4750 : * The callers must make sure that IRQs are disabled before calling it,
4751 : * otherwise we could get an interrupt which would want to take locks,
4752 : * which would end up in lockdep again.
4753 : */
4754 13479993 : static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4755 : int trylock, int read, int check, int hardirqs_off,
4756 : struct lockdep_map *nest_lock, unsigned long ip,
4757 : int references, int pin_count)
4758 : {
4759 13479993 : struct task_struct *curr = current;
4760 13479993 : struct lock_class *class = NULL;
4761 13479993 : struct held_lock *hlock;
4762 13479993 : unsigned int depth;
4763 13479993 : int chain_head = 0;
4764 13479993 : int class_idx;
4765 13479993 : u64 chain_key;
4766 :
4767 13479993 : if (unlikely(!debug_locks))
4768 : return 0;
4769 :
4770 13479993 : if (!prove_locking || lock->key == &__lockdep_no_validate__)
4771 30 : check = 0;
4772 :
4773 13479993 : if (subclass < NR_LOCKDEP_CACHING_CLASSES)
4774 13524021 : class = lock->class_cache[subclass];
4775 : /*
4776 : * Not cached?
4777 : */
4778 13479993 : if (unlikely(!class)) {
4779 691533 : class = register_lock_class(lock, subclass, 0);
4780 691535 : if (!class)
4781 : return 0;
4782 : }
4783 :
4784 13479995 : debug_class_ops_inc(class);
4785 :
4786 13574060 : if (very_verbose(class)) {
4787 : printk("\nacquire class [%px] %s", class->key, class->name);
4788 : if (class->name_version > 1)
4789 : printk(KERN_CONT "#%d", class->name_version);
4790 : printk(KERN_CONT "\n");
4791 : dump_stack();
4792 : }
4793 :
4794 : /*
4795 : * Add the lock to the list of currently held locks.
4796 : * (we dont increase the depth just yet, up until the
4797 : * dependency checks are done)
4798 : */
4799 13574060 : depth = curr->lockdep_depth;
4800 : /*
4801 : * Ran out of static storage for our per-task lock stack again have we?
4802 : */
4803 13574060 : if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
4804 0 : return 0;
4805 :
4806 13574060 : class_idx = class - lock_classes;
4807 :
4808 13574060 : if (depth) { /* we're holding locks */
4809 7120373 : hlock = curr->held_locks + depth - 1;
4810 7120373 : if (hlock->class_idx == class_idx && nest_lock) {
4811 0 : if (!references)
4812 0 : references++;
4813 :
4814 0 : if (!hlock->references)
4815 0 : hlock->references++;
4816 :
4817 0 : hlock->references += references;
4818 :
4819 : /* Overflow */
4820 0 : if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4821 0 : return 0;
4822 :
4823 : return 2;
4824 : }
4825 : }
4826 :
4827 13574060 : hlock = curr->held_locks + depth;
4828 : /*
4829 : * Plain impossible, we just registered it and checked it weren't no
4830 : * NULL like.. I bet this mushroom I ate was good!
4831 : */
4832 13574060 : if (DEBUG_LOCKS_WARN_ON(!class))
4833 0 : return 0;
4834 13574060 : hlock->class_idx = class_idx;
4835 13574060 : hlock->acquire_ip = ip;
4836 13574060 : hlock->instance = lock;
4837 13574060 : hlock->nest_lock = nest_lock;
4838 13574060 : hlock->irq_context = task_irq_context(curr);
4839 13574060 : hlock->trylock = trylock;
4840 13574060 : hlock->read = read;
4841 13574060 : hlock->check = check;
4842 13574060 : hlock->hardirqs_off = !!hardirqs_off;
4843 13574060 : hlock->references = references;
4844 : #ifdef CONFIG_LOCK_STAT
4845 : hlock->waittime_stamp = 0;
4846 : hlock->holdtime_stamp = lockstat_clock();
4847 : #endif
4848 13574060 : hlock->pin_count = pin_count;
4849 :
4850 13574060 : if (check_wait_context(curr, hlock))
4851 : return 0;
4852 :
4853 : /* Initialize the lock usage bit */
4854 13520645 : if (!mark_usage(curr, hlock, check))
4855 : return 0;
4856 :
4857 : /*
4858 : * Calculate the chain hash: it's the combined hash of all the
4859 : * lock keys along the dependency chain. We save the hash value
4860 : * at every step so that we can get the current hash easily
4861 : * after unlock. The chain hash is then used to cache dependency
4862 : * results.
4863 : *
4864 : * The 'key ID' is what is the most compact key value to drive
4865 : * the hash, not class->key.
4866 : */
4867 : /*
4868 : * Whoops, we did it again.. class_idx is invalid.
4869 : */
4870 13500412 : if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
4871 0 : return 0;
4872 :
4873 13491913 : chain_key = curr->curr_chain_key;
4874 13491913 : if (!depth) {
4875 : /*
4876 : * How can we have a chain hash when we ain't got no keys?!
4877 : */
4878 6466485 : if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
4879 0 : return 0;
4880 : chain_head = 1;
4881 : }
4882 :
4883 13491913 : hlock->prev_chain_key = chain_key;
4884 13491913 : if (separate_irq_context(curr, hlock)) {
4885 328513 : chain_key = INITIAL_CHAIN_KEY;
4886 328513 : chain_head = 1;
4887 : }
4888 13491913 : chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
4889 :
4890 13491913 : if (nest_lock && !__lock_is_held(nest_lock, -1)) {
4891 0 : print_lock_nested_lock_not_held(curr, hlock, ip);
4892 0 : return 0;
4893 : }
4894 :
4895 13508811 : if (!debug_locks_silent) {
4896 20616024 : WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
4897 13506833 : WARN_ON_ONCE(!hlock_class(hlock)->key);
4898 : }
4899 :
4900 13481937 : if (!validate_chain(curr, hlock, chain_head, chain_key))
4901 : return 0;
4902 :
4903 13516913 : curr->curr_chain_key = chain_key;
4904 13516913 : curr->lockdep_depth++;
4905 13516913 : check_chain_key(curr);
4906 : #ifdef CONFIG_DEBUG_LOCKDEP
4907 13510310 : if (unlikely(!debug_locks))
4908 : return 0;
4909 : #endif
4910 13510310 : if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
4911 0 : debug_locks_off();
4912 0 : print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
4913 0 : printk(KERN_DEBUG "depth: %i max: %lu!\n",
4914 : curr->lockdep_depth, MAX_LOCK_DEPTH);
4915 :
4916 0 : lockdep_print_held_locks(current);
4917 0 : debug_show_all_locks();
4918 0 : dump_stack();
4919 :
4920 0 : return 0;
4921 : }
4922 :
4923 13510310 : if (unlikely(curr->lockdep_depth > max_lockdep_depth))
4924 15 : max_lockdep_depth = curr->lockdep_depth;
4925 :
4926 : return 1;
4927 : }
4928 :
4929 0 : static void print_unlock_imbalance_bug(struct task_struct *curr,
4930 : struct lockdep_map *lock,
4931 : unsigned long ip)
4932 : {
4933 0 : if (!debug_locks_off())
4934 : return;
4935 0 : if (debug_locks_silent)
4936 : return;
4937 :
4938 0 : pr_warn("\n");
4939 0 : pr_warn("=====================================\n");
4940 0 : pr_warn("WARNING: bad unlock balance detected!\n");
4941 0 : print_kernel_ident();
4942 0 : pr_warn("-------------------------------------\n");
4943 0 : pr_warn("%s/%d is trying to release lock (",
4944 : curr->comm, task_pid_nr(curr));
4945 0 : print_lockdep_cache(lock);
4946 0 : pr_cont(") at:\n");
4947 0 : print_ip_sym(KERN_WARNING, ip);
4948 0 : pr_warn("but there are no more locks to release!\n");
4949 0 : pr_warn("\nother info that might help us debug this:\n");
4950 0 : lockdep_print_held_locks(curr);
4951 :
4952 0 : pr_warn("\nstack backtrace:\n");
4953 0 : dump_stack();
4954 : }
4955 :
4956 75100778 : static noinstr int match_held_lock(const struct held_lock *hlock,
4957 : const struct lockdep_map *lock)
4958 : {
4959 75100778 : if (hlock->instance == lock)
4960 : return 1;
4961 :
4962 57260781 : if (hlock->references) {
4963 0 : const struct lock_class *class = lock->class_cache[0];
4964 :
4965 0 : if (!class)
4966 0 : class = look_up_lock_class(lock, 0);
4967 :
4968 : /*
4969 : * If look_up_lock_class() failed to find a class, we're trying
4970 : * to test if we hold a lock that has never yet been acquired.
4971 : * Clearly if the lock hasn't been acquired _ever_, we're not
4972 : * holding it either, so report failure.
4973 : */
4974 0 : if (!class)
4975 0 : return 0;
4976 :
4977 : /*
4978 : * References, but not a lock we're actually ref-counting?
4979 : * State got messed up, follow the sites that change ->references
4980 : * and try to make sense of it.
4981 : */
4982 0 : if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
4983 0 : return 0;
4984 :
4985 0 : if (hlock->class_idx == class - lock_classes)
4986 0 : return 1;
4987 : }
4988 :
4989 : return 0;
4990 : }
4991 :
4992 : /* @depth must not be zero */
4993 13475375 : static struct held_lock *find_held_lock(struct task_struct *curr,
4994 : struct lockdep_map *lock,
4995 : unsigned int depth, int *idx)
4996 : {
4997 13475375 : struct held_lock *ret, *hlock, *prev_hlock;
4998 13475375 : int i;
4999 :
5000 13475375 : i = depth - 1;
5001 13475375 : hlock = curr->held_locks + i;
5002 13475375 : ret = hlock;
5003 13475375 : if (match_held_lock(hlock, lock))
5004 13399870 : goto out;
5005 :
5006 44868 : ret = NULL;
5007 44868 : for (i--, prev_hlock = hlock--;
5008 49349 : i >= 0;
5009 4481 : i--, prev_hlock = hlock--) {
5010 : /*
5011 : * We must not cross into another context:
5012 : */
5013 49349 : if (prev_hlock->irq_context != hlock->irq_context) {
5014 : ret = NULL;
5015 : break;
5016 : }
5017 49349 : if (match_held_lock(hlock, lock)) {
5018 : ret = hlock;
5019 : break;
5020 : }
5021 : }
5022 :
5023 0 : out:
5024 13444741 : *idx = i;
5025 13444741 : return ret;
5026 : }
5027 :
5028 46031 : static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
5029 : int idx, unsigned int *merged)
5030 : {
5031 46031 : struct held_lock *hlock;
5032 46031 : int first_idx = idx;
5033 :
5034 46031 : if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
5035 0 : return 0;
5036 :
5037 96541 : for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
5038 50512 : switch (__lock_acquire(hlock->instance,
5039 50510 : hlock_class(hlock)->subclass,
5040 50510 : hlock->trylock,
5041 50510 : hlock->read, hlock->check,
5042 50510 : hlock->hardirqs_off,
5043 : hlock->nest_lock, hlock->acquire_ip,
5044 50510 : hlock->references, hlock->pin_count)) {
5045 : case 0:
5046 : return 1;
5047 : case 1:
5048 : break;
5049 0 : case 2:
5050 0 : *merged += (idx == first_idx);
5051 0 : break;
5052 : default:
5053 0 : WARN_ON(1);
5054 0 : return 0;
5055 : }
5056 : }
5057 : return 0;
5058 : }
5059 :
5060 : static int
5061 0 : __lock_set_class(struct lockdep_map *lock, const char *name,
5062 : struct lock_class_key *key, unsigned int subclass,
5063 : unsigned long ip)
5064 : {
5065 0 : struct task_struct *curr = current;
5066 0 : unsigned int depth, merged = 0;
5067 0 : struct held_lock *hlock;
5068 0 : struct lock_class *class;
5069 0 : int i;
5070 :
5071 0 : if (unlikely(!debug_locks))
5072 : return 0;
5073 :
5074 0 : depth = curr->lockdep_depth;
5075 : /*
5076 : * This function is about (re)setting the class of a held lock,
5077 : * yet we're not actually holding any locks. Naughty user!
5078 : */
5079 0 : if (DEBUG_LOCKS_WARN_ON(!depth))
5080 0 : return 0;
5081 :
5082 0 : hlock = find_held_lock(curr, lock, depth, &i);
5083 0 : if (!hlock) {
5084 0 : print_unlock_imbalance_bug(curr, lock, ip);
5085 0 : return 0;
5086 : }
5087 :
5088 0 : lockdep_init_map_waits(lock, name, key, 0,
5089 0 : lock->wait_type_inner,
5090 0 : lock->wait_type_outer);
5091 0 : class = register_lock_class(lock, subclass, 0);
5092 0 : hlock->class_idx = class - lock_classes;
5093 :
5094 0 : curr->lockdep_depth = i;
5095 0 : curr->curr_chain_key = hlock->prev_chain_key;
5096 :
5097 0 : if (reacquire_held_locks(curr, depth, i, &merged))
5098 : return 0;
5099 :
5100 : /*
5101 : * I took it apart and put it back together again, except now I have
5102 : * these 'spare' parts.. where shall I put them.
5103 : */
5104 0 : if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
5105 0 : return 0;
5106 : return 1;
5107 : }
5108 :
5109 1161 : static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5110 : {
5111 1161 : struct task_struct *curr = current;
5112 1161 : unsigned int depth, merged = 0;
5113 1161 : struct held_lock *hlock;
5114 1161 : int i;
5115 :
5116 1161 : if (unlikely(!debug_locks))
5117 : return 0;
5118 :
5119 1161 : depth = curr->lockdep_depth;
5120 : /*
5121 : * This function is about (re)setting the class of a held lock,
5122 : * yet we're not actually holding any locks. Naughty user!
5123 : */
5124 1161 : if (DEBUG_LOCKS_WARN_ON(!depth))
5125 0 : return 0;
5126 :
5127 1161 : hlock = find_held_lock(curr, lock, depth, &i);
5128 1161 : if (!hlock) {
5129 0 : print_unlock_imbalance_bug(curr, lock, ip);
5130 0 : return 0;
5131 : }
5132 :
5133 1161 : curr->lockdep_depth = i;
5134 1161 : curr->curr_chain_key = hlock->prev_chain_key;
5135 :
5136 1161 : WARN(hlock->read, "downgrading a read lock");
5137 1161 : hlock->read = 1;
5138 1161 : hlock->acquire_ip = ip;
5139 :
5140 1161 : if (reacquire_held_locks(curr, depth, i, &merged))
5141 : return 0;
5142 :
5143 : /* Merging can't happen with unchanged classes.. */
5144 1161 : if (DEBUG_LOCKS_WARN_ON(merged))
5145 0 : return 0;
5146 :
5147 : /*
5148 : * I took it apart and put it back together again, except now I have
5149 : * these 'spare' parts.. where shall I put them.
5150 : */
5151 1161 : if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
5152 0 : return 0;
5153 :
5154 : return 1;
5155 : }
5156 :
5157 : /*
5158 : * Remove the lock from the list of currently held locks - this gets
5159 : * called on mutex_unlock()/spin_unlock*() (or on a failed
5160 : * mutex_lock_interruptible()).
5161 : */
5162 : static int
5163 13481731 : __lock_release(struct lockdep_map *lock, unsigned long ip)
5164 : {
5165 13481731 : struct task_struct *curr = current;
5166 13481731 : unsigned int depth, merged = 1;
5167 13481731 : struct held_lock *hlock;
5168 13481731 : int i;
5169 :
5170 13481731 : if (unlikely(!debug_locks))
5171 : return 0;
5172 :
5173 13481731 : depth = curr->lockdep_depth;
5174 : /*
5175 : * So we're all set to release this lock.. wait what lock? We don't
5176 : * own any locks, you've been drinking again?
5177 : */
5178 13481731 : if (depth <= 0) {
5179 0 : print_unlock_imbalance_bug(curr, lock, ip);
5180 0 : return 0;
5181 : }
5182 :
5183 : /*
5184 : * Check whether the lock exists in the current stack
5185 : * of held locks:
5186 : */
5187 13481731 : hlock = find_held_lock(curr, lock, depth, &i);
5188 13488785 : if (!hlock) {
5189 0 : print_unlock_imbalance_bug(curr, lock, ip);
5190 0 : return 0;
5191 : }
5192 :
5193 13488785 : if (hlock->instance == lock)
5194 13488785 : lock_release_holdtime(hlock);
5195 :
5196 13488785 : WARN(hlock->pin_count, "releasing a pinned lock\n");
5197 :
5198 13488785 : if (hlock->references) {
5199 0 : hlock->references--;
5200 0 : if (hlock->references) {
5201 : /*
5202 : * We had, and after removing one, still have
5203 : * references, the current lock stack is still
5204 : * valid. We're done!
5205 : */
5206 : return 1;
5207 : }
5208 : }
5209 :
5210 : /*
5211 : * We have the right lock to unlock, 'hlock' points to it.
5212 : * Now we remove it from the stack, and add back the other
5213 : * entries (if any), recalculating the hash along the way:
5214 : */
5215 :
5216 13488785 : curr->lockdep_depth = i;
5217 13488785 : curr->curr_chain_key = hlock->prev_chain_key;
5218 :
5219 : /*
5220 : * The most likely case is when the unlock is on the innermost
5221 : * lock. In this case, we are done!
5222 : */
5223 13488785 : if (i == depth-1)
5224 : return 1;
5225 :
5226 44872 : if (reacquire_held_locks(curr, depth, i + 1, &merged))
5227 : return 0;
5228 :
5229 : /*
5230 : * We had N bottles of beer on the wall, we drank one, but now
5231 : * there's not N-1 bottles of beer left on the wall...
5232 : * Pouring two of the bottles together is acceptable.
5233 : */
5234 44870 : DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
5235 :
5236 : /*
5237 : * Since reacquire_held_locks() would have called check_chain_key()
5238 : * indirectly via __lock_acquire(), we don't need to do it again
5239 : * on return.
5240 : */
5241 : return 0;
5242 : }
5243 :
5244 : static __always_inline
5245 46797329 : int __lock_is_held(const struct lockdep_map *lock, int read)
5246 : {
5247 0 : struct task_struct *curr = current;
5248 46797329 : int i;
5249 :
5250 104039489 : for (i = 0; i < curr->lockdep_depth; i++) {
5251 61601358 : struct held_lock *hlock = curr->held_locks + i;
5252 :
5253 61601358 : if (match_held_lock(hlock, lock)) {
5254 4332707 : if (read == -1 || hlock->read == read)
5255 : return 1;
5256 :
5257 0 : return 0;
5258 : }
5259 : }
5260 :
5261 : return 0;
5262 : }
5263 :
5264 88365 : static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
5265 : {
5266 88365 : struct pin_cookie cookie = NIL_COOKIE;
5267 88365 : struct task_struct *curr = current;
5268 88365 : int i;
5269 :
5270 88365 : if (unlikely(!debug_locks))
5271 0 : return cookie;
5272 :
5273 154664 : for (i = 0; i < curr->lockdep_depth; i++) {
5274 154664 : struct held_lock *hlock = curr->held_locks + i;
5275 :
5276 154664 : if (match_held_lock(hlock, lock)) {
5277 : /*
5278 : * Grab 16bits of randomness; this is sufficient to not
5279 : * be guessable and still allows some pin nesting in
5280 : * our u32 pin_count.
5281 : */
5282 88283 : cookie.val = 1 + (prandom_u32() >> 16);
5283 87939 : hlock->pin_count += cookie.val;
5284 87939 : return cookie;
5285 : }
5286 : }
5287 :
5288 0 : WARN(1, "pinning an unheld lock\n");
5289 0 : return cookie;
5290 : }
5291 :
5292 7721 : static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5293 : {
5294 7721 : struct task_struct *curr = current;
5295 7721 : int i;
5296 :
5297 7721 : if (unlikely(!debug_locks))
5298 : return;
5299 :
5300 9230 : for (i = 0; i < curr->lockdep_depth; i++) {
5301 9230 : struct held_lock *hlock = curr->held_locks + i;
5302 :
5303 9230 : if (match_held_lock(hlock, lock)) {
5304 7720 : hlock->pin_count += cookie.val;
5305 7720 : return;
5306 : }
5307 : }
5308 :
5309 0 : WARN(1, "pinning an unheld lock\n");
5310 : }
5311 :
5312 95925 : static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5313 : {
5314 95925 : struct task_struct *curr = current;
5315 95925 : int i;
5316 :
5317 95925 : if (unlikely(!debug_locks))
5318 : return;
5319 :
5320 163667 : for (i = 0; i < curr->lockdep_depth; i++) {
5321 163667 : struct held_lock *hlock = curr->held_locks + i;
5322 :
5323 163667 : if (match_held_lock(hlock, lock)) {
5324 95915 : if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
5325 : return;
5326 :
5327 95915 : hlock->pin_count -= cookie.val;
5328 :
5329 95915 : if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
5330 0 : hlock->pin_count = 0;
5331 :
5332 95915 : return;
5333 : }
5334 : }
5335 :
5336 0 : WARN(1, "unpinning an unheld lock\n");
5337 : }
5338 :
5339 : /*
5340 : * Check whether we follow the irq-flags state precisely:
5341 : */
5342 73160890 : static noinstr void check_flags(unsigned long flags)
5343 : {
5344 : #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
5345 73160890 : if (!debug_locks)
5346 : return;
5347 :
5348 : /* Get the warning out.. */
5349 73160890 : instrumentation_begin();
5350 :
5351 73160890 : if (irqs_disabled_flags(flags)) {
5352 35504899 : if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
5353 0 : printk("possible reason: unannotated irqs-off.\n");
5354 : }
5355 : } else {
5356 37655991 : if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
5357 0 : printk("possible reason: unannotated irqs-on.\n");
5358 : }
5359 : }
5360 :
5361 : /*
5362 : * We dont accurately track softirq state in e.g.
5363 : * hardirq contexts (such as on 4KSTACKS), so only
5364 : * check if not in hardirq contexts:
5365 : */
5366 73202313 : if (!hardirq_count()) {
5367 71121597 : if (softirq_count()) {
5368 : /* like the above, but with softirqs */
5369 14097563 : DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
5370 : } else {
5371 : /* lick the above, does it taste good? */
5372 57024034 : DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
5373 : }
5374 : }
5375 :
5376 73202313 : if (!debug_locks)
5377 0 : print_irqtrace_events(current);
5378 :
5379 73202313 : instrumentation_end();
5380 : #endif
5381 : }
5382 :
5383 0 : void lock_set_class(struct lockdep_map *lock, const char *name,
5384 : struct lock_class_key *key, unsigned int subclass,
5385 : unsigned long ip)
5386 : {
5387 0 : unsigned long flags;
5388 :
5389 0 : if (unlikely(!lockdep_enabled()))
5390 : return;
5391 :
5392 0 : raw_local_irq_save(flags);
5393 0 : lockdep_recursion_inc();
5394 0 : check_flags(flags);
5395 0 : if (__lock_set_class(lock, name, key, subclass, ip))
5396 0 : check_chain_key(current);
5397 0 : lockdep_recursion_finish();
5398 0 : raw_local_irq_restore(flags);
5399 : }
5400 : EXPORT_SYMBOL_GPL(lock_set_class);
5401 :
5402 1161 : void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
5403 : {
5404 1161 : unsigned long flags;
5405 :
5406 2322 : if (unlikely(!lockdep_enabled()))
5407 : return;
5408 :
5409 1161 : raw_local_irq_save(flags);
5410 1161 : lockdep_recursion_inc();
5411 1161 : check_flags(flags);
5412 1161 : if (__lock_downgrade(lock, ip))
5413 1161 : check_chain_key(current);
5414 1161 : lockdep_recursion_finish();
5415 1161 : raw_local_irq_restore(flags);
5416 : }
5417 : EXPORT_SYMBOL_GPL(lock_downgrade);
5418 :
5419 : /* NMI context !!! */
5420 1 : static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5421 : {
5422 : #ifdef CONFIG_PROVE_LOCKING
5423 1 : struct lock_class *class = look_up_lock_class(lock, subclass);
5424 1 : unsigned long mask = LOCKF_USED;
5425 :
5426 : /* if it doesn't have a class (yet), it certainly hasn't been used yet */
5427 1 : if (!class)
5428 0 : return;
5429 :
5430 : /*
5431 : * READ locks only conflict with USED, such that if we only ever use
5432 : * READ locks, there is no deadlock possible -- RCU.
5433 : */
5434 1 : if (!hlock->read)
5435 0 : mask |= LOCKF_USED_READ;
5436 :
5437 1 : if (!(class->usage_mask & mask))
5438 : return;
5439 :
5440 0 : hlock->class_idx = class - lock_classes;
5441 :
5442 0 : print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5443 : #endif
5444 : }
5445 :
5446 16 : static bool lockdep_nmi(void)
5447 : {
5448 16 : if (raw_cpu_read(lockdep_recursion))
5449 : return false;
5450 :
5451 1 : if (!in_nmi())
5452 : return false;
5453 :
5454 : return true;
5455 : }
5456 :
5457 : /*
5458 : * read_lock() is recursive if:
5459 : * 1. We force lockdep think this way in selftests or
5460 : * 2. The implementation is not queued read/write lock or
5461 : * 3. The locker is at an in_interrupt() context.
5462 : */
5463 41714 : bool read_lock_is_recursive(void)
5464 : {
5465 41714 : return force_read_lock_recursive ||
5466 41714 : !IS_ENABLED(CONFIG_QUEUED_RWLOCKS) ||
5467 41714 : in_interrupt();
5468 : }
5469 : EXPORT_SYMBOL_GPL(read_lock_is_recursive);
5470 :
5471 : /*
5472 : * We are not always called with irqs disabled - do that here,
5473 : * and also avoid lockdep recursion:
5474 : */
5475 13439640 : void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
5476 : int trylock, int read, int check,
5477 : struct lockdep_map *nest_lock, unsigned long ip)
5478 : {
5479 13439640 : unsigned long flags;
5480 :
5481 13439640 : trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
5482 :
5483 13476312 : if (!debug_locks)
5484 : return;
5485 :
5486 26951739 : if (unlikely(!lockdep_enabled())) {
5487 : /* XXX allow trylock from NMI ?!? */
5488 17 : if (lockdep_nmi() && !trylock) {
5489 1 : struct held_lock hlock;
5490 :
5491 1 : hlock.acquire_ip = ip;
5492 1 : hlock.instance = lock;
5493 1 : hlock.nest_lock = nest_lock;
5494 1 : hlock.irq_context = 2; // XXX
5495 1 : hlock.trylock = trylock;
5496 1 : hlock.read = read;
5497 1 : hlock.check = check;
5498 1 : hlock.hardirqs_off = true;
5499 1 : hlock.references = 0;
5500 :
5501 1 : verify_lock_unused(lock, &hlock, subclass);
5502 : }
5503 16 : return;
5504 : }
5505 :
5506 13475411 : raw_local_irq_save(flags);
5507 13474121 : check_flags(flags);
5508 :
5509 13471273 : lockdep_recursion_inc();
5510 13471273 : __lock_acquire(lock, subclass, trylock, read, check,
5511 13471273 : irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
5512 13448016 : lockdep_recursion_finish();
5513 13448016 : raw_local_irq_restore(flags);
5514 : }
5515 : EXPORT_SYMBOL_GPL(lock_acquire);
5516 :
5517 13434749 : void lock_release(struct lockdep_map *lock, unsigned long ip)
5518 : {
5519 13434749 : unsigned long flags;
5520 :
5521 13434749 : trace_lock_release(lock, ip);
5522 :
5523 26969186 : if (unlikely(!lockdep_enabled()))
5524 : return;
5525 :
5526 13486328 : raw_local_irq_save(flags);
5527 13487530 : check_flags(flags);
5528 :
5529 13486363 : lockdep_recursion_inc();
5530 13486363 : if (__lock_release(lock, ip))
5531 13441372 : check_chain_key(current);
5532 13435814 : lockdep_recursion_finish();
5533 13435814 : raw_local_irq_restore(flags);
5534 : }
5535 : EXPORT_SYMBOL_GPL(lock_release);
5536 :
5537 46590488 : noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
5538 : {
5539 46590488 : unsigned long flags;
5540 46590488 : int ret = 0;
5541 :
5542 93268555 : if (unlikely(!lockdep_enabled()))
5543 : return 1; /* avoid false negative lockdep_assert_held() */
5544 :
5545 46590142 : raw_local_irq_save(flags);
5546 46646576 : check_flags(flags);
5547 :
5548 46797329 : lockdep_recursion_inc();
5549 46797329 : ret = __lock_is_held(lock, read);
5550 46770838 : lockdep_recursion_finish();
5551 46770838 : raw_local_irq_restore(flags);
5552 :
5553 : return ret;
5554 : }
5555 : EXPORT_SYMBOL_GPL(lock_is_held_type);
5556 : NOKPROBE_SYMBOL(lock_is_held_type);
5557 :
5558 88081 : struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
5559 : {
5560 88081 : struct pin_cookie cookie = NIL_COOKIE;
5561 88081 : unsigned long flags;
5562 :
5563 176313 : if (unlikely(!lockdep_enabled()))
5564 0 : return cookie;
5565 :
5566 88232 : raw_local_irq_save(flags);
5567 88409 : check_flags(flags);
5568 :
5569 88386 : lockdep_recursion_inc();
5570 88386 : cookie = __lock_pin_lock(lock);
5571 87950 : lockdep_recursion_finish();
5572 87950 : raw_local_irq_restore(flags);
5573 :
5574 87957 : return cookie;
5575 : }
5576 : EXPORT_SYMBOL_GPL(lock_pin_lock);
5577 :
5578 7719 : void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5579 : {
5580 7719 : unsigned long flags;
5581 :
5582 15439 : if (unlikely(!lockdep_enabled()))
5583 : return;
5584 :
5585 7720 : raw_local_irq_save(flags);
5586 7721 : check_flags(flags);
5587 :
5588 7721 : lockdep_recursion_inc();
5589 7721 : __lock_repin_lock(lock, cookie);
5590 7720 : lockdep_recursion_finish();
5591 7720 : raw_local_irq_restore(flags);
5592 : }
5593 : EXPORT_SYMBOL_GPL(lock_repin_lock);
5594 :
5595 95452 : void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
5596 : {
5597 95452 : unsigned long flags;
5598 :
5599 191054 : if (unlikely(!lockdep_enabled()))
5600 : return;
5601 :
5602 95602 : raw_local_irq_save(flags);
5603 95919 : check_flags(flags);
5604 :
5605 96003 : lockdep_recursion_inc();
5606 96003 : __lock_unpin_lock(lock, cookie);
5607 95919 : lockdep_recursion_finish();
5608 95919 : raw_local_irq_restore(flags);
5609 : }
5610 : EXPORT_SYMBOL_GPL(lock_unpin_lock);
5611 :
5612 : #ifdef CONFIG_LOCK_STAT
5613 : static void print_lock_contention_bug(struct task_struct *curr,
5614 : struct lockdep_map *lock,
5615 : unsigned long ip)
5616 : {
5617 : if (!debug_locks_off())
5618 : return;
5619 : if (debug_locks_silent)
5620 : return;
5621 :
5622 : pr_warn("\n");
5623 : pr_warn("=================================\n");
5624 : pr_warn("WARNING: bad contention detected!\n");
5625 : print_kernel_ident();
5626 : pr_warn("---------------------------------\n");
5627 : pr_warn("%s/%d is trying to contend lock (",
5628 : curr->comm, task_pid_nr(curr));
5629 : print_lockdep_cache(lock);
5630 : pr_cont(") at:\n");
5631 : print_ip_sym(KERN_WARNING, ip);
5632 : pr_warn("but there are no locks held!\n");
5633 : pr_warn("\nother info that might help us debug this:\n");
5634 : lockdep_print_held_locks(curr);
5635 :
5636 : pr_warn("\nstack backtrace:\n");
5637 : dump_stack();
5638 : }
5639 :
5640 : static void
5641 : __lock_contended(struct lockdep_map *lock, unsigned long ip)
5642 : {
5643 : struct task_struct *curr = current;
5644 : struct held_lock *hlock;
5645 : struct lock_class_stats *stats;
5646 : unsigned int depth;
5647 : int i, contention_point, contending_point;
5648 :
5649 : depth = curr->lockdep_depth;
5650 : /*
5651 : * Whee, we contended on this lock, except it seems we're not
5652 : * actually trying to acquire anything much at all..
5653 : */
5654 : if (DEBUG_LOCKS_WARN_ON(!depth))
5655 : return;
5656 :
5657 : hlock = find_held_lock(curr, lock, depth, &i);
5658 : if (!hlock) {
5659 : print_lock_contention_bug(curr, lock, ip);
5660 : return;
5661 : }
5662 :
5663 : if (hlock->instance != lock)
5664 : return;
5665 :
5666 : hlock->waittime_stamp = lockstat_clock();
5667 :
5668 : contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
5669 : contending_point = lock_point(hlock_class(hlock)->contending_point,
5670 : lock->ip);
5671 :
5672 : stats = get_lock_stats(hlock_class(hlock));
5673 : if (contention_point < LOCKSTAT_POINTS)
5674 : stats->contention_point[contention_point]++;
5675 : if (contending_point < LOCKSTAT_POINTS)
5676 : stats->contending_point[contending_point]++;
5677 : if (lock->cpu != smp_processor_id())
5678 : stats->bounces[bounce_contended + !!hlock->read]++;
5679 : }
5680 :
5681 : static void
5682 : __lock_acquired(struct lockdep_map *lock, unsigned long ip)
5683 : {
5684 : struct task_struct *curr = current;
5685 : struct held_lock *hlock;
5686 : struct lock_class_stats *stats;
5687 : unsigned int depth;
5688 : u64 now, waittime = 0;
5689 : int i, cpu;
5690 :
5691 : depth = curr->lockdep_depth;
5692 : /*
5693 : * Yay, we acquired ownership of this lock we didn't try to
5694 : * acquire, how the heck did that happen?
5695 : */
5696 : if (DEBUG_LOCKS_WARN_ON(!depth))
5697 : return;
5698 :
5699 : hlock = find_held_lock(curr, lock, depth, &i);
5700 : if (!hlock) {
5701 : print_lock_contention_bug(curr, lock, _RET_IP_);
5702 : return;
5703 : }
5704 :
5705 : if (hlock->instance != lock)
5706 : return;
5707 :
5708 : cpu = smp_processor_id();
5709 : if (hlock->waittime_stamp) {
5710 : now = lockstat_clock();
5711 : waittime = now - hlock->waittime_stamp;
5712 : hlock->holdtime_stamp = now;
5713 : }
5714 :
5715 : stats = get_lock_stats(hlock_class(hlock));
5716 : if (waittime) {
5717 : if (hlock->read)
5718 : lock_time_inc(&stats->read_waittime, waittime);
5719 : else
5720 : lock_time_inc(&stats->write_waittime, waittime);
5721 : }
5722 : if (lock->cpu != cpu)
5723 : stats->bounces[bounce_acquired + !!hlock->read]++;
5724 :
5725 : lock->cpu = cpu;
5726 : lock->ip = ip;
5727 : }
5728 :
5729 : void lock_contended(struct lockdep_map *lock, unsigned long ip)
5730 : {
5731 : unsigned long flags;
5732 :
5733 : trace_lock_acquired(lock, ip);
5734 :
5735 : if (unlikely(!lock_stat || !lockdep_enabled()))
5736 : return;
5737 :
5738 : raw_local_irq_save(flags);
5739 : check_flags(flags);
5740 : lockdep_recursion_inc();
5741 : __lock_contended(lock, ip);
5742 : lockdep_recursion_finish();
5743 : raw_local_irq_restore(flags);
5744 : }
5745 : EXPORT_SYMBOL_GPL(lock_contended);
5746 :
5747 : void lock_acquired(struct lockdep_map *lock, unsigned long ip)
5748 : {
5749 : unsigned long flags;
5750 :
5751 : trace_lock_contended(lock, ip);
5752 :
5753 : if (unlikely(!lock_stat || !lockdep_enabled()))
5754 : return;
5755 :
5756 : raw_local_irq_save(flags);
5757 : check_flags(flags);
5758 : lockdep_recursion_inc();
5759 : __lock_acquired(lock, ip);
5760 : lockdep_recursion_finish();
5761 : raw_local_irq_restore(flags);
5762 : }
5763 : EXPORT_SYMBOL_GPL(lock_acquired);
5764 : #endif
5765 :
5766 : /*
5767 : * Used by the testsuite, sanitize the validator state
5768 : * after a simulated failure:
5769 : */
5770 :
5771 0 : void lockdep_reset(void)
5772 : {
5773 0 : unsigned long flags;
5774 0 : int i;
5775 :
5776 0 : raw_local_irq_save(flags);
5777 0 : lockdep_init_task(current);
5778 0 : memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
5779 0 : nr_hardirq_chains = 0;
5780 0 : nr_softirq_chains = 0;
5781 0 : nr_process_chains = 0;
5782 0 : debug_locks = 1;
5783 0 : for (i = 0; i < CHAINHASH_SIZE; i++)
5784 0 : INIT_HLIST_HEAD(chainhash_table + i);
5785 0 : raw_local_irq_restore(flags);
5786 0 : }
5787 :
5788 : /* Remove a class from a lock chain. Must be called with the graph lock held. */
5789 1724 : static void remove_class_from_lock_chain(struct pending_free *pf,
5790 : struct lock_chain *chain,
5791 : struct lock_class *class)
5792 : {
5793 : #ifdef CONFIG_PROVE_LOCKING
5794 1724 : int i;
5795 :
5796 6327 : for (i = chain->base; i < chain->base + chain->depth; i++) {
5797 4606 : if (chain_hlock_class_idx(chain_hlocks[i]) != class - lock_classes)
5798 4603 : continue;
5799 : /*
5800 : * Each lock class occurs at most once in a lock chain so once
5801 : * we found a match we can break out of this loop.
5802 : */
5803 3 : goto free_lock_chain;
5804 : }
5805 : /* Since the chain has not been modified, return. */
5806 : return;
5807 :
5808 3 : free_lock_chain:
5809 3 : free_chain_hlocks(chain->base, chain->depth);
5810 : /* Overwrite the chain key for concurrent RCU readers. */
5811 3 : WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
5812 3 : dec_chains(chain->irq_context);
5813 :
5814 : /*
5815 : * Note: calling hlist_del_rcu() from inside a
5816 : * hlist_for_each_entry_rcu() loop is safe.
5817 : */
5818 3 : hlist_del_rcu(&chain->entry);
5819 3 : __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
5820 3 : nr_zapped_lock_chains++;
5821 : #endif
5822 : }
5823 :
5824 : /* Must be called with the graph lock held. */
5825 1 : static void remove_class_from_lock_chains(struct pending_free *pf,
5826 : struct lock_class *class)
5827 : {
5828 1 : struct lock_chain *chain;
5829 1 : struct hlist_head *head;
5830 1 : int i;
5831 :
5832 32769 : for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
5833 32768 : head = chainhash_table + i;
5834 67260 : hlist_for_each_entry_rcu(chain, head, entry) {
5835 1724 : remove_class_from_lock_chain(pf, chain, class);
5836 : }
5837 : }
5838 1 : }
5839 :
5840 : /*
5841 : * Remove all references to a lock class. The caller must hold the graph lock.
5842 : */
5843 1 : static void zap_class(struct pending_free *pf, struct lock_class *class)
5844 : {
5845 1 : struct lock_list *entry;
5846 1 : int i;
5847 :
5848 1 : WARN_ON_ONCE(!class->key);
5849 :
5850 : /*
5851 : * Remove all dependencies this lock is
5852 : * involved in:
5853 : */
5854 1813 : for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5855 1812 : entry = list_entries + i;
5856 1812 : if (entry->class != class && entry->links_to != class)
5857 1810 : continue;
5858 2 : __clear_bit(i, list_entries_in_use);
5859 2 : nr_list_entries--;
5860 1812 : list_del_rcu(&entry->entry);
5861 : }
5862 1 : if (list_empty(&class->locks_after) &&
5863 1 : list_empty(&class->locks_before)) {
5864 1 : list_move_tail(&class->lock_entry, &pf->zapped);
5865 1 : hlist_del_rcu(&class->hash_entry);
5866 1 : WRITE_ONCE(class->key, NULL);
5867 1 : WRITE_ONCE(class->name, NULL);
5868 1 : nr_lock_classes--;
5869 1 : __clear_bit(class - lock_classes, lock_classes_in_use);
5870 : } else {
5871 0 : WARN_ONCE(true, "%s() failed for class %s\n", __func__,
5872 : class->name);
5873 : }
5874 :
5875 1 : remove_class_from_lock_chains(pf, class);
5876 1 : nr_zapped_classes++;
5877 1 : }
5878 :
5879 1 : static void reinit_class(struct lock_class *class)
5880 : {
5881 1 : void *const p = class;
5882 1 : const unsigned int offset = offsetof(struct lock_class, key);
5883 :
5884 1 : WARN_ON_ONCE(!class->lock_entry.next);
5885 1 : WARN_ON_ONCE(!list_empty(&class->locks_after));
5886 1 : WARN_ON_ONCE(!list_empty(&class->locks_before));
5887 1 : memset(p + offset, 0, sizeof(*class) - offset);
5888 1 : WARN_ON_ONCE(!class->lock_entry.next);
5889 1 : WARN_ON_ONCE(!list_empty(&class->locks_after));
5890 1 : WARN_ON_ONCE(!list_empty(&class->locks_before));
5891 1 : }
5892 :
5893 711 : static inline int within(const void *addr, void *start, unsigned long size)
5894 : {
5895 703 : return addr >= start && addr < start + size;
5896 : }
5897 :
5898 2 : static bool inside_selftest(void)
5899 : {
5900 2 : return current == lockdep_selftest_task_struct;
5901 : }
5902 :
5903 : /* The caller must hold the graph lock. */
5904 1 : static struct pending_free *get_pending_free(void)
5905 : {
5906 1 : return delayed_free.pf + delayed_free.index;
5907 : }
5908 :
5909 : static void free_zapped_rcu(struct rcu_head *cb);
5910 :
5911 : /*
5912 : * Schedule an RCU callback if no RCU callback is pending. Must be called with
5913 : * the graph lock held.
5914 : */
5915 2 : static void call_rcu_zapped(struct pending_free *pf)
5916 : {
5917 2 : WARN_ON_ONCE(inside_selftest());
5918 :
5919 2 : if (list_empty(&pf->zapped))
5920 : return;
5921 :
5922 1 : if (delayed_free.scheduled)
5923 : return;
5924 :
5925 1 : delayed_free.scheduled = true;
5926 :
5927 1 : WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
5928 1 : delayed_free.index ^= 1;
5929 :
5930 1 : call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5931 : }
5932 :
5933 : /* The caller must hold the graph lock. May be called from RCU context. */
5934 1 : static void __free_zapped_classes(struct pending_free *pf)
5935 : {
5936 1 : struct lock_class *class;
5937 :
5938 1 : check_data_structures();
5939 :
5940 2 : list_for_each_entry(class, &pf->zapped, lock_entry)
5941 1 : reinit_class(class);
5942 :
5943 1 : list_splice_init(&pf->zapped, &free_lock_classes);
5944 :
5945 : #ifdef CONFIG_PROVE_LOCKING
5946 2 : bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
5947 1 : pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
5948 1 : bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
5949 : #endif
5950 1 : }
5951 :
5952 1 : static void free_zapped_rcu(struct rcu_head *ch)
5953 : {
5954 1 : struct pending_free *pf;
5955 1 : unsigned long flags;
5956 :
5957 1 : if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
5958 : return;
5959 :
5960 1 : raw_local_irq_save(flags);
5961 1 : lockdep_lock();
5962 :
5963 : /* closed head */
5964 1 : pf = delayed_free.pf + (delayed_free.index ^ 1);
5965 1 : __free_zapped_classes(pf);
5966 1 : delayed_free.scheduled = false;
5967 :
5968 : /*
5969 : * If there's anything on the open list, close and start a new callback.
5970 : */
5971 1 : call_rcu_zapped(delayed_free.pf + delayed_free.index);
5972 :
5973 1 : lockdep_unlock();
5974 1 : raw_local_irq_restore(flags);
5975 : }
5976 :
5977 : /*
5978 : * Remove all lock classes from the class hash table and from the
5979 : * all_lock_classes list whose key or name is in the address range [start,
5980 : * start + size). Move these lock classes to the zapped_classes list. Must
5981 : * be called with the graph lock held.
5982 : */
5983 1 : static void __lockdep_free_key_range(struct pending_free *pf, void *start,
5984 : unsigned long size)
5985 : {
5986 1 : struct lock_class *class;
5987 1 : struct hlist_head *head;
5988 1 : int i;
5989 :
5990 : /* Unhash all classes that were created by a module. */
5991 4097 : for (i = 0; i < CLASSHASH_SIZE; i++) {
5992 4096 : head = classhash_table + i;
5993 8561 : hlist_for_each_entry_rcu(class, head, hash_entry) {
5994 356 : if (!within(class->key, start, size) &&
5995 355 : !within(class->name, start, size))
5996 355 : continue;
5997 1 : zap_class(pf, class);
5998 : }
5999 : }
6000 1 : }
6001 :
6002 : /*
6003 : * Used in module.c to remove lock classes from memory that is going to be
6004 : * freed; and possibly re-used by other modules.
6005 : *
6006 : * We will have had one synchronize_rcu() before getting here, so we're
6007 : * guaranteed nobody will look up these exact classes -- they're properly dead
6008 : * but still allocated.
6009 : */
6010 0 : static void lockdep_free_key_range_reg(void *start, unsigned long size)
6011 : {
6012 0 : struct pending_free *pf;
6013 0 : unsigned long flags;
6014 :
6015 0 : init_data_structures_once();
6016 :
6017 0 : raw_local_irq_save(flags);
6018 0 : lockdep_lock();
6019 0 : pf = get_pending_free();
6020 0 : __lockdep_free_key_range(pf, start, size);
6021 0 : call_rcu_zapped(pf);
6022 0 : lockdep_unlock();
6023 0 : raw_local_irq_restore(flags);
6024 :
6025 : /*
6026 : * Wait for any possible iterators from look_up_lock_class() to pass
6027 : * before continuing to free the memory they refer to.
6028 : */
6029 0 : synchronize_rcu();
6030 0 : }
6031 :
6032 : /*
6033 : * Free all lockdep keys in the range [start, start+size). Does not sleep.
6034 : * Ignores debug_locks. Must only be used by the lockdep selftests.
6035 : */
6036 0 : static void lockdep_free_key_range_imm(void *start, unsigned long size)
6037 : {
6038 0 : struct pending_free *pf = delayed_free.pf;
6039 0 : unsigned long flags;
6040 :
6041 0 : init_data_structures_once();
6042 :
6043 0 : raw_local_irq_save(flags);
6044 0 : lockdep_lock();
6045 0 : __lockdep_free_key_range(pf, start, size);
6046 0 : __free_zapped_classes(pf);
6047 0 : lockdep_unlock();
6048 0 : raw_local_irq_restore(flags);
6049 0 : }
6050 :
6051 0 : void lockdep_free_key_range(void *start, unsigned long size)
6052 : {
6053 0 : init_data_structures_once();
6054 :
6055 0 : if (inside_selftest())
6056 0 : lockdep_free_key_range_imm(start, size);
6057 : else
6058 0 : lockdep_free_key_range_reg(start, size);
6059 0 : }
6060 :
6061 : /*
6062 : * Check whether any element of the @lock->class_cache[] array refers to a
6063 : * registered lock class. The caller must hold either the graph lock or the
6064 : * RCU read lock.
6065 : */
6066 0 : static bool lock_class_cache_is_registered(struct lockdep_map *lock)
6067 : {
6068 0 : struct lock_class *class;
6069 0 : struct hlist_head *head;
6070 0 : int i, j;
6071 :
6072 0 : for (i = 0; i < CLASSHASH_SIZE; i++) {
6073 0 : head = classhash_table + i;
6074 0 : hlist_for_each_entry_rcu(class, head, hash_entry) {
6075 0 : for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
6076 0 : if (lock->class_cache[j] == class)
6077 : return true;
6078 : }
6079 : }
6080 : return false;
6081 : }
6082 :
6083 : /* The caller must hold the graph lock. Does not sleep. */
6084 0 : static void __lockdep_reset_lock(struct pending_free *pf,
6085 : struct lockdep_map *lock)
6086 : {
6087 0 : struct lock_class *class;
6088 0 : int j;
6089 :
6090 : /*
6091 : * Remove all classes this lock might have:
6092 : */
6093 0 : for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
6094 : /*
6095 : * If the class exists we look it up and zap it:
6096 : */
6097 0 : class = look_up_lock_class(lock, j);
6098 0 : if (class)
6099 0 : zap_class(pf, class);
6100 : }
6101 : /*
6102 : * Debug check: in the end all mapped classes should
6103 : * be gone.
6104 : */
6105 0 : if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
6106 0 : debug_locks_off();
6107 0 : }
6108 :
6109 : /*
6110 : * Remove all information lockdep has about a lock if debug_locks == 1. Free
6111 : * released data structures from RCU context.
6112 : */
6113 0 : static void lockdep_reset_lock_reg(struct lockdep_map *lock)
6114 : {
6115 0 : struct pending_free *pf;
6116 0 : unsigned long flags;
6117 0 : int locked;
6118 :
6119 0 : raw_local_irq_save(flags);
6120 0 : locked = graph_lock();
6121 0 : if (!locked)
6122 0 : goto out_irq;
6123 :
6124 0 : pf = get_pending_free();
6125 0 : __lockdep_reset_lock(pf, lock);
6126 0 : call_rcu_zapped(pf);
6127 :
6128 0 : graph_unlock();
6129 0 : out_irq:
6130 0 : raw_local_irq_restore(flags);
6131 0 : }
6132 :
6133 : /*
6134 : * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
6135 : * lockdep selftests.
6136 : */
6137 0 : static void lockdep_reset_lock_imm(struct lockdep_map *lock)
6138 : {
6139 0 : struct pending_free *pf = delayed_free.pf;
6140 0 : unsigned long flags;
6141 :
6142 0 : raw_local_irq_save(flags);
6143 0 : lockdep_lock();
6144 0 : __lockdep_reset_lock(pf, lock);
6145 0 : __free_zapped_classes(pf);
6146 0 : lockdep_unlock();
6147 0 : raw_local_irq_restore(flags);
6148 0 : }
6149 :
6150 0 : void lockdep_reset_lock(struct lockdep_map *lock)
6151 : {
6152 0 : init_data_structures_once();
6153 :
6154 0 : if (inside_selftest())
6155 0 : lockdep_reset_lock_imm(lock);
6156 : else
6157 0 : lockdep_reset_lock_reg(lock);
6158 0 : }
6159 :
6160 : /* Unregister a dynamically allocated key. */
6161 1 : void lockdep_unregister_key(struct lock_class_key *key)
6162 : {
6163 1 : struct hlist_head *hash_head = keyhashentry(key);
6164 1 : struct lock_class_key *k;
6165 1 : struct pending_free *pf;
6166 1 : unsigned long flags;
6167 1 : bool found = false;
6168 :
6169 1 : might_sleep();
6170 :
6171 1 : if (WARN_ON_ONCE(static_obj(key)))
6172 : return;
6173 :
6174 1 : raw_local_irq_save(flags);
6175 1 : if (!graph_lock())
6176 0 : goto out_irq;
6177 :
6178 1 : pf = get_pending_free();
6179 2 : hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
6180 1 : if (k == key) {
6181 1 : hlist_del_rcu(&k->hash_entry);
6182 1 : found = true;
6183 1 : break;
6184 : }
6185 : }
6186 1 : WARN_ON_ONCE(!found);
6187 1 : __lockdep_free_key_range(pf, key, 1);
6188 1 : call_rcu_zapped(pf);
6189 1 : graph_unlock();
6190 1 : out_irq:
6191 1 : raw_local_irq_restore(flags);
6192 :
6193 : /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
6194 1 : synchronize_rcu();
6195 : }
6196 : EXPORT_SYMBOL_GPL(lockdep_unregister_key);
6197 :
6198 1 : void __init lockdep_init(void)
6199 : {
6200 1 : printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
6201 :
6202 1 : printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
6203 1 : printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
6204 1 : printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
6205 1 : printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
6206 1 : printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
6207 1 : printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
6208 1 : printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
6209 :
6210 1 : printk(" memory used by lock dependency info: %zu kB\n",
6211 : (sizeof(lock_classes) +
6212 : sizeof(lock_classes_in_use) +
6213 : sizeof(classhash_table) +
6214 : sizeof(list_entries) +
6215 : sizeof(list_entries_in_use) +
6216 : sizeof(chainhash_table) +
6217 : sizeof(delayed_free)
6218 : #ifdef CONFIG_PROVE_LOCKING
6219 : + sizeof(lock_cq)
6220 : + sizeof(lock_chains)
6221 : + sizeof(lock_chains_in_use)
6222 : + sizeof(chain_hlocks)
6223 : #endif
6224 : ) / 1024
6225 : );
6226 :
6227 : #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
6228 1 : printk(" memory used for stack traces: %zu kB\n",
6229 : (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
6230 : );
6231 : #endif
6232 :
6233 1 : printk(" per task-struct memory footprint: %zu bytes\n",
6234 : sizeof(((struct task_struct *)NULL)->held_locks));
6235 1 : }
6236 :
6237 : static void
6238 0 : print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
6239 : const void *mem_to, struct held_lock *hlock)
6240 : {
6241 0 : if (!debug_locks_off())
6242 : return;
6243 0 : if (debug_locks_silent)
6244 : return;
6245 :
6246 0 : pr_warn("\n");
6247 0 : pr_warn("=========================\n");
6248 0 : pr_warn("WARNING: held lock freed!\n");
6249 0 : print_kernel_ident();
6250 0 : pr_warn("-------------------------\n");
6251 0 : pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
6252 : curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
6253 0 : print_lock(hlock);
6254 0 : lockdep_print_held_locks(curr);
6255 :
6256 0 : pr_warn("\nstack backtrace:\n");
6257 0 : dump_stack();
6258 : }
6259 :
6260 2379519 : static inline int not_in_range(const void* mem_from, unsigned long mem_len,
6261 : const void* lock_from, unsigned long lock_len)
6262 : {
6263 3781274 : return lock_from + lock_len <= mem_from ||
6264 1401755 : mem_from + mem_len <= lock_from;
6265 : }
6266 :
6267 : /*
6268 : * Called when kernel memory is freed (or unmapped), or if a lock
6269 : * is destroyed or reinitialized - this code checks whether there is
6270 : * any held lock in the memory range of <from> to <to>:
6271 : */
6272 2388185 : void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
6273 : {
6274 2388185 : struct task_struct *curr = current;
6275 2388185 : struct held_lock *hlock;
6276 2388185 : unsigned long flags;
6277 2388185 : int i;
6278 :
6279 2388185 : if (unlikely(!debug_locks))
6280 : return;
6281 :
6282 2388185 : raw_local_irq_save(flags);
6283 7158283 : for (i = 0; i < curr->lockdep_depth; i++) {
6284 2379519 : hlock = curr->held_locks + i;
6285 :
6286 2379519 : if (not_in_range(mem_from, mem_len, hlock->instance,
6287 : sizeof(*hlock->instance)))
6288 2379519 : continue;
6289 :
6290 0 : print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
6291 0 : break;
6292 : }
6293 2390155 : raw_local_irq_restore(flags);
6294 : }
6295 : EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
6296 :
6297 0 : static void print_held_locks_bug(void)
6298 : {
6299 0 : if (!debug_locks_off())
6300 : return;
6301 0 : if (debug_locks_silent)
6302 : return;
6303 :
6304 0 : pr_warn("\n");
6305 0 : pr_warn("====================================\n");
6306 0 : pr_warn("WARNING: %s/%d still has locks held!\n",
6307 : current->comm, task_pid_nr(current));
6308 0 : print_kernel_ident();
6309 0 : pr_warn("------------------------------------\n");
6310 0 : lockdep_print_held_locks(current);
6311 0 : pr_warn("\nstack backtrace:\n");
6312 0 : dump_stack();
6313 : }
6314 :
6315 948 : void debug_check_no_locks_held(void)
6316 : {
6317 948 : if (unlikely(current->lockdep_depth > 0))
6318 0 : print_held_locks_bug();
6319 948 : }
6320 : EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
6321 :
6322 : #ifdef __KERNEL__
6323 0 : void debug_show_all_locks(void)
6324 : {
6325 0 : struct task_struct *g, *p;
6326 :
6327 0 : if (unlikely(!debug_locks)) {
6328 0 : pr_warn("INFO: lockdep is turned off.\n");
6329 0 : return;
6330 : }
6331 0 : pr_warn("\nShowing all locks held in the system:\n");
6332 :
6333 0 : rcu_read_lock();
6334 0 : for_each_process_thread(g, p) {
6335 0 : if (!p->lockdep_depth)
6336 0 : continue;
6337 0 : lockdep_print_held_locks(p);
6338 0 : touch_nmi_watchdog();
6339 : touch_all_softlockup_watchdogs();
6340 : }
6341 0 : rcu_read_unlock();
6342 :
6343 0 : pr_warn("\n");
6344 0 : pr_warn("=============================================\n\n");
6345 : }
6346 : EXPORT_SYMBOL_GPL(debug_show_all_locks);
6347 : #endif
6348 :
6349 : /*
6350 : * Careful: only use this function if you are sure that
6351 : * the task cannot run in parallel!
6352 : */
6353 0 : void debug_show_held_locks(struct task_struct *task)
6354 : {
6355 0 : if (unlikely(!debug_locks)) {
6356 0 : printk("INFO: lockdep is turned off.\n");
6357 0 : return;
6358 : }
6359 0 : lockdep_print_held_locks(task);
6360 : }
6361 : EXPORT_SYMBOL_GPL(debug_show_held_locks);
6362 :
6363 431367 : asmlinkage __visible void lockdep_sys_exit(void)
6364 : {
6365 431367 : struct task_struct *curr = current;
6366 :
6367 431367 : if (unlikely(curr->lockdep_depth)) {
6368 0 : if (!debug_locks_off())
6369 : return;
6370 0 : pr_warn("\n");
6371 0 : pr_warn("================================================\n");
6372 0 : pr_warn("WARNING: lock held when returning to user space!\n");
6373 0 : print_kernel_ident();
6374 0 : pr_warn("------------------------------------------------\n");
6375 0 : pr_warn("%s/%d is leaving the kernel with locks still held!\n",
6376 : curr->comm, curr->pid);
6377 0 : lockdep_print_held_locks(curr);
6378 : }
6379 :
6380 : /*
6381 : * The lock history for each syscall should be independent. So wipe the
6382 : * slate clean on return to userspace.
6383 : */
6384 431367 : lockdep_invariant_state(false);
6385 : }
6386 :
6387 0 : void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
6388 : {
6389 0 : struct task_struct *curr = current;
6390 :
6391 : /* Note: the following can be executed concurrently, so be careful. */
6392 0 : pr_warn("\n");
6393 0 : pr_warn("=============================\n");
6394 0 : pr_warn("WARNING: suspicious RCU usage\n");
6395 0 : print_kernel_ident();
6396 0 : pr_warn("-----------------------------\n");
6397 0 : pr_warn("%s:%d %s!\n", file, line, s);
6398 0 : pr_warn("\nother info that might help us debug this:\n\n");
6399 0 : pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
6400 : !rcu_lockdep_current_cpu_online()
6401 : ? "RCU used illegally from offline CPU!\n"
6402 : : "",
6403 : rcu_scheduler_active, debug_locks);
6404 :
6405 : /*
6406 : * If a CPU is in the RCU-free window in idle (ie: in the section
6407 : * between rcu_idle_enter() and rcu_idle_exit(), then RCU
6408 : * considers that CPU to be in an "extended quiescent state",
6409 : * which means that RCU will be completely ignoring that CPU.
6410 : * Therefore, rcu_read_lock() and friends have absolutely no
6411 : * effect on a CPU running in that state. In other words, even if
6412 : * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
6413 : * delete data structures out from under it. RCU really has no
6414 : * choice here: we need to keep an RCU-free window in idle where
6415 : * the CPU may possibly enter into low power mode. This way we can
6416 : * notice an extended quiescent state to other CPUs that started a grace
6417 : * period. Otherwise we would delay any grace period as long as we run
6418 : * in the idle task.
6419 : *
6420 : * So complain bitterly if someone does call rcu_read_lock(),
6421 : * rcu_read_lock_bh() and so on from extended quiescent states.
6422 : */
6423 0 : if (!rcu_is_watching())
6424 0 : pr_warn("RCU used illegally from extended quiescent state!\n");
6425 :
6426 0 : lockdep_print_held_locks(curr);
6427 0 : pr_warn("\nstack backtrace:\n");
6428 0 : dump_stack();
6429 0 : }
6430 : EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
|