1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : /*
3 : * Runtime locking correctness validator
4 : *
5 : * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
6 : * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
7 : *
8 : * see Documentation/locking/lockdep-design.rst for more details.
9 : */
10 : #ifndef __LINUX_LOCKDEP_H
11 : #define __LINUX_LOCKDEP_H
12 :
13 : #include <linux/lockdep_types.h>
14 : #include <linux/smp.h>
15 : #include <asm/percpu.h>
16 :
17 : struct task_struct;
18 :
19 : /* for sysctl */
20 : extern int prove_locking;
21 : extern int lock_stat;
22 :
23 : #ifdef CONFIG_LOCKDEP
24 :
25 : #include <linux/linkage.h>
26 : #include <linux/list.h>
27 : #include <linux/debug_locks.h>
28 : #include <linux/stacktrace.h>
29 :
30 5793 : static inline void lockdep_copy_map(struct lockdep_map *to,
31 : struct lockdep_map *from)
32 : {
33 5793 : int i;
34 :
35 5793 : *to = *from;
36 : /*
37 : * Since the class cache can be modified concurrently we could observe
38 : * half pointers (64bit arch using 32bit copy insns). Therefore clear
39 : * the caches and take the performance hit.
40 : *
41 : * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
42 : * that relies on cache abuse.
43 : */
44 17385 : for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
45 11592 : to->class_cache[i] = NULL;
46 : }
47 :
48 : /*
49 : * Every lock has a list of other locks that were taken after it.
50 : * We only grow the list, never remove from it:
51 : */
52 : struct lock_list {
53 : struct list_head entry;
54 : struct lock_class *class;
55 : struct lock_class *links_to;
56 : const struct lock_trace *trace;
57 : u16 distance;
58 : /* bitmap of different dependencies from head to this */
59 : u8 dep;
60 : /* used by BFS to record whether "prev -> this" only has -(*R)-> */
61 : u8 only_xr;
62 :
63 : /*
64 : * The parent field is used to implement breadth-first search, and
65 : * bit 0 is reused to indicate whether the lock has been accessed in BFS.
66 : */
67 : struct lock_list *parent;
68 : };
69 :
70 : /**
71 : * struct lock_chain - lock dependency chain record
72 : *
73 : * @irq_context: the same as irq_context in held_lock below
74 : * @depth: the number of held locks in this chain
75 : * @base: the index in chain_hlocks for this chain
76 : * @entry: the collided lock chains in lock_chain hash list
77 : * @chain_key: the hash key of this lock_chain
78 : */
79 : struct lock_chain {
80 : /* see BUILD_BUG_ON()s in add_chain_cache() */
81 : unsigned int irq_context : 2,
82 : depth : 6,
83 : base : 24;
84 : /* 4 byte hole */
85 : struct hlist_node entry;
86 : u64 chain_key;
87 : };
88 :
89 : #define MAX_LOCKDEP_KEYS_BITS 13
90 : #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
91 : #define INITIAL_CHAIN_KEY -1
92 :
93 : struct held_lock {
94 : /*
95 : * One-way hash of the dependency chain up to this point. We
96 : * hash the hashes step by step as the dependency chain grows.
97 : *
98 : * We use it for dependency-caching and we skip detection
99 : * passes and dependency-updates if there is a cache-hit, so
100 : * it is absolutely critical for 100% coverage of the validator
101 : * to have a unique key value for every unique dependency path
102 : * that can occur in the system, to make a unique hash value
103 : * as likely as possible - hence the 64-bit width.
104 : *
105 : * The task struct holds the current hash value (initialized
106 : * with zero), here we store the previous hash value:
107 : */
108 : u64 prev_chain_key;
109 : unsigned long acquire_ip;
110 : struct lockdep_map *instance;
111 : struct lockdep_map *nest_lock;
112 : #ifdef CONFIG_LOCK_STAT
113 : u64 waittime_stamp;
114 : u64 holdtime_stamp;
115 : #endif
116 : /*
117 : * class_idx is zero-indexed; it points to the element in
118 : * lock_classes this held lock instance belongs to. class_idx is in
119 : * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
120 : */
121 : unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
122 : /*
123 : * The lock-stack is unified in that the lock chains of interrupt
124 : * contexts nest on top of process context chains, but we 'separate'
125 : * the hashes by starting with 0 if we cross into an interrupt
126 : * context, and we also do not add cross-context lock
127 : * dependencies - the lock usage graph walking covers that area
128 : * anyway, and we'd just unnecessarily increase the number of
129 : * dependencies otherwise. [Note: hardirq and softirq contexts
130 : * are separated from each other too.]
131 : *
132 : * The following field is used to detect when we cross into an
133 : * interrupt context:
134 : */
135 : unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
136 : unsigned int trylock:1; /* 16 bits */
137 :
138 : unsigned int read:2; /* see lock_acquire() comment */
139 : unsigned int check:1; /* see lock_acquire() comment */
140 : unsigned int hardirqs_off:1;
141 : unsigned int references:12; /* 32 bits */
142 : unsigned int pin_count;
143 : };
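/*
 * Illustrative sketch of the chain-key scheme described in the comment
 * above (conceptual only, not the kernel's actual mixing function):
 *
 *	u64 chain_key = task's initial chain key;
 *
 *	for (i = 0; i < curr->lockdep_depth; i++) {
 *		struct held_lock *hlock = curr->held_locks + i;
 *
 *		hlock->prev_chain_key = chain_key;		// value before this lock
 *		chain_key = mix(chain_key, hlock->class_idx);	// mix() is a placeholder
 *	}
 *
 * Storing the previous value in each held_lock lets the validator rebuild
 * the key when a lock in the middle of the stack is released.
 */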
144 :
145 : /*
146 : * Initialization, self-test and debugging-output methods:
147 : */
148 : extern void lockdep_init(void);
149 : extern void lockdep_reset(void);
150 : extern void lockdep_reset_lock(struct lockdep_map *lock);
151 : extern void lockdep_free_key_range(void *start, unsigned long size);
152 : extern asmlinkage void lockdep_sys_exit(void);
153 : extern void lockdep_set_selftest_task(struct task_struct *task);
154 :
155 : extern void lockdep_init_task(struct task_struct *task);
156 :
157 : /*
158 : * Split the recursion counter in two to readily detect 'off' vs recursion.
159 : */
160 : #define LOCKDEP_RECURSION_BITS 16
161 : #define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
162 : #define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1)
163 :
164 : /*
165 : * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
166 : * to header dependencies.
167 : */
168 :
169 : #define lockdep_off() \
170 : do { \
171 : current->lockdep_recursion += LOCKDEP_OFF; \
172 : } while (0)
173 :
174 : #define lockdep_on() \
175 : do { \
176 : current->lockdep_recursion -= LOCKDEP_OFF; \
177 : } while (0)
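/*
 * Worked example of the split: with LOCKDEP_RECURSION_BITS == 16,
 * LOCKDEP_OFF is 0x10000 and LOCKDEP_RECURSION_MASK is 0xffff.  After two
 * nested lockdep_off() calls the counter reads 0x20000: the low 16 bits
 * (lockdep's own recursion depth) are still zero, while the high bits say
 * how many times lockdep has been switched off.
 */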
178 :
179 : extern void lockdep_register_key(struct lock_class_key *key);
180 : extern void lockdep_unregister_key(struct lock_class_key *key);
181 :
182 : /*
183 : * These methods are used by specific locking variants (spinlocks,
184 : * rwlocks, mutexes and rwsems) to pass init/acquire/release events
185 : * to lockdep:
186 : */
187 :
188 : extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
189 : struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
190 :
191 : static inline void
192 1368159 : lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
193 : struct lock_class_key *key, int subclass, u8 inner, u8 outer)
194 : {
195 1364399 : lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
196 2492 : }
197 :
198 : static inline void
199 1273610 : lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
200 : struct lock_class_key *key, int subclass, u8 inner)
201 : {
202 1326549 : lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
203 : }
204 :
205 54155 : static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
206 : struct lock_class_key *key, int subclass)
207 : {
208 54155 : lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
209 846 : }
210 :
211 : /*
212 : * Reinitialize a lock key - for cases where there is special locking or
213 : * special initialization of locks so that the validator gets the scope
214 : * of dependencies wrong: they are either too broad (they need a class-split)
215 : * or they are too narrow (they suffer from a false class-split):
216 : */
217 : #define lockdep_set_class(lock, key) \
218 : lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \
219 : (lock)->dep_map.wait_type_inner, \
220 : (lock)->dep_map.wait_type_outer)
221 :
222 : #define lockdep_set_class_and_name(lock, key, name) \
223 : lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \
224 : (lock)->dep_map.wait_type_inner, \
225 : (lock)->dep_map.wait_type_outer)
226 :
227 : #define lockdep_set_class_and_subclass(lock, key, sub) \
228 : lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
229 : (lock)->dep_map.wait_type_inner, \
230 : (lock)->dep_map.wait_type_outer)
231 :
232 : #define lockdep_set_subclass(lock, sub) \
233 : lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
234 : (lock)->dep_map.wait_type_inner, \
235 : (lock)->dep_map.wait_type_outer)
236 :
237 : #define lockdep_set_novalidate_class(lock) \
238 : lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
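/*
 * Hypothetical example of the class-setting helpers above: give one lock
 * instance its own class so nesting it under another lock of the default
 * class is not reported as a false positive.
 *
 *	static struct lock_class_key my_dev_lock_key;	// hypothetical key
 *
 *	static void my_dev_init(struct my_dev *dev)	// hypothetical type
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &my_dev_lock_key);
 *	}
 *
 * A statically allocated key is valid for the lifetime of the kernel or
 * module; dynamically allocated keys must go through lockdep_register_key()
 * and lockdep_unregister_key() above.
 */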
239 :
240 : /*
241 : * Compare locking classes
242 : */
243 : #define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
244 :
245 1615 : static inline int lockdep_match_key(struct lockdep_map *lock,
246 : struct lock_class_key *key)
247 : {
248 1615 : return lock->key == key;
249 : }
250 :
251 : /*
252 : * Acquire a lock.
253 : *
254 : * Values for "read":
255 : *
256 : * 0: exclusive (write) acquire
257 : * 1: read-acquire (no recursion allowed)
258 : * 2: read-acquire with same-instance recursion allowed
259 : *
260 : * Values for check:
261 : *
262 : * 0: simple checks (freeing, held-at-exit-time, etc.)
263 : * 1: full validation
264 : */
265 : extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
266 : int trylock, int read, int check,
267 : struct lockdep_map *nest_lock, unsigned long ip);
268 :
269 : extern void lock_release(struct lockdep_map *lock, unsigned long ip);
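/*
 * Sketch of a manual annotation for a hypothetical object that embeds a
 * dep_map; this is essentially what the lock_map_acquire()/lock_map_release()
 * helpers further below expand to:
 *
 *	// exclusive acquire (read == 0), full validation (check == 1)
 *	lock_acquire(&obj->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);
 *	// ... enter the critical region the dep_map models ...
 *	lock_release(&obj->dep_map, _THIS_IP_);
 */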
270 :
271 : /*
272 : * Same "read" as for lock_acquire(), except -1 means any.
273 : */
274 : extern int lock_is_held_type(const struct lockdep_map *lock, int read);
275 :
276 47415989 : static inline int lock_is_held(const struct lockdep_map *lock)
277 : {
278 47423958 : return lock_is_held_type(lock, -1);
279 : }
280 :
281 : #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
282 : #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))
283 :
284 : extern void lock_set_class(struct lockdep_map *lock, const char *name,
285 : struct lock_class_key *key, unsigned int subclass,
286 : unsigned long ip);
287 :
288 0 : static inline void lock_set_subclass(struct lockdep_map *lock,
289 : unsigned int subclass, unsigned long ip)
290 : {
291 0 : lock_set_class(lock, lock->name, lock->key, subclass, ip);
292 : }
293 :
294 : extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
295 :
296 : #define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
297 :
298 : extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
299 : extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
300 : extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
301 :
302 : #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
303 :
304 : #define lockdep_assert_held(l) do { \
305 : WARN_ON(debug_locks && !lockdep_is_held(l)); \
306 : } while (0)
307 :
308 : #define lockdep_assert_held_write(l) do { \
309 : WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
310 : } while (0)
311 :
312 : #define lockdep_assert_held_read(l) do { \
313 : WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
314 : } while (0)
315 :
316 : #define lockdep_assert_held_once(l) do { \
317 : WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
318 : } while (0)
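/*
 * Typical use of the assertions above is to document and enforce a
 * function's locking contract (hypothetical example):
 *
 *	static void my_update_stats(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		// fields below are protected by dev->lock
 *	}
 *
 * When lockdep is disabled the assertion expands to a no-op that merely
 * references its argument (see the !CONFIG_LOCKDEP block further below).
 */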
319 :
320 : #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
321 :
322 : #define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
323 : #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
324 : #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
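/*
 * Pinning sketch (hypothetical lock): a pinned lock is expected to stay
 * held, and releasing it before the matching unpin makes lockdep complain.
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&dev->lock);
 *	// call code that must not drop dev->lock behind our back
 *	lockdep_unpin_lock(&dev->lock, cookie);
 */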
325 :
326 : #else /* !CONFIG_LOCKDEP */
327 :
328 : static inline void lockdep_init_task(struct task_struct *task)
329 : {
330 : }
331 :
332 : static inline void lockdep_off(void)
333 : {
334 : }
335 :
336 : static inline void lockdep_on(void)
337 : {
338 : }
339 :
340 : static inline void lockdep_set_selftest_task(struct task_struct *task)
341 : {
342 : }
343 :
344 : # define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
345 : # define lock_release(l, i) do { } while (0)
346 : # define lock_downgrade(l, i) do { } while (0)
347 : # define lock_set_class(l, n, k, s, i) do { } while (0)
348 : # define lock_set_subclass(l, s, i) do { } while (0)
349 : # define lockdep_init() do { } while (0)
350 : # define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
351 : do { (void)(name); (void)(key); } while (0)
352 : # define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
353 : do { (void)(name); (void)(key); } while (0)
354 : # define lockdep_init_map_wait(lock, name, key, sub, inner) \
355 : do { (void)(name); (void)(key); } while (0)
356 : # define lockdep_init_map(lock, name, key, sub) \
357 : do { (void)(name); (void)(key); } while (0)
358 : # define lockdep_set_class(lock, key) do { (void)(key); } while (0)
359 : # define lockdep_set_class_and_name(lock, key, name) \
360 : do { (void)(key); (void)(name); } while (0)
361 : #define lockdep_set_class_and_subclass(lock, key, sub) \
362 : do { (void)(key); } while (0)
363 : #define lockdep_set_subclass(lock, sub) do { } while (0)
364 :
365 : #define lockdep_set_novalidate_class(lock) do { } while (0)
366 :
367 : /*
368 : * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
369 : * case since the result is not well defined and the caller should
370 : * #ifdef the call site instead.
371 : */
372 :
373 : # define lockdep_reset() do { debug_locks = 1; } while (0)
374 : # define lockdep_free_key_range(start, size) do { } while (0)
375 : # define lockdep_sys_exit() do { } while (0)
376 :
377 : static inline void lockdep_register_key(struct lock_class_key *key)
378 : {
379 : }
380 :
381 : static inline void lockdep_unregister_key(struct lock_class_key *key)
382 : {
383 : }
384 :
385 : #define lockdep_depth(tsk) (0)
386 :
387 : /*
388 : * Dummy forward declarations, allow users to write less ifdef-y code
389 : * and depend on dead code elimination.
390 : */
391 : extern int lock_is_held(const void *);
392 : extern int lockdep_is_held(const void *);
393 : #define lockdep_is_held_type(l, r) (1)
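/*
 * The dummy declarations above let callers keep lockdep-only checks in
 * regular code without an #ifdef (hypothetical example):
 *
 *	if (IS_ENABLED(CONFIG_LOCKDEP))
 *		WARN_ON(!lockdep_is_held(&dev->lock));
 *
 * The expression must still parse and type-check, hence the externs; dead
 * code elimination then drops the call so no definition is ever linked.
 */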
394 :
395 : #define lockdep_assert_held(l) do { (void)(l); } while (0)
396 : #define lockdep_assert_held_write(l) do { (void)(l); } while (0)
397 : #define lockdep_assert_held_read(l) do { (void)(l); } while (0)
398 : #define lockdep_assert_held_once(l) do { (void)(l); } while (0)
399 :
400 : #define lockdep_recursing(tsk) (0)
401 :
402 : #define NIL_COOKIE (struct pin_cookie){ }
403 :
404 : #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
405 : #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
406 : #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
407 :
408 : #endif /* !LOCKDEP */
409 :
410 : enum xhlock_context_t {
411 : XHLOCK_HARD,
412 : XHLOCK_SOFT,
413 : XHLOCK_CTX_NR,
414 : };
415 :
416 : #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
417 : /*
418 : * To initialize a lockdep_map statically use this macro.
419 : * Note that _name must not be NULL.
420 : */
421 : #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
422 : { .name = (_name), .key = (void *)(_key), }
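/*
 * Example of the static initializer (hypothetical names): model a code
 * region that behaves like a lock but has no lock structure of its own,
 * to be used with lock_map_acquire()/lock_map_release() further below.
 *
 *	static struct lock_class_key my_region_key;
 *	static struct lockdep_map my_region_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_region", &my_region_key);
 */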
423 :
424 1906 : static inline void lockdep_invariant_state(bool force) {}
425 0 : static inline void lockdep_free_task(struct task_struct *task) {}
426 :
427 : #ifdef CONFIG_LOCK_STAT
428 :
429 : extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
430 : extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
431 :
432 : #define LOCK_CONTENDED(_lock, try, lock) \
433 : do { \
434 : if (!try(_lock)) { \
435 : lock_contended(&(_lock)->dep_map, _RET_IP_); \
436 : lock(_lock); \
437 : } \
438 : lock_acquired(&(_lock)->dep_map, _RET_IP_); \
439 : } while (0)
440 :
441 : #define LOCK_CONTENDED_RETURN(_lock, try, lock) \
442 : ({ \
443 : int ____err = 0; \
444 : if (!try(_lock)) { \
445 : lock_contended(&(_lock)->dep_map, _RET_IP_); \
446 : ____err = lock(_lock); \
447 : } \
448 : if (!____err) \
449 : lock_acquired(&(_lock)->dep_map, _RET_IP_); \
450 : ____err; \
451 : })
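/*
 * Sketch of how a sleeping lock's slow path is typically wrapped; the
 * helpers my_trylock()/my_lock_slowpath() and the lock type are
 * hypothetical, only the pattern is the point:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
 *	}
 *
 * A failed trylock records a contention event before entering the slow
 * path; lock_acquired() is recorded in either case.
 */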
452 :
453 : #else /* CONFIG_LOCK_STAT */
454 :
455 : #define lock_contended(lockdep_map, ip) do {} while (0)
456 : #define lock_acquired(lockdep_map, ip) do {} while (0)
457 :
458 : #define LOCK_CONTENDED(_lock, try, lock) \
459 : lock(_lock)
460 :
461 : #define LOCK_CONTENDED_RETURN(_lock, try, lock) \
462 : lock(_lock)
463 :
464 : #endif /* CONFIG_LOCK_STAT */
465 :
466 : #ifdef CONFIG_LOCKDEP
467 :
468 : /*
469 : * On lockdep we don't want the hand-coded irq-enable of
470 : * _raw_*_lock_flags() code, because lockdep assumes
471 : * that interrupts are not re-enabled during lock-acquire:
472 : */
473 : #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
474 : LOCK_CONTENDED((_lock), (try), (lock))
475 :
476 : #else /* CONFIG_LOCKDEP */
477 :
478 : #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
479 : lockfl((_lock), (flags))
480 :
481 : #endif /* CONFIG_LOCKDEP */
482 :
483 : #ifdef CONFIG_PROVE_LOCKING
484 : extern void print_irqtrace_events(struct task_struct *curr);
485 : #else
486 : static inline void print_irqtrace_events(struct task_struct *curr)
487 : {
488 : }
489 : #endif
490 :
491 : /* Variable used to make lockdep treat read_lock() as recursive in selftests */
492 : #ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
493 : extern unsigned int force_read_lock_recursive;
494 : #else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
495 : #define force_read_lock_recursive 0
496 : #endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
497 :
498 : #ifdef CONFIG_LOCKDEP
499 : extern bool read_lock_is_recursive(void);
500 : #else /* CONFIG_LOCKDEP */
501 : /* If !LOCKDEP, the value is meaningless */
502 : #define read_lock_is_recursive() 0
503 : #endif
504 :
505 : /*
506 : * For trivial one-depth nesting of a lock-class, the following
507 : * global define can be used. (Subsystems with multiple levels
508 : * of nesting should define their own lock-nesting subclasses.)
509 : */
510 : #define SINGLE_DEPTH_NESTING 1
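/*
 * Hypothetical example: taking two locks of the same class in a known
 * parent -> child order needs a nesting annotation, otherwise lockdep
 * reports it as a possible recursive deadlock on the class:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */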
511 :
512 : /*
513 : * Map the dependency ops to NOP or to real lockdep ops, depending
514 : * on the per lock-class debug mode:
515 : */
516 :
517 : #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
518 : #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
519 : #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
520 :
521 : #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
522 : #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
523 : #define spin_release(l, i) lock_release(l, i)
524 :
525 : #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
526 : #define rwlock_acquire_read(l, s, t, i) \
527 : do { \
528 : if (read_lock_is_recursive()) \
529 : lock_acquire_shared_recursive(l, s, t, NULL, i); \
530 : else \
531 : lock_acquire_shared(l, s, t, NULL, i); \
532 : } while (0)
533 :
534 : #define rwlock_release(l, i) lock_release(l, i)
535 :
536 : #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
537 : #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
538 : #define seqcount_release(l, i) lock_release(l, i)
539 :
540 : #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
541 : #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
542 : #define mutex_release(l, i) lock_release(l, i)
543 :
544 : #define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
545 : #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
546 : #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
547 : #define rwsem_release(l, i) lock_release(l, i)
548 :
549 : #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
550 : #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
551 : #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
552 : #define lock_map_release(l) lock_release(l, _THIS_IP_)
553 :
554 : #ifdef CONFIG_PROVE_LOCKING
555 : # define might_lock(lock) \
556 : do { \
557 : typecheck(struct lockdep_map *, &(lock)->dep_map); \
558 : lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
559 : lock_release(&(lock)->dep_map, _THIS_IP_); \
560 : } while (0)
561 : # define might_lock_read(lock) \
562 : do { \
563 : typecheck(struct lockdep_map *, &(lock)->dep_map); \
564 : lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
565 : lock_release(&(lock)->dep_map, _THIS_IP_); \
566 : } while (0)
567 : # define might_lock_nested(lock, subclass) \
568 : do { \
569 : typecheck(struct lockdep_map *, &(lock)->dep_map); \
570 : lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
571 : _THIS_IP_); \
572 : lock_release(&(lock)->dep_map, _THIS_IP_); \
573 : } while (0)
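/*
 * Hypothetical example of the annotations above: a function that only
 * sometimes takes a lock can still declare the dependency on every call,
 * so lockdep sees it even when the fast path never locks:
 *
 *	static struct my_obj *my_find_object(struct my_table *t, unsigned long id)
 *	{
 *		might_lock_read(&t->rwsem);
 *		// fast path: lookup without taking t->rwsem ...
 *	}
 */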
574 :
575 : DECLARE_PER_CPU(int, hardirqs_enabled);
576 : DECLARE_PER_CPU(int, hardirq_context);
577 : DECLARE_PER_CPU(unsigned int, lockdep_recursion);
578 :
579 : #define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
580 :
581 : #define lockdep_assert_irqs_enabled() \
582 : do { \
583 : WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
584 : } while (0)
585 :
586 : #define lockdep_assert_irqs_disabled() \
587 : do { \
588 : WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
589 : } while (0)
590 :
591 : #define lockdep_assert_in_irq() \
592 : do { \
593 : WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
594 : } while (0)
595 :
596 : #define lockdep_assert_preemption_enabled() \
597 : do { \
598 : WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
599 : __lockdep_enabled && \
600 : (preempt_count() != 0 || \
601 : !this_cpu_read(hardirqs_enabled))); \
602 : } while (0)
603 :
604 : #define lockdep_assert_preemption_disabled() \
605 : do { \
606 : WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
607 : __lockdep_enabled && \
608 : (preempt_count() == 0 && \
609 : this_cpu_read(hardirqs_enabled))); \
610 : } while (0)
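/*
 * Hypothetical example of the context assertions above:
 *
 *	static void my_percpu_update(void)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_inc(my_counter);	// caller guarantees no preemption
 *	}
 *
 * Note that disabled interrupts also satisfy the preemption-disabled
 * assertion, and none of these checks fire unless lockdep is active
 * (__lockdep_enabled).
 */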
611 :
612 : /*
613 : * Acceptable for protecting per-CPU resources accessed from BH.
614 : * Much like in_softirq() - semantics are ambiguous, use carefully.
615 : */
616 : #define lockdep_assert_in_softirq() \
617 : do { \
618 : WARN_ON_ONCE(__lockdep_enabled && \
619 : (!in_softirq() || in_irq() || in_nmi())); \
620 : } while (0)
621 :
622 : #else
623 : # define might_lock(lock) do { } while (0)
624 : # define might_lock_read(lock) do { } while (0)
625 : # define might_lock_nested(lock, subclass) do { } while (0)
626 :
627 : # define lockdep_assert_irqs_enabled() do { } while (0)
628 : # define lockdep_assert_irqs_disabled() do { } while (0)
629 : # define lockdep_assert_in_irq() do { } while (0)
630 :
631 : # define lockdep_assert_preemption_enabled() do { } while (0)
632 : # define lockdep_assert_preemption_disabled() do { } while (0)
633 : # define lockdep_assert_in_softirq() do { } while (0)
634 : #endif
635 :
636 : #ifdef CONFIG_PROVE_RAW_LOCK_NESTING
637 :
638 : # define lockdep_assert_RT_in_threaded_ctx() do { \
639 : WARN_ONCE(debug_locks && !current->lockdep_recursion && \
640 : lockdep_hardirq_context() && \
641 : !(current->hardirq_threaded || current->irq_config), \
642 : "Not in threaded context on PREEMPT_RT as expected\n"); \
643 : } while (0)
644 :
645 : #else
646 :
647 : # define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
648 :
649 : #endif
650 :
651 : #ifdef CONFIG_LOCKDEP
652 : void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
653 : #else
654 : static inline void
655 : lockdep_rcu_suspicious(const char *file, const int line, const char *s)
656 : {
657 : }
658 : #endif
659 :
660 : #endif /* __LINUX_LOCKDEP_H */