Line data Source code
1 : /*
2 : * Generic infrastructure for lifetime debugging of objects.
3 : *
4 : * Started by Thomas Gleixner
5 : *
6 : * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
7 : *
8 : * For licensing details see kernel-base/COPYING
9 : */
10 :
11 : #define pr_fmt(fmt) "ODEBUG: " fmt
12 :
13 : #include <linux/debugobjects.h>
14 : #include <linux/interrupt.h>
15 : #include <linux/sched.h>
16 : #include <linux/sched/task_stack.h>
17 : #include <linux/seq_file.h>
18 : #include <linux/debugfs.h>
19 : #include <linux/slab.h>
20 : #include <linux/hash.h>
21 : #include <linux/kmemleak.h>
22 : #include <linux/cpu.h>
23 :
24 : #define ODEBUG_HASH_BITS 14
25 : #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
26 :
27 : #define ODEBUG_POOL_SIZE 1024
28 : #define ODEBUG_POOL_MIN_LEVEL 256
29 : #define ODEBUG_POOL_PERCPU_SIZE 64
30 : #define ODEBUG_BATCH_SIZE 16
31 :
32 : #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
33 : #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
34 : #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
35 :
36 : /*
37 : * We limit the freeing of debug objects via the workqueue to a maximum
38 : * frequency of 10Hz and about 1024 objects per freeing operation,
39 : * so at most 10k debug objects are freed per second.
40 : */
41 : #define ODEBUG_FREE_WORK_MAX 1024
42 : #define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
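/*
 * Editor's note: DIV_ROUND_UP(HZ, 10) is a tenth of a second expressed
 * in jiffies (e.g. 25 with HZ=250, 100 with HZ=1000), which is what
 * caps the free worker at roughly ten runs per second.
 */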
43 :
44 : struct debug_bucket {
45 : struct hlist_head list;
46 : raw_spinlock_t lock;
47 : };
48 :
49 : /*
50 : * Debug object percpu free list
51 : * Access is protected by disabling interrupts
52 : */
53 : struct debug_percpu_free {
54 : struct hlist_head free_objs;
55 : int obj_free;
56 : };
57 :
58 : static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
59 :
60 : static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
61 :
62 : static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
63 :
64 : static DEFINE_RAW_SPINLOCK(pool_lock);
65 :
66 : static HLIST_HEAD(obj_pool);
67 : static HLIST_HEAD(obj_to_free);
68 :
69 : /*
70 : * Because of the presence of percpu free pools, obj_pool_free will
71 : * under-count those in the percpu free pools. Similarly, obj_pool_used
72 : * will over-count those in the percpu free pools. Adjustments will be
73 : * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
74 : * can be off.
75 : */
76 : static int obj_pool_min_free = ODEBUG_POOL_SIZE;
77 : static int obj_pool_free = ODEBUG_POOL_SIZE;
78 : static int obj_pool_used;
79 : static int obj_pool_max_used;
80 : static bool obj_freeing;
81 : /* The number of objs on the global free list */
82 : static int obj_nr_tofree;
83 :
84 : static int debug_objects_maxchain __read_mostly;
85 : static int __maybe_unused debug_objects_maxchecked __read_mostly;
86 : static int debug_objects_fixups __read_mostly;
87 : static int debug_objects_warnings __read_mostly;
88 : static int debug_objects_enabled __read_mostly
89 : = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
90 : static int debug_objects_pool_size __read_mostly
91 : = ODEBUG_POOL_SIZE;
92 : static int debug_objects_pool_min_level __read_mostly
93 : = ODEBUG_POOL_MIN_LEVEL;
94 : static const struct debug_obj_descr *descr_test __read_mostly;
95 : static struct kmem_cache *obj_cache __read_mostly;
96 :
97 : /*
98 : * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
99 : */
100 : static int debug_objects_allocated;
101 : static int debug_objects_freed;
102 :
103 : static void free_obj_work(struct work_struct *work);
104 : static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
105 :
106 0 : static int __init enable_object_debug(char *str)
107 : {
108 0 : debug_objects_enabled = 1;
109 0 : return 0;
110 : }
111 :
112 0 : static int __init disable_object_debug(char *str)
113 : {
114 0 : debug_objects_enabled = 0;
115 0 : return 0;
116 : }
117 :
118 : early_param("debug_objects", enable_object_debug);
119 : early_param("no_debug_objects", disable_object_debug);
120 :
121 : static const char *obj_states[ODEBUG_STATE_MAX] = {
122 : [ODEBUG_STATE_NONE] = "none",
123 : [ODEBUG_STATE_INIT] = "initialized",
124 : [ODEBUG_STATE_INACTIVE] = "inactive",
125 : [ODEBUG_STATE_ACTIVE] = "active",
126 : [ODEBUG_STATE_DESTROYED] = "destroyed",
127 : [ODEBUG_STATE_NOTAVAILABLE] = "not available",
128 : };
129 :
130 1127958 : static void fill_pool(void)
131 : {
132 1127958 : gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
133 1127958 : struct debug_obj *obj;
134 1127958 : unsigned long flags;
135 :
136 1127958 : if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
137 : return;
138 :
139 : /*
140 : * Reuse objs from the global free list; they will be reinitialized
141 : * when allocating.
142 : *
143 : * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
144 : * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
145 : * sections.
146 : */
147 2656 : while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
148 0 : raw_spin_lock_irqsave(&pool_lock, flags);
149 : /*
150 : * Recheck with the lock held as the worker thread might have
151 : * won the race and freed the global free list already.
152 : */
153 0 : while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
154 0 : obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
155 0 : hlist_del(&obj->node);
156 0 : WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
157 0 : hlist_add_head(&obj->node, &obj_pool);
158 0 : WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
159 : }
160 3814 : raw_spin_unlock_irqrestore(&pool_lock, flags);
161 : }
162 :
163 3815 : if (unlikely(!obj_cache))
164 : return;
165 :
166 7796 : while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
167 : struct debug_obj *new[ODEBUG_BATCH_SIZE];
168 : int cnt;
169 :
170 67585 : for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
171 63606 : new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
172 63606 : if (!new[cnt])
173 : break;
174 : }
175 3979 : if (!cnt)
176 0 : return;
177 :
178 3979 : raw_spin_lock_irqsave(&pool_lock, flags);
179 3979 : while (cnt) {
180 63696 : hlist_add_head(&new[--cnt]->node, &obj_pool);
181 63696 : debug_objects_allocated++;
182 67677 : WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
183 : }
184 3981 : raw_spin_unlock_irqrestore(&pool_lock, flags);
185 : }
186 : }
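/*
 * Editor's note, a worked example under the default tunables: with
 * debug_objects_pool_min_level at 256 and ODEBUG_BATCH_SIZE at 16, a
 * fully drained pool is refilled by up to 16 rounds of 16
 * kmem_cache_zalloc() calls; each batch is allocated outside pool_lock
 * and only linked into obj_pool with the lock held.
 */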
187 :
188 : /*
189 : * Look up an object in the hash bucket.
190 : */
191 6737135 : static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
192 : {
193 6737135 : struct debug_obj *obj;
194 6737135 : int cnt = 0;
195 :
196 29260224 : hlist_for_each_entry(obj, &b->list, node) {
197 12380792 : cnt++;
198 12380792 : if (obj->object == addr)
199 4487815 : return obj;
200 : }
201 2249320 : if (cnt > debug_objects_maxchain)
202 55 : debug_objects_maxchain = cnt;
203 :
204 : return NULL;
205 : }
206 :
207 : /*
208 : * Allocate a new object from the hlist
209 : */
210 2668065 : static struct debug_obj *__alloc_object(struct hlist_head *list)
211 : {
212 2668065 : struct debug_obj *obj = NULL;
213 :
214 2668065 : if (list->first) {
215 2623637 : obj = hlist_entry(list->first, typeof(*obj), node);
216 2623637 : hlist_del(&obj->node);
217 : }
218 :
219 2668065 : return obj;
220 : }
221 :
222 : /*
223 : * Allocate a new object. If the pool is empty, switch off the debugger.
224 : * Must be called with interrupts disabled.
225 : */
226 : static struct debug_obj *
227 1128374 : alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
228 : {
229 1128374 : struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
230 1128424 : struct debug_obj *obj;
231 :
232 1128424 : if (likely(obj_cache)) {
233 1128423 : obj = __alloc_object(&percpu_pool->free_objs);
234 1128350 : if (obj) {
235 1083922 : percpu_pool->obj_free--;
236 1083922 : goto init_obj;
237 : }
238 : }
239 :
240 44429 : raw_spin_lock(&pool_lock);
241 44429 : obj = __alloc_object(&obj_pool);
242 44429 : if (obj) {
243 44429 : obj_pool_used++;
244 44429 : WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
245 :
246 : /*
247 : * Looking ahead, allocate one batch of debug objects and
248 : * put them into the percpu free pool.
249 : */
250 44429 : if (likely(obj_cache)) {
251 : int i;
252 :
253 755276 : for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
254 710848 : struct debug_obj *obj2;
255 :
256 710848 : obj2 = __alloc_object(&obj_pool);
257 710848 : if (!obj2)
258 : break;
259 710848 : hlist_add_head(&obj2->node,
260 : &percpu_pool->free_objs);
261 710848 : percpu_pool->obj_free++;
262 710848 : obj_pool_used++;
263 710848 : WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
264 : }
265 : }
266 :
267 44429 : if (obj_pool_used > obj_pool_max_used)
268 583 : obj_pool_max_used = obj_pool_used;
269 :
270 44429 : if (obj_pool_free < obj_pool_min_free)
271 50 : obj_pool_min_free = obj_pool_free;
272 : }
273 44429 : raw_spin_unlock(&pool_lock);
274 :
275 1128393 : init_obj:
276 1128393 : if (obj) {
277 1128393 : obj->object = addr;
278 1128393 : obj->descr = descr;
279 1128393 : obj->state = ODEBUG_STATE_NONE;
280 1128393 : obj->astate = 0;
281 1128393 : hlist_add_head(&obj->node, &b->list);
282 : }
283 1128393 : return obj;
284 : }
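/*
 * Editor's note, a worked example of the lookahead above: with
 * ODEBUG_BATCH_SIZE at 16, a single miss on the percpu free list pulls
 * one object for the caller plus up to 16 more into
 * percpu_pool->free_objs in one pool_lock section, so the next 16
 * allocations on this CPU complete without touching the global lock.
 */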
285 :
286 : /*
287 : * workqueue function to free objects.
288 : *
289 : * To reduce contention on the global pool_lock, the actual freeing of
290 : * debug objects will be delayed if the pool_lock is busy.
291 : */
292 594 : static void free_obj_work(struct work_struct *work)
293 : {
294 594 : struct hlist_node *tmp;
295 594 : struct debug_obj *obj;
296 594 : unsigned long flags;
297 594 : HLIST_HEAD(tofree);
298 :
299 594 : WRITE_ONCE(obj_freeing, false);
300 1188 : if (!raw_spin_trylock_irqsave(&pool_lock, flags))
301 532 : return;
302 :
303 593 : if (obj_pool_free >= debug_objects_pool_size)
304 62 : goto free_objs;
305 :
306 : /*
307 : * The objs on the pool list might be allocated before the work is
308 : * run, so recheck whether the pool list is full; if not, refill the
309 : * pool list from the global free list. As it is likely that a
310 : * workload may be gearing up to use more and more objects, don't
311 : * free any of them until the next round.
312 : */
313 110637 : while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
314 110245 : obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
315 110245 : hlist_del(&obj->node);
316 110245 : hlist_add_head(&obj->node, &obj_pool);
317 110245 : WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
318 110776 : WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
319 : }
320 531 : raw_spin_unlock_irqrestore(&pool_lock, flags);
321 531 : return;
322 :
323 62 : free_objs:
324 : /*
325 : * Pool list is already full and there are still objs on the free
326 : * list. Move remaining free objs to a temporary list to free the
327 : * memory outside the pool_lock held region.
328 : */
329 62 : if (obj_nr_tofree) {
330 62 : hlist_move_list(&obj_to_free, &tofree);
331 62 : debug_objects_freed += obj_nr_tofree;
332 62 : WRITE_ONCE(obj_nr_tofree, 0);
333 : }
334 62 : raw_spin_unlock_irqrestore(&pool_lock, flags);
335 :
336 56879 : hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
337 56755 : hlist_del(&obj->node);
338 56755 : kmem_cache_free(obj_cache, obj);
339 : }
340 : }
341 :
342 1119328 : static void __free_object(struct debug_obj *obj)
343 : {
344 1119328 : struct debug_obj *objs[ODEBUG_BATCH_SIZE];
345 1119328 : struct debug_percpu_free *percpu_pool;
346 1119328 : int lookahead_count = 0;
347 1119328 : unsigned long flags;
348 1119328 : bool work;
349 :
350 2239358 : local_irq_save(flags);
351 1120030 : if (!obj_cache)
352 0 : goto free_to_obj_pool;
353 :
354 : /*
355 : * Try to free it into the percpu pool first.
356 : */
357 1120030 : percpu_pool = this_cpu_ptr(&percpu_obj_pool);
358 1120190 : if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
359 1076690 : hlist_add_head(&obj->node, &percpu_pool->free_objs);
360 1076690 : percpu_pool->obj_free++;
361 1076690 : local_irq_restore(flags);
362 1076740 : return;
363 : }
364 :
365 : /*
366 : * As the percpu pool is full, look ahead and pull out a batch
367 : * of objects from the percpu pool and free them as well.
368 : */
369 747774 : for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
370 703745 : objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
371 704274 : if (!objs[lookahead_count])
372 : break;
373 704274 : percpu_pool->obj_free--;
374 : }
375 :
376 44029 : free_to_obj_pool:
377 44029 : raw_spin_lock(&pool_lock);
378 44030 : work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
379 9674 : (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
380 44030 : obj_pool_used--;
381 :
382 44030 : if (work) {
383 5100 : WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
384 5100 : hlist_add_head(&obj->node, &obj_to_free);
385 5100 : if (lookahead_count) {
386 5100 : WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
387 5100 : obj_pool_used -= lookahead_count;
388 5100 : while (lookahead_count) {
389 86700 : hlist_add_head(&objs[--lookahead_count]->node,
390 : &obj_to_free);
391 : }
392 : }
393 :
394 5100 : if ((obj_pool_free > debug_objects_pool_size) &&
395 5100 : (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
396 : int i;
397 :
398 : /*
399 : * Free one more batch of objects from obj_pool.
400 : */
401 85612 : for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
402 80576 : obj = __alloc_object(&obj_pool);
403 80576 : hlist_add_head(&obj->node, &obj_to_free);
404 80576 : WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
405 80576 : WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
406 : }
407 : }
408 : } else {
409 38930 : WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
410 38930 : hlist_add_head(&obj->node, &obj_pool);
411 38930 : if (lookahead_count) {
412 38930 : WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
413 38930 : obj_pool_used -= lookahead_count;
414 38930 : while (lookahead_count) {
415 661810 : hlist_add_head(&objs[--lookahead_count]->node,
416 : &obj_pool);
417 : }
418 : }
419 : }
420 44030 : raw_spin_unlock(&pool_lock);
421 44031 : local_irq_restore(flags);
422 : }
423 :
424 : /*
425 : * Put the object back into the pool and schedule work to free objects
426 : * if necessary.
427 : */
428 241 : static void free_object(struct debug_obj *obj)
429 : {
430 241 : __free_object(obj);
431 241 : if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
432 0 : WRITE_ONCE(obj_freeing, true);
433 0 : schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
434 : }
435 241 : }
436 :
437 : #ifdef CONFIG_HOTPLUG_CPU
438 0 : static int object_cpu_offline(unsigned int cpu)
439 : {
440 0 : struct debug_percpu_free *percpu_pool;
441 0 : struct hlist_node *tmp;
442 0 : struct debug_obj *obj;
443 :
444 : /* Remote access is safe as the CPU is dead already */
445 0 : percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
446 0 : hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
447 0 : hlist_del(&obj->node);
448 0 : kmem_cache_free(obj_cache, obj);
449 : }
450 0 : percpu_pool->obj_free = 0;
451 :
452 0 : return 0;
453 : }
454 : #endif
455 :
456 : /*
457 : * We ran out of memory. That means we probably have tons of objects
458 : * allocated.
459 : */
460 0 : static void debug_objects_oom(void)
461 : {
462 0 : struct debug_bucket *db = obj_hash;
463 0 : struct hlist_node *tmp;
464 0 : HLIST_HEAD(freelist);
465 0 : struct debug_obj *obj;
466 0 : unsigned long flags;
467 0 : int i;
468 :
469 0 : pr_warn("Out of memory. ODEBUG disabled\n");
470 :
471 0 : for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
472 0 : raw_spin_lock_irqsave(&db->lock, flags);
473 0 : hlist_move_list(&db->list, &freelist);
474 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
475 :
476 : /* Now free them */
477 0 : hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
478 0 : hlist_del(&obj->node);
479 0 : free_object(obj);
480 : }
481 : }
482 0 : }
483 :
484 : /*
485 : * We use the pfn of the address for the hash. That way we can check
486 : * for freed objects simply by checking the affected bucket.
487 : */
488 9470544 : static struct debug_bucket *get_bucket(unsigned long addr)
489 : {
490 9470544 : unsigned long hash;
491 :
492 9470544 : hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
493 2738601 : return &obj_hash[hash];
494 : }
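/*
 * Editor's note, a minimal user-space model of the mapping above, not
 * part of the kernel source. The multiplicative constant is a generic
 * Fibonacci-hash stand-in rather than the kernel's hash_long(), and
 * the MODEL_* names are invented for the sketch.
 */
#if 0 /* illustrative sketch only */
#include <stdio.h>

#define MODEL_CHUNK_SHIFT 12 /* assume PAGE_SHIFT == 12 */
#define MODEL_HASH_BITS   14

static unsigned long long model_hash(unsigned long long val)
{
        return (val * 0x9E3779B97F4A7C15ULL) >> (64 - MODEL_HASH_BITS);
}

int main(void)
{
        /* two addresses inside the same 4k chunk hash to the same bucket */
        unsigned long long a = 0x1008, b = 0x1ff0;

        printf("%llu %llu\n", model_hash(a >> MODEL_CHUNK_SHIFT),
               model_hash(b >> MODEL_CHUNK_SHIFT));
        return 0;
}
#endif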
495 :
496 0 : static void debug_print_object(struct debug_obj *obj, char *msg)
497 : {
498 0 : const struct debug_obj_descr *descr = obj->descr;
499 0 : static int limit;
500 :
501 0 : if (limit < 5 && descr != descr_test) {
502 0 : void *hint = descr->debug_hint ?
503 0 : descr->debug_hint(obj->object) : NULL;
504 0 : limit++;
505 0 : WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
506 : "object type: %s hint: %pS\n",
507 : msg, obj_states[obj->state], obj->astate,
508 : descr->name, hint);
509 : }
510 0 : debug_objects_warnings++;
511 0 : }
512 :
513 : /*
514 : * Try to repair the damage, so we have a better chance to get useful
515 : * debug output.
516 : */
517 : static bool
518 0 : debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
519 : void * addr, enum debug_obj_state state)
520 : {
521 0 : if (fixup && fixup(addr, state)) {
522 0 : debug_objects_fixups++;
523 0 : return true;
524 : }
525 : return false;
526 : }
527 :
528 1128158 : static void debug_object_is_on_stack(void *addr, int onstack)
529 : {
530 1128158 : int is_on_stack;
531 1128158 : static int limit;
532 :
533 1128158 : if (limit > 4)
534 : return;
535 :
536 1128202 : is_on_stack = object_is_on_stack(addr);
537 1128202 : if (is_on_stack == onstack)
538 : return;
539 :
540 0 : limit++;
541 0 : if (is_on_stack)
542 0 : pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
543 : task_stack_page(current));
544 : else
545 0 : pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
546 : task_stack_page(current));
547 :
548 0 : WARN_ON(1);
549 : }
550 :
551 : static void
552 1128056 : __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
553 : {
554 1128056 : enum debug_obj_state state;
555 1128056 : bool check_stack = false;
556 1128056 : struct debug_bucket *db;
557 1128056 : struct debug_obj *obj;
558 1128056 : unsigned long flags;
559 :
560 1128056 : fill_pool();
561 :
562 1128023 : db = get_bucket((unsigned long) addr);
563 :
564 1128023 : raw_spin_lock_irqsave(&db->lock, flags);
565 :
566 1128632 : obj = lookup_object(addr, db);
567 1128684 : if (!obj) {
568 1128477 : obj = alloc_object(addr, db, descr);
569 1128289 : if (!obj) {
570 0 : debug_objects_enabled = 0;
571 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
572 0 : debug_objects_oom();
573 0 : return;
574 : }
575 : check_stack = true;
576 : }
577 :
578 1128496 : switch (obj->state) {
579 1128496 : case ODEBUG_STATE_NONE:
580 : case ODEBUG_STATE_INIT:
581 : case ODEBUG_STATE_INACTIVE:
582 1128496 : obj->state = ODEBUG_STATE_INIT;
583 1128496 : break;
584 :
585 0 : case ODEBUG_STATE_ACTIVE:
586 0 : state = obj->state;
587 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
588 0 : debug_print_object(obj, "init");
589 0 : debug_object_fixup(descr->fixup_init, addr, state);
590 0 : return;
591 :
592 0 : case ODEBUG_STATE_DESTROYED:
593 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
594 0 : debug_print_object(obj, "init");
595 0 : return;
596 : default:
597 : break;
598 : }
599 :
600 1128496 : raw_spin_unlock_irqrestore(&db->lock, flags);
601 1128359 : if (check_stack)
602 1128184 : debug_object_is_on_stack(addr, onstack);
603 : }
604 :
605 : /**
606 : * debug_object_init - debug checks when an object is initialized
607 : * @addr: address of the object
608 : * @descr: pointer to an object specific debug description structure
609 : */
610 1128143 : void debug_object_init(void *addr, const struct debug_obj_descr *descr)
611 : {
612 1128143 : if (!debug_objects_enabled)
613 : return;
614 :
615 1128152 : __debug_object_init(addr, descr, 0);
616 : }
617 : EXPORT_SYMBOL_GPL(debug_object_init);
618 :
619 : /**
620 : * debug_object_init_on_stack - debug checks when an object on stack is
621 : * initialized
622 : * @addr: address of the object
623 : * @descr: pointer to an object specific debug description structure
624 : */
625 241 : void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
626 : {
627 241 : if (!debug_objects_enabled)
628 : return;
629 :
630 241 : __debug_object_init(addr, descr, 1);
631 : }
632 : EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
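/*
 * Editor's note, an illustrative usage sketch with hypothetical names
 * (struct my_thing and my_thing_debug_descr are not part of the
 * kernel): a subsystem describes its object type once and then
 * brackets the object's life cycle with the init/activate/deactivate
 * calls above and below.
 */
#if 0 /* illustrative sketch only */
struct my_thing {
        int whatever;
};

static const struct debug_obj_descr my_thing_debug_descr = {
        .name = "my_thing",
};

static void my_thing_init(struct my_thing *t)
{
        /* for an on-stack instance use debug_object_init_on_stack() */
        debug_object_init(t, &my_thing_debug_descr);
        t->whatever = 0;
}

static void my_thing_start(struct my_thing *t)
{
        debug_object_activate(t, &my_thing_debug_descr);
}

static void my_thing_stop(struct my_thing *t)
{
        debug_object_deactivate(t, &my_thing_debug_descr);
}
#endif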
633 :
634 : /**
635 : * debug_object_activate - debug checks when an object is activated
636 : * @addr: address of the object
637 : * @descr: pointer to an object specific debug description structure
638 : * Returns 0 on success, -EINVAL if the check failed.
639 : */
640 2244161 : int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
641 : {
642 2244161 : enum debug_obj_state state;
643 2244161 : struct debug_bucket *db;
644 2244161 : struct debug_obj *obj;
645 2244161 : unsigned long flags;
646 2244161 : int ret;
647 2244161 : struct debug_obj o = { .object = addr,
648 : .state = ODEBUG_STATE_NOTAVAILABLE,
649 : .descr = descr };
650 :
651 2244161 : if (!debug_objects_enabled)
652 : return 0;
653 :
654 2244442 : db = get_bucket((unsigned long) addr);
655 :
656 2244442 : raw_spin_lock_irqsave(&db->lock, flags);
657 :
658 2245135 : obj = lookup_object(addr, db);
659 2245248 : if (obj) {
660 1124188 : bool print_object = false;
661 :
662 1124188 : switch (obj->state) {
663 1124188 : case ODEBUG_STATE_INIT:
664 : case ODEBUG_STATE_INACTIVE:
665 1124188 : obj->state = ODEBUG_STATE_ACTIVE;
666 1124188 : ret = 0;
667 1124188 : break;
668 :
669 0 : case ODEBUG_STATE_ACTIVE:
670 0 : state = obj->state;
671 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
672 0 : debug_print_object(obj, "activate");
673 0 : ret = debug_object_fixup(descr->fixup_activate, addr, state);
674 0 : return ret ? 0 : -EINVAL;
675 :
676 : case ODEBUG_STATE_DESTROYED:
677 : print_object = true;
678 : ret = -EINVAL;
679 : break;
680 0 : default:
681 0 : ret = 0;
682 0 : break;
683 : }
684 1124188 : raw_spin_unlock_irqrestore(&db->lock, flags);
685 1123962 : if (print_object)
686 0 : debug_print_object(obj, "activate");
687 1123962 : return ret;
688 : }
689 :
690 1121060 : raw_spin_unlock_irqrestore(&db->lock, flags);
691 :
692 : /*
693 : * We are here when a static object is activated. We let the
694 : * type-specific code confirm whether this really is a static
695 : * object or not. If true, we just make sure that the static
696 : * object is tracked in the object tracker. If not, this must
697 : * be a bug, so we try to fix it up.
698 : */
699 1120806 : if (descr->is_static_object && descr->is_static_object(addr)) {
700 : /* track this static object */
701 1120806 : debug_object_init(addr, descr);
702 1120842 : debug_object_activate(addr, descr);
703 : } else {
704 0 : debug_print_object(&o, "activate");
705 0 : ret = debug_object_fixup(descr->fixup_activate, addr,
706 : ODEBUG_STATE_NOTAVAILABLE);
707 0 : return ret ? 0 : -EINVAL;
708 : }
709 1120842 : return 0;
710 : }
711 : EXPORT_SYMBOL_GPL(debug_object_activate);
712 :
713 : /**
714 : * debug_object_deactivate - debug checks when an object is deactivated
715 : * @addr: address of the object
716 : * @descr: pointer to an object specific debug description structure
717 : */
718 1119635 : void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
719 : {
720 1119635 : struct debug_bucket *db;
721 1119635 : struct debug_obj *obj;
722 1119635 : unsigned long flags;
723 1119635 : bool print_object = false;
724 :
725 1119635 : if (!debug_objects_enabled)
726 : return;
727 :
728 1119956 : db = get_bucket((unsigned long) addr);
729 :
730 1119956 : raw_spin_lock_irqsave(&db->lock, flags);
731 :
732 1123407 : obj = lookup_object(addr, db);
733 1123411 : if (obj) {
734 1123411 : switch (obj->state) {
735 1123411 : case ODEBUG_STATE_INIT:
736 : case ODEBUG_STATE_INACTIVE:
737 : case ODEBUG_STATE_ACTIVE:
738 1123411 : if (!obj->astate)
739 1123578 : obj->state = ODEBUG_STATE_INACTIVE;
740 : else
741 : print_object = true;
742 : break;
743 :
744 0 : case ODEBUG_STATE_DESTROYED:
745 0 : print_object = true;
746 0 : break;
747 : default:
748 : break;
749 : }
750 0 : }
751 :
752 1123411 : raw_spin_unlock_irqrestore(&db->lock, flags);
753 1121302 : if (!obj) {
754 0 : struct debug_obj o = { .object = addr,
755 : .state = ODEBUG_STATE_NOTAVAILABLE,
756 : .descr = descr };
757 :
758 0 : debug_print_object(&o, "deactivate");
759 1121302 : } else if (print_object) {
760 0 : debug_print_object(obj, "deactivate");
761 : }
762 : }
763 : EXPORT_SYMBOL_GPL(debug_object_deactivate);
764 :
765 : /**
766 : * debug_object_destroy - debug checks when an object is destroyed
767 : * @addr: address of the object
768 : * @descr: pointer to an object specific debug description structure
769 : */
770 0 : void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
771 : {
772 0 : enum debug_obj_state state;
773 0 : struct debug_bucket *db;
774 0 : struct debug_obj *obj;
775 0 : unsigned long flags;
776 0 : bool print_object = false;
777 :
778 0 : if (!debug_objects_enabled)
779 : return;
780 :
781 0 : db = get_bucket((unsigned long) addr);
782 :
783 0 : raw_spin_lock_irqsave(&db->lock, flags);
784 :
785 0 : obj = lookup_object(addr, db);
786 0 : if (!obj)
787 0 : goto out_unlock;
788 :
789 0 : switch (obj->state) {
790 0 : case ODEBUG_STATE_NONE:
791 : case ODEBUG_STATE_INIT:
792 : case ODEBUG_STATE_INACTIVE:
793 0 : obj->state = ODEBUG_STATE_DESTROYED;
794 0 : break;
795 0 : case ODEBUG_STATE_ACTIVE:
796 0 : state = obj->state;
797 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
798 0 : debug_print_object(obj, "destroy");
799 0 : debug_object_fixup(descr->fixup_destroy, addr, state);
800 0 : return;
801 :
802 0 : case ODEBUG_STATE_DESTROYED:
803 0 : print_object = true;
804 0 : break;
805 : default:
806 : break;
807 : }
808 0 : out_unlock:
809 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
810 0 : if (print_object)
811 0 : debug_print_object(obj, "destroy");
812 : }
813 : EXPORT_SYMBOL_GPL(debug_object_destroy);
814 :
815 : /**
816 : * debug_object_free - debug checks when an object is freed
817 : * @addr: address of the object
818 : * @descr: pointer to an object specific debug description structure
819 : */
820 241 : void debug_object_free(void *addr, const struct debug_obj_descr *descr)
821 : {
822 241 : enum debug_obj_state state;
823 241 : struct debug_bucket *db;
824 241 : struct debug_obj *obj;
825 241 : unsigned long flags;
826 :
827 241 : if (!debug_objects_enabled)
828 : return;
829 :
830 241 : db = get_bucket((unsigned long) addr);
831 :
832 241 : raw_spin_lock_irqsave(&db->lock, flags);
833 :
834 241 : obj = lookup_object(addr, db);
835 241 : if (!obj)
836 0 : goto out_unlock;
837 :
838 241 : switch (obj->state) {
839 0 : case ODEBUG_STATE_ACTIVE:
840 0 : state = obj->state;
841 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
842 0 : debug_print_object(obj, "free");
843 0 : debug_object_fixup(descr->fixup_free, addr, state);
844 0 : return;
845 241 : default:
846 241 : hlist_del(&obj->node);
847 241 : raw_spin_unlock_irqrestore(&db->lock, flags);
848 241 : free_object(obj);
849 241 : return;
850 : }
851 0 : out_unlock:
852 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
853 : }
854 : EXPORT_SYMBOL_GPL(debug_object_free);
855 :
856 : /**
857 : * debug_object_assert_init - debug checks when an object should be initialized
858 : * @addr: address of the object
859 : * @descr: pointer to an object specific debug description structure
860 : */
861 0 : void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
862 : {
863 0 : struct debug_bucket *db;
864 0 : struct debug_obj *obj;
865 0 : unsigned long flags;
866 :
867 0 : if (!debug_objects_enabled)
868 : return;
869 :
870 0 : db = get_bucket((unsigned long) addr);
871 :
872 0 : raw_spin_lock_irqsave(&db->lock, flags);
873 :
874 0 : obj = lookup_object(addr, db);
875 0 : if (!obj) {
876 0 : struct debug_obj o = { .object = addr,
877 : .state = ODEBUG_STATE_NOTAVAILABLE,
878 : .descr = descr };
879 :
880 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
881 : /*
882 : * Maybe the object is static, and we let the type-specific
883 : * code confirm. Track this static object if true, else invoke
884 : * fixup.
885 : */
886 0 : if (descr->is_static_object && descr->is_static_object(addr)) {
887 : /* Track this static object */
888 0 : debug_object_init(addr, descr);
889 : } else {
890 0 : debug_print_object(&o, "assert_init");
891 0 : debug_object_fixup(descr->fixup_assert_init, addr,
892 : ODEBUG_STATE_NOTAVAILABLE);
893 : }
894 0 : return;
895 : }
896 :
897 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
898 : }
899 : EXPORT_SYMBOL_GPL(debug_object_assert_init);
900 :
901 : /**
902 : * debug_object_active_state - debug checks object usage state machine
903 : * @addr: address of the object
904 : * @descr: pointer to an object specific debug description structure
905 : * @expect: expected state
906 : * @next: state to move to if expected state is found
907 : */
908 : void
909 2239079 : debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
910 : unsigned int expect, unsigned int next)
911 : {
912 2239079 : struct debug_bucket *db;
913 2239079 : struct debug_obj *obj;
914 2239079 : unsigned long flags;
915 2239079 : bool print_object = false;
916 :
917 2239079 : if (!debug_objects_enabled)
918 : return;
919 :
920 2239281 : db = get_bucket((unsigned long) addr);
921 :
922 2239281 : raw_spin_lock_irqsave(&db->lock, flags);
923 :
924 2241614 : obj = lookup_object(addr, db);
925 2241775 : if (obj) {
926 2241775 : switch (obj->state) {
927 2241979 : case ODEBUG_STATE_ACTIVE:
928 2241979 : if (obj->astate == expect)
929 2242057 : obj->astate = next;
930 : else
931 : print_object = true;
932 : break;
933 :
934 : default:
935 : print_object = true;
936 : break;
937 : }
938 0 : }
939 :
940 2241775 : raw_spin_unlock_irqrestore(&db->lock, flags);
941 2238932 : if (!obj) {
942 0 : struct debug_obj o = { .object = addr,
943 : .state = ODEBUG_STATE_NOTAVAILABLE,
944 : .descr = descr };
945 :
946 0 : debug_print_object(&o, "active_state");
947 2238932 : } else if (print_object) {
948 0 : debug_print_object(obj, "active_state");
949 : }
950 : }
951 : EXPORT_SYMBOL_GPL(debug_object_active_state);
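/*
 * Editor's note, an illustrative sketch of the astate machine with
 * hypothetical state values (not part of this file); the RCU head
 * debugging uses the same pattern to catch double-queueing.
 */
#if 0 /* illustrative sketch only */
#define MY_OBJ_READY  0
#define MY_OBJ_QUEUED 1

static void my_obj_queue(void *obj, const struct debug_obj_descr *descr)
{
        /* warn unless the object was READY, then mark it QUEUED */
        debug_object_active_state(obj, descr, MY_OBJ_READY, MY_OBJ_QUEUED);
}

static void my_obj_unqueue(void *obj, const struct debug_obj_descr *descr)
{
        debug_object_active_state(obj, descr, MY_OBJ_QUEUED, MY_OBJ_READY);
}
#endif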
952 :
953 : #ifdef CONFIG_DEBUG_OBJECTS_FREE
954 2281292 : static void __debug_check_no_obj_freed(const void *address, unsigned long size)
955 : {
956 2281292 : unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
957 2281292 : const struct debug_obj_descr *descr;
958 2281292 : enum debug_obj_state state;
959 2281292 : struct debug_bucket *db;
960 2281292 : struct hlist_node *tmp;
961 2281292 : struct debug_obj *obj;
962 2281292 : int cnt, objs_checked = 0;
963 :
964 2281292 : saddr = (unsigned long) address;
965 2281292 : eaddr = saddr + size;
966 2281292 : paddr = saddr & ODEBUG_CHUNK_MASK;
967 2281292 : chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
968 2281292 : chunks >>= ODEBUG_CHUNK_SHIFT;
969 :
970 5022607 : for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
971 2738601 : db = get_bucket(paddr);
972 :
973 2738601 : repeat:
974 2738601 : cnt = 0;
975 2738601 : raw_spin_lock_irqsave(&db->lock, flags);
976 12900132 : hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
977 3707577 : cnt++;
978 3707577 : oaddr = (unsigned long) obj->object;
979 3707577 : if (oaddr < saddr || oaddr >= eaddr)
980 2586628 : continue;
981 :
982 1120949 : switch (obj->state) {
983 0 : case ODEBUG_STATE_ACTIVE:
984 0 : descr = obj->descr;
985 0 : state = obj->state;
986 0 : raw_spin_unlock_irqrestore(&db->lock, flags);
987 0 : debug_print_object(obj, "free");
988 0 : debug_object_fixup(descr->fixup_free,
989 : (void *) oaddr, state);
990 0 : goto repeat;
991 1120949 : default:
992 1120949 : hlist_del(&obj->node);
993 1120949 : __free_object(obj);
994 1120949 : break;
995 : }
996 : }
997 2742489 : raw_spin_unlock_irqrestore(&db->lock, flags);
998 :
999 2741315 : if (cnt > debug_objects_maxchain)
1000 7 : debug_objects_maxchain = cnt;
1001 :
1002 2741315 : objs_checked += cnt;
1003 : }
1004 :
1005 2284006 : if (objs_checked > debug_objects_maxchecked)
1006 13 : debug_objects_maxchecked = objs_checked;
1007 :
1008 : /* Schedule work to actually kmem_cache_free() objects */
1009 2284006 : if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1010 596 : WRITE_ONCE(obj_freeing, true);
1011 596 : schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1012 : }
1013 2284008 : }
1014 :
1015 2282029 : void debug_check_no_obj_freed(const void *address, unsigned long size)
1016 : {
1017 2282029 : if (debug_objects_enabled)
1018 2281225 : __debug_check_no_obj_freed(address, size);
1019 2284784 : }
1020 : #endif
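/*
 * Editor's note: debug_check_no_obj_freed() is wired into memory
 * freeing paths elsewhere in the kernel (for instance the slab and
 * page allocator free hooks), so releasing memory that still contains
 * a tracked object is reported here rather than at a later crash.
 */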
1021 :
1022 : #ifdef CONFIG_DEBUG_FS
1023 :
1024 0 : static int debug_stats_show(struct seq_file *m, void *v)
1025 : {
1026 0 : int cpu, obj_percpu_free = 0;
1027 :
1028 0 : for_each_possible_cpu(cpu)
1029 0 : obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1030 :
1031 0 : seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
1032 0 : seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
1033 0 : seq_printf(m, "warnings :%d\n", debug_objects_warnings);
1034 0 : seq_printf(m, "fixups :%d\n", debug_objects_fixups);
1035 0 : seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1036 0 : seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1037 0 : seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1038 0 : seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
1039 0 : seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1040 0 : seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree));
1041 0 : seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1042 0 : seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
1043 0 : return 0;
1044 : }
1045 0 : DEFINE_SHOW_ATTRIBUTE(debug_stats);
1046 :
1047 1 : static int __init debug_objects_init_debugfs(void)
1048 : {
1049 1 : struct dentry *dbgdir;
1050 :
1051 1 : if (!debug_objects_enabled)
1052 : return 0;
1053 :
1054 1 : dbgdir = debugfs_create_dir("debug_objects", NULL);
1055 :
1056 1 : debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1057 :
1058 1 : return 0;
1059 : }
1060 : __initcall(debug_objects_init_debugfs);
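/*
 * Editor's note: with debugfs mounted in the usual place the counters
 * above are readable from /sys/kernel/debug/debug_objects/stats.
 */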
1061 :
1062 : #else
1063 : static inline void debug_objects_init_debugfs(void) { }
1064 : #endif
1065 :
1066 : #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1067 :
1068 : /* Random data structure for the self test */
1069 : struct self_test {
1070 : unsigned long dummy1[6];
1071 : int static_init;
1072 : unsigned long dummy2[3];
1073 : };
1074 :
1075 : static __initconst const struct debug_obj_descr descr_type_test;
1076 :
1077 : static bool __init is_static_object(void *addr)
1078 : {
1079 : struct self_test *obj = addr;
1080 :
1081 : return obj->static_init;
1082 : }
1083 :
1084 : /*
1085 : * fixup_init is called when:
1086 : * - an active object is initialized
1087 : */
1088 : static bool __init fixup_init(void *addr, enum debug_obj_state state)
1089 : {
1090 : struct self_test *obj = addr;
1091 :
1092 : switch (state) {
1093 : case ODEBUG_STATE_ACTIVE:
1094 : debug_object_deactivate(obj, &descr_type_test);
1095 : debug_object_init(obj, &descr_type_test);
1096 : return true;
1097 : default:
1098 : return false;
1099 : }
1100 : }
1101 :
1102 : /*
1103 : * fixup_activate is called when:
1104 : * - an active object is activated
1105 : * - an unknown non-static object is activated
1106 : */
1107 : static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1108 : {
1109 : struct self_test *obj = addr;
1110 :
1111 : switch (state) {
1112 : case ODEBUG_STATE_NOTAVAILABLE:
1113 : return true;
1114 : case ODEBUG_STATE_ACTIVE:
1115 : debug_object_deactivate(obj, &descr_type_test);
1116 : debug_object_activate(obj, &descr_type_test);
1117 : return true;
1118 :
1119 : default:
1120 : return false;
1121 : }
1122 : }
1123 :
1124 : /*
1125 : * fixup_destroy is called when:
1126 : * - an active object is destroyed
1127 : */
1128 : static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1129 : {
1130 : struct self_test *obj = addr;
1131 :
1132 : switch (state) {
1133 : case ODEBUG_STATE_ACTIVE:
1134 : debug_object_deactivate(obj, &descr_type_test);
1135 : debug_object_destroy(obj, &descr_type_test);
1136 : return true;
1137 : default:
1138 : return false;
1139 : }
1140 : }
1141 :
1142 : /*
1143 : * fixup_free is called when:
1144 : * - an active object is freed
1145 : */
1146 : static bool __init fixup_free(void *addr, enum debug_obj_state state)
1147 : {
1148 : struct self_test *obj = addr;
1149 :
1150 : switch (state) {
1151 : case ODEBUG_STATE_ACTIVE:
1152 : debug_object_deactivate(obj, &descr_type_test);
1153 : debug_object_free(obj, &descr_type_test);
1154 : return true;
1155 : default:
1156 : return false;
1157 : }
1158 : }
1159 :
1160 : static int __init
1161 : check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1162 : {
1163 : struct debug_bucket *db;
1164 : struct debug_obj *obj;
1165 : unsigned long flags;
1166 : int res = -EINVAL;
1167 :
1168 : db = get_bucket((unsigned long) addr);
1169 :
1170 : raw_spin_lock_irqsave(&db->lock, flags);
1171 :
1172 : obj = lookup_object(addr, db);
1173 : if (!obj && state != ODEBUG_STATE_NONE) {
1174 : WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1175 : goto out;
1176 : }
1177 : if (obj && obj->state != state) {
1178 : WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1179 : obj->state, state);
1180 : goto out;
1181 : }
1182 : if (fixups != debug_objects_fixups) {
1183 : WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1184 : fixups, debug_objects_fixups);
1185 : goto out;
1186 : }
1187 : if (warnings != debug_objects_warnings) {
1188 : WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1189 : warnings, debug_objects_warnings);
1190 : goto out;
1191 : }
1192 : res = 0;
1193 : out:
1194 : raw_spin_unlock_irqrestore(&db->lock, flags);
1195 : if (res)
1196 : debug_objects_enabled = 0;
1197 : return res;
1198 : }
1199 :
1200 : static __initconst const struct debug_obj_descr descr_type_test = {
1201 : .name = "selftest",
1202 : .is_static_object = is_static_object,
1203 : .fixup_init = fixup_init,
1204 : .fixup_activate = fixup_activate,
1205 : .fixup_destroy = fixup_destroy,
1206 : .fixup_free = fixup_free,
1207 : };
1208 :
1209 : static __initdata struct self_test obj = { .static_init = 0 };
1210 :
1211 : static void __init debug_objects_selftest(void)
1212 : {
1213 : int fixups, oldfixups, warnings, oldwarnings;
1214 : unsigned long flags;
1215 :
1216 : local_irq_save(flags);
1217 :
1218 : fixups = oldfixups = debug_objects_fixups;
1219 : warnings = oldwarnings = debug_objects_warnings;
1220 : descr_test = &descr_type_test;
1221 :
1222 : debug_object_init(&obj, &descr_type_test);
1223 : if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1224 : goto out;
1225 : debug_object_activate(&obj, &descr_type_test);
1226 : if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1227 : goto out;
1228 : debug_object_activate(&obj, &descr_type_test);
1229 : if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1230 : goto out;
1231 : debug_object_deactivate(&obj, &descr_type_test);
1232 : if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1233 : goto out;
1234 : debug_object_destroy(&obj, &descr_type_test);
1235 : if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1236 : goto out;
1237 : debug_object_init(&obj, &descr_type_test);
1238 : if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1239 : goto out;
1240 : debug_object_activate(&obj, &descr_type_test);
1241 : if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1242 : goto out;
1243 : debug_object_deactivate(&obj, &descr_type_test);
1244 : if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1245 : goto out;
1246 : debug_object_free(&obj, &descr_type_test);
1247 : if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1248 : goto out;
1249 :
1250 : obj.static_init = 1;
1251 : debug_object_activate(&obj, &descr_type_test);
1252 : if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1253 : goto out;
1254 : debug_object_init(&obj, &descr_type_test);
1255 : if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1256 : goto out;
1257 : debug_object_free(&obj, &descr_type_test);
1258 : if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1259 : goto out;
1260 :
1261 : #ifdef CONFIG_DEBUG_OBJECTS_FREE
1262 : debug_object_init(&obj, &descr_type_test);
1263 : if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1264 : goto out;
1265 : debug_object_activate(&obj, &descr_type_test);
1266 : if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1267 : goto out;
1268 : __debug_check_no_obj_freed(&obj, sizeof(obj));
1269 : if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1270 : goto out;
1271 : #endif
1272 : pr_info("selftest passed\n");
1273 :
1274 : out:
1275 : debug_objects_fixups = oldfixups;
1276 : debug_objects_warnings = oldwarnings;
1277 : descr_test = NULL;
1278 :
1279 : local_irq_restore(flags);
1280 : }
1281 : #else
1282 : static inline void debug_objects_selftest(void) { }
1283 : #endif
1284 :
1285 : /*
1286 : * Called during early boot to initialize the hash buckets and link
1287 : * the static object pool objects into the pool list. After this call
1288 : * the object tracker is fully operational.
1289 : */
1290 1 : void __init debug_objects_early_init(void)
1291 : {
1292 1 : int i;
1293 :
1294 16385 : for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1295 16384 : raw_spin_lock_init(&obj_hash[i].lock);
1296 :
1297 1025 : for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1298 2047 : hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1299 1 : }
1300 :
1301 : /*
1302 : * Convert the statically allocated objects to dynamic ones:
1303 : */
1304 1 : static int __init debug_objects_replace_static_objects(void)
1305 : {
1306 1 : struct debug_bucket *db = obj_hash;
1307 1 : struct hlist_node *tmp;
1308 1 : struct debug_obj *obj, *new;
1309 1 : HLIST_HEAD(objects);
1310 1 : int i, cnt = 0;
1311 :
1312 1025 : for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1313 1024 : obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1314 1024 : if (!obj)
1315 0 : goto free;
1316 2047 : hlist_add_head(&obj->node, &objects);
1317 : }
1318 :
1319 : /*
1320 : * debug_objects_mem_init() is now called early, when only one CPU is up
1321 : * and interrupts have been disabled, so it is safe to replace the
1322 : * active object references.
1323 : */
1324 :
1325 : /* Remove the statically allocated objects from the pool */
1326 1024 : hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1327 2045 : hlist_del(&obj->node);
1328 : /* Move the allocated objects to the pool */
1329 1 : hlist_move_list(&objects, &obj_pool);
1330 :
1331 : /* Replace the active object references */
1332 16385 : for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1333 16384 : hlist_move_list(&db->list, &objects);
1334 :
1335 32769 : hlist_for_each_entry(obj, &objects, node) {
1336 1 : new = hlist_entry(obj_pool.first, typeof(*obj), node);
1337 1 : hlist_del(&new->node);
1338 : /* copy object data */
1339 1 : *new = *obj;
1340 1 : hlist_add_head(&new->node, &db->list);
1341 1 : cnt++;
1342 : }
1343 : }
1344 :
1345 : pr_debug("%d of %d active objects replaced\n",
1346 : cnt, obj_pool_used);
1347 : return 0;
1348 0 : free:
1349 0 : hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1350 0 : hlist_del(&obj->node);
1351 0 : kmem_cache_free(obj_cache, obj);
1352 : }
1353 : return -ENOMEM;
1354 : }
1355 :
1356 : /*
1357 : * Called after the kmem_caches are functional to set up a dedicated
1358 : * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1359 : * prevents the debug code from being called on kmem_cache_free() for
1360 : * the debug tracker objects, which avoids recursive calls.
1361 : */
1362 1 : void __init debug_objects_mem_init(void)
1363 : {
1364 1 : int cpu, extras;
1365 :
1366 1 : if (!debug_objects_enabled)
1367 : return;
1368 :
1369 : /*
1370 : * Initialize the percpu object pools
1371 : *
1372 : * Initialization is not strictly necessary, but is done for
1373 : * completeness.
1374 : */
1375 5 : for_each_possible_cpu(cpu)
1376 4 : INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1377 :
1378 1 : obj_cache = kmem_cache_create("debug_objects_cache",
1379 : sizeof (struct debug_obj), 0,
1380 : SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1381 : NULL);
1382 :
1383 1 : if (!obj_cache || debug_objects_replace_static_objects()) {
1384 0 : debug_objects_enabled = 0;
1385 0 : kmem_cache_destroy(obj_cache);
1386 0 : pr_warn("out of memory.\n");
1387 : } else
1388 : debug_objects_selftest();
1389 :
1390 : #ifdef CONFIG_HOTPLUG_CPU
1391 1 : cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1392 : object_cpu_offline);
1393 : #endif
1394 :
1395 : /*
1396 : * Increase the thresholds for allocating and freeing objects
1397 : * according to the number of possible CPUs available in the system.
1398 : */
1399 1 : extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1400 1 : debug_objects_pool_size += extras;
1401 1 : debug_objects_pool_min_level += extras;
1402 : }
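/*
 * Editor's note, a worked example of the scaling above: with 8 possible
 * CPUs, extras = 8 * ODEBUG_BATCH_SIZE = 128, growing the pool target
 * from 1024 to 1152 objects and the refill threshold from 256 to 384.
 */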
|