LCOV - code coverage report
Current view: top level - mm - kmemleak.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
Lines:     272 / 732   (37.2 %)
Functions:  30 /  63   (47.6 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * mm/kmemleak.c
       4             :  *
       5             :  * Copyright (C) 2008 ARM Limited
       6             :  * Written by Catalin Marinas <catalin.marinas@arm.com>
       7             :  *
       8             :  * For more information on the algorithm and kmemleak usage, please see
       9             :  * Documentation/dev-tools/kmemleak.rst.
      10             :  *
      11             :  * Notes on locking
      12             :  * ----------------
      13             :  *
      14             :  * The following locks and mutexes are used by kmemleak:
      15             :  *
      16             :  * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
      17             :  *   accesses to the object_tree_root. The object_list is the main list
      18             :  *   holding the metadata (struct kmemleak_object) for the allocated memory
       19             :  *   blocks. The object_tree_root is a red-black tree used to look up
      20             :  *   metadata based on a pointer to the corresponding memory block.  The
      21             :  *   kmemleak_object structures are added to the object_list and
      22             :  *   object_tree_root in the create_object() function called from the
      23             :  *   kmemleak_alloc() callback and removed in delete_object() called from the
       24             :  *   kmemleak_free() callback.
      25             :  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
      26             :  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
      27             :  *   that some members of this structure may be protected by other means
      28             :  *   (atomic or kmemleak_lock). This lock is also held when scanning the
      29             :  *   corresponding memory block to avoid the kernel freeing it via the
      30             :  *   kmemleak_free() callback. This is less heavyweight than holding a global
      31             :  *   lock like kmemleak_lock during scanning.
      32             :  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
      33             :  *   unreferenced objects at a time. The gray_list contains the objects which
      34             :  *   are already referenced or marked as false positives and need to be
      35             :  *   scanned. This list is only modified during a scanning episode when the
      36             :  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
      37             :  *   Note that the kmemleak_object.use_count is incremented when an object is
      38             :  *   added to the gray_list and therefore cannot be freed. This mutex also
       39             :  *   serializes users of the "kmemleak" debugfs file and protects
       40             :  *   modifications to the memory scanning parameters, including the
       41             :  *   scan_thread pointer.
      42             :  *
      43             :  * Locks and mutexes are acquired/nested in the following order:
      44             :  *
      45             :  *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
      46             :  *
      47             :  * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
      48             :  * regions.
      49             :  *
      50             :  * The kmemleak_object structures have a use_count incremented or decremented
      51             :  * using the get_object()/put_object() functions. When the use_count becomes
      52             :  * 0, this count can no longer be incremented and put_object() schedules the
      53             :  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
      54             :  * function must be protected by rcu_read_lock() to avoid accessing a freed
      55             :  * structure.
      56             :  */
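A rough sketch of this nesting order (illustrative only; the identifiers mirror the locks and helpers documented above, and the sequence is not lifted from the code below):

    mutex_lock(&scan_mutex);
    raw_spin_lock_irqsave(&object->lock, flags);        /* object being scanned */
    raw_spin_lock_irqsave(&kmemleak_lock, flags2);      /* protects the lookup */
    other_object = lookup_object(pointer, 1);
    /* same lock class, hence the SINGLE_DEPTH_NESTING annotation */
    raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
    other_object->count++;                              /* reference found */
    raw_spin_unlock(&other_object->lock);
    raw_spin_unlock_irqrestore(&kmemleak_lock, flags2);
    raw_spin_unlock_irqrestore(&object->lock, flags);
    mutex_unlock(&scan_mutex);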
      57             : 
      58             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      59             : 
      60             : #include <linux/init.h>
      61             : #include <linux/kernel.h>
      62             : #include <linux/list.h>
      63             : #include <linux/sched/signal.h>
      64             : #include <linux/sched/task.h>
      65             : #include <linux/sched/task_stack.h>
      66             : #include <linux/jiffies.h>
      67             : #include <linux/delay.h>
      68             : #include <linux/export.h>
      69             : #include <linux/kthread.h>
      70             : #include <linux/rbtree.h>
      71             : #include <linux/fs.h>
      72             : #include <linux/debugfs.h>
      73             : #include <linux/seq_file.h>
      74             : #include <linux/cpumask.h>
      75             : #include <linux/spinlock.h>
      76             : #include <linux/module.h>
      77             : #include <linux/mutex.h>
      78             : #include <linux/rcupdate.h>
      79             : #include <linux/stacktrace.h>
      80             : #include <linux/cache.h>
      81             : #include <linux/percpu.h>
      82             : #include <linux/memblock.h>
      83             : #include <linux/pfn.h>
      84             : #include <linux/mmzone.h>
      85             : #include <linux/slab.h>
      86             : #include <linux/thread_info.h>
      87             : #include <linux/err.h>
      88             : #include <linux/uaccess.h>
      89             : #include <linux/string.h>
      90             : #include <linux/nodemask.h>
      91             : #include <linux/mm.h>
      92             : #include <linux/workqueue.h>
      93             : #include <linux/crc32.h>
      94             : 
      95             : #include <asm/sections.h>
      96             : #include <asm/processor.h>
      97             : #include <linux/atomic.h>
      98             : 
      99             : #include <linux/kasan.h>
     100             : #include <linux/kmemleak.h>
     101             : #include <linux/memory_hotplug.h>
     102             : 
     103             : /*
     104             :  * Kmemleak configuration and common defines.
     105             :  */
     106             : #define MAX_TRACE               16      /* stack trace length */
     107             : #define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
     108             : #define SECS_FIRST_SCAN         60      /* delay before the first scan */
     109             : #define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
     110             : #define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */
     111             : 
     112             : #define BYTES_PER_POINTER       sizeof(void *)
     113             : 
     114             : /* GFP bitmask for kmemleak internal allocations */
     115             : #define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
     116             :                                  __GFP_NORETRY | __GFP_NOMEMALLOC | \
     117             :                                  __GFP_NOWARN)
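As a worked expansion (assuming the standard GFP flag definitions), masking a GFP_ATOMIC allocation preserves the caller's reclaim context while suppressing retries, emergency reserves and allocation-failure warnings:

    /* gfp_kmemleak_mask(GFP_ATOMIC) evaluates to: */
    (GFP_ATOMIC & (GFP_KERNEL | GFP_ATOMIC))   /* == GFP_ATOMIC */
            | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN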
     118             : 
     119             : /* scanning area inside a memory block */
     120             : struct kmemleak_scan_area {
     121             :         struct hlist_node node;
     122             :         unsigned long start;
     123             :         size_t size;
     124             : };
     125             : 
     126             : #define KMEMLEAK_GREY   0
     127             : #define KMEMLEAK_BLACK  -1
     128             : 
     129             : /*
     130             :  * Structure holding the metadata for each allocated memory block.
     131             :  * Modifications to such objects should be made while holding the
     132             :  * object->lock. Insertions or deletions from object_list, gray_list or
     133             :  * rb_node are already protected by the corresponding locks or mutex (see
     134             :  * the notes on locking above). These objects are reference-counted
     135             :  * (use_count) and freed using the RCU mechanism.
     136             :  */
     137             : struct kmemleak_object {
     138             :         raw_spinlock_t lock;
     139             :         unsigned int flags;             /* object status flags */
     140             :         struct list_head object_list;
     141             :         struct list_head gray_list;
     142             :         struct rb_node rb_node;
     143             :         struct rcu_head rcu;            /* object_list lockless traversal */
     144             :         /* object usage count; object freed when use_count == 0 */
     145             :         atomic_t use_count;
     146             :         unsigned long pointer;
     147             :         size_t size;
     148             :         /* pass surplus references to this pointer */
     149             :         unsigned long excess_ref;
      150             :         /* minimum number of pointers found before it is considered a leak */
     151             :         int min_count;
     152             :         /* the total number of pointers found pointing to this object */
     153             :         int count;
     154             :         /* checksum for detecting modified objects */
     155             :         u32 checksum;
     156             :         /* memory ranges to be scanned inside an object (empty for all) */
     157             :         struct hlist_head area_list;
     158             :         unsigned long trace[MAX_TRACE];
     159             :         unsigned int trace_len;
     160             :         unsigned long jiffies;          /* creation timestamp */
     161             :         pid_t pid;                      /* pid of the current task */
     162             :         char comm[TASK_COMM_LEN];       /* executable name */
     163             : };
     164             : 
     165             : /* flag representing the memory block allocation status */
     166             : #define OBJECT_ALLOCATED        (1 << 0)
      167             : /* flag set after the first reporting of an unreferenced object */
     168             : #define OBJECT_REPORTED         (1 << 1)
     169             : /* flag set to not scan the object */
     170             : #define OBJECT_NO_SCAN          (1 << 2)
     171             : /* flag set to fully scan the object when scan_area allocation failed */
     172             : #define OBJECT_FULL_SCAN        (1 << 3)
     173             : 
     174             : #define HEX_PREFIX              "    "
     175             : /* number of bytes to print per line; must be 16 or 32 */
     176             : #define HEX_ROW_SIZE            16
     177             : /* number of bytes to print at a time (1, 2, 4, 8) */
     178             : #define HEX_GROUP_SIZE          1
     179             : /* include ASCII after the hex output */
     180             : #define HEX_ASCII               1
     181             : /* max number of lines to be printed */
     182             : #define HEX_MAX_LINES           2
     183             : 
     184             : /* the list of all allocated objects */
     185             : static LIST_HEAD(object_list);
     186             : /* the list of gray-colored objects (see color_gray comment below) */
     187             : static LIST_HEAD(gray_list);
     188             : /* memory pool allocation */
     189             : static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
     190             : static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
     191             : static LIST_HEAD(mem_pool_free_list);
     192             : /* search tree for object boundaries */
     193             : static struct rb_root object_tree_root = RB_ROOT;
     194             : /* protecting the access to object_list and object_tree_root */
     195             : static DEFINE_RAW_SPINLOCK(kmemleak_lock);
     196             : 
     197             : /* allocation caches for kmemleak internal data */
     198             : static struct kmem_cache *object_cache;
     199             : static struct kmem_cache *scan_area_cache;
     200             : 
     201             : /* set if tracing memory operations is enabled */
     202             : static int kmemleak_enabled = 1;
     203             : /* same as above but only for the kmemleak_free() callback */
     204             : static int kmemleak_free_enabled = 1;
     205             : /* set in the late_initcall if there were no errors */
     206             : static int kmemleak_initialized;
     207             : /* set if a kmemleak warning was issued */
     208             : static int kmemleak_warning;
     209             : /* set if a fatal kmemleak error has occurred */
     210             : static int kmemleak_error;
     211             : 
      212             : /* minimum and maximum addresses that may be valid pointers */
     213             : static unsigned long min_addr = ULONG_MAX;
     214             : static unsigned long max_addr;
     215             : 
     216             : static struct task_struct *scan_thread;
     217             : /* used to avoid reporting of recently allocated objects */
     218             : static unsigned long jiffies_min_age;
     219             : static unsigned long jiffies_last_scan;
      220             : /* delay between automatic memory scans */
     221             : static signed long jiffies_scan_wait;
     222             : /* enables or disables the task stacks scanning */
     223             : static int kmemleak_stack_scan = 1;
     224             : /* protects the memory scanning, parameters and debug/kmemleak file access */
     225             : static DEFINE_MUTEX(scan_mutex);
      226             : /* setting kmemleak=on on the command line sets this var, skipping the disable */
     227             : static int kmemleak_skip_disable;
     228             : /* If there are leaks that can be reported */
     229             : static bool kmemleak_found_leaks;
     230             : 
     231             : static bool kmemleak_verbose;
     232             : module_param_named(verbose, kmemleak_verbose, bool, 0600);
     233             : 
     234             : static void kmemleak_disable(void);
     235             : 
     236             : /*
     237             :  * Print a warning and dump the stack trace.
     238             :  */
     239             : #define kmemleak_warn(x...)     do {            \
     240             :         pr_warn(x);                             \
     241             :         dump_stack();                           \
     242             :         kmemleak_warning = 1;                   \
     243             : } while (0)
     244             : 
     245             : /*
      246             :  * Macro invoked when a serious kmemleak condition has occurred and cannot be
      247             :  * recovered from. Kmemleak will be disabled and further allocation/freeing
      248             :  * tracing will no longer be available.
     249             :  */
     250             : #define kmemleak_stop(x...)     do {    \
     251             :         kmemleak_warn(x);               \
     252             :         kmemleak_disable();             \
     253             : } while (0)
     254             : 
     255             : #define warn_or_seq_printf(seq, fmt, ...)       do {    \
     256             :         if (seq)                                        \
     257             :                 seq_printf(seq, fmt, ##__VA_ARGS__);    \
     258             :         else                                            \
     259             :                 pr_warn(fmt, ##__VA_ARGS__);            \
     260             : } while (0)
     261             : 
     262           0 : static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
     263             :                                  int rowsize, int groupsize, const void *buf,
     264             :                                  size_t len, bool ascii)
     265             : {
     266           0 :         if (seq)
     267           0 :                 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
     268             :                              buf, len, ascii);
     269             :         else
     270           0 :                 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
     271             :                                rowsize, groupsize, buf, len, ascii);
     272           0 : }
     273             : 
     274             : /*
      275             :  * Printing of the object's hex dump to the seq file. The number of lines to be
     276             :  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
      277             :  * actual number of printed bytes depends on HEX_ROW_SIZE. This function must
      278             :  * be called with the object->lock held.
     279             :  */
     280           0 : static void hex_dump_object(struct seq_file *seq,
     281             :                             struct kmemleak_object *object)
     282             : {
     283           0 :         const u8 *ptr = (const u8 *)object->pointer;
     284           0 :         size_t len;
     285             : 
     286             :         /* limit the number of lines to HEX_MAX_LINES */
     287           0 :         len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
     288             : 
     289           0 :         warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
     290           0 :         kasan_disable_current();
     291           0 :         warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
     292             :                              HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
     293           0 :         kasan_enable_current();
     294           0 : }
     295             : 
     296             : /*
     297             :  * Object colors, encoded with count and min_count:
     298             :  * - white - orphan object, not enough references to it (count < min_count)
      299             :  * - gray  - not orphan: either marked as a false positive (min_count == 0)
      300             :  *              or with sufficient references to it (count >= min_count)
     301             :  * - black - ignore, it doesn't contain references (e.g. text section)
     302             :  *              (min_count == -1). No function defined for this color.
      303             :  * Newly created objects start with object->count == 0 (see create_object());
      304             :  * with a positive min_count they are thus white until a scan finds references.
     305             :  */
     306           0 : static bool color_white(const struct kmemleak_object *object)
     307             : {
     308           0 :         return object->count != KMEMLEAK_BLACK &&
     309           0 :                 object->count < object->min_count;
     310             : }
     311             : 
     312           0 : static bool color_gray(const struct kmemleak_object *object)
     313             : {
     314           0 :         return object->min_count != KMEMLEAK_BLACK &&
     315           0 :                 object->count >= object->min_count;
     316             : }
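A small illustration of the encoding, using hypothetical field values rather than code from this file:

    struct kmemleak_object obj = { .min_count = 1, .count = 0 };
    /* color_white(&obj): true - leak candidate until a reference is found */
    obj.count = 1;
    /* color_gray(&obj): true - referenced, queued on gray_list for scanning */
    obj.min_count = KMEMLEAK_BLACK;
    /* neither white nor gray: the object is ignored (min_count == -1) */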
     317             : 
     318             : /*
     319             :  * Objects are considered unreferenced only if their color is white, they have
       320             :  * not been deleted, and have a minimum age to avoid false positives caused by
     321             :  * pointers temporarily stored in CPU registers.
     322             :  */
     323           0 : static bool unreferenced_object(struct kmemleak_object *object)
     324             : {
     325           0 :         return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
     326           0 :                 time_before_eq(object->jiffies + jiffies_min_age,
     327             :                                jiffies_last_scan);
     328             : }
     329             : 
     330             : /*
      331             :  * Printing of an unreferenced object's information to the seq file. The
     332             :  * print_unreferenced function must be called with the object->lock held.
     333             :  */
     334           0 : static void print_unreferenced(struct seq_file *seq,
     335             :                                struct kmemleak_object *object)
     336             : {
     337           0 :         int i;
     338           0 :         unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
     339             : 
     340           0 :         warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
     341             :                    object->pointer, object->size);
     342           0 :         warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
     343             :                    object->comm, object->pid, object->jiffies,
     344             :                    msecs_age / 1000, msecs_age % 1000);
     345           0 :         hex_dump_object(seq, object);
     346           0 :         warn_or_seq_printf(seq, "  backtrace:\n");
     347             : 
     348           0 :         for (i = 0; i < object->trace_len; i++) {
     349           0 :                 void *ptr = (void *)object->trace[i];
     350           0 :                 warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
     351             :         }
     352           0 : }
     353             : 
     354             : /*
     355             :  * Print the kmemleak_object information. This function is used mainly for
      356             :  * debugging special cases of kmemleak operations. It must be called with
     357             :  * the object->lock held.
     358             :  */
     359           0 : static void dump_object_info(struct kmemleak_object *object)
     360             : {
     361           0 :         pr_notice("Object 0x%08lx (size %zu):\n",
     362             :                   object->pointer, object->size);
     363           0 :         pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
     364             :                   object->comm, object->pid, object->jiffies);
     365           0 :         pr_notice("  min_count = %d\n", object->min_count);
     366           0 :         pr_notice("  count = %d\n", object->count);
     367           0 :         pr_notice("  flags = 0x%x\n", object->flags);
     368           0 :         pr_notice("  checksum = %u\n", object->checksum);
     369           0 :         pr_notice("  backtrace:\n");
     370           0 :         stack_trace_print(object->trace, object->trace_len, 4);
     371           0 : }
     372             : 
     373             : /*
      374             :  * Look up a memory block's metadata (kmemleak_object) in the object search
     375             :  * tree based on a pointer value. If alias is 0, only values pointing to the
     376             :  * beginning of the memory block are allowed. The kmemleak_lock must be held
     377             :  * when calling this function.
     378             :  */
     379      665569 : static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
     380             : {
     381      665569 :         struct rb_node *rb = object_tree_root.rb_node;
     382             : 
     383    13068040 :         while (rb) {
     384    13068036 :                 struct kmemleak_object *object =
     385    13068036 :                         rb_entry(rb, struct kmemleak_object, rb_node);
     386    13068036 :                 if (ptr < object->pointer)
     387     4380859 :                         rb = object->rb_node.rb_left;
     388     8687177 :                 else if (object->pointer + object->size <= ptr)
     389     8021612 :                         rb = object->rb_node.rb_right;
     390      665565 :                 else if (object->pointer == ptr || alias)
     391             :                         return object;
     392             :                 else {
     393           0 :                         kmemleak_warn("Found object by alias at 0x%08lx\n",
     394             :                                       ptr);
     395           0 :                         dump_object_info(object);
     396           0 :                         break;
     397             :                 }
     398             :         }
     399             :         return NULL;
     400             : }
     401             : 
     402             : /*
     403             :  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
      404             :  * that once an object's use_count reaches 0, its RCU freeing has already been
      405             :  * scheduled and the object must no longer be used. This function must be
     406             :  * called under the protection of rcu_read_lock().
     407             :  */
     408       24416 : static int get_object(struct kmemleak_object *object)
     409             : {
     410       24416 :         return atomic_inc_not_zero(&object->use_count);
     411             : }
     412             : 
     413             : /*
     414             :  * Memory pool allocation and freeing. kmemleak_lock must not be held.
     415             :  */
     416      721259 : static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
     417             : {
     418      721259 :         unsigned long flags;
     419      721259 :         struct kmemleak_object *object;
     420             : 
     421             :         /* try the slab allocator first */
     422      721259 :         if (object_cache) {
     423      720724 :                 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
     424      720784 :                 if (object)
     425             :                         return object;
     426             :         }
     427             : 
     428             :         /* slab allocation failed, try the memory pool */
     429         535 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
     430         535 :         object = list_first_entry_or_null(&mem_pool_free_list,
     431             :                                           typeof(*object), object_list);
     432           3 :         if (object)
     433           3 :                 list_del(&object->object_list);
     434         532 :         else if (mem_pool_free_count)
     435         532 :                 object = &mem_pool[--mem_pool_free_count];
     436             :         else
     437           0 :                 pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
     438         535 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
     439             : 
     440         535 :         return object;
     441             : }
     442             : 
     443             : /*
     444             :  * Return the object to either the slab allocator or the memory pool.
     445             :  */
     446      640266 : static void mem_pool_free(struct kmemleak_object *object)
     447             : {
     448      640266 :         unsigned long flags;
     449             : 
     450      640266 :         if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
     451      640263 :                 kmem_cache_free(object_cache, object);
     452      640263 :                 return;
     453             :         }
     454             : 
     455             :         /* add the object to the memory pool free list */
     456           3 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
     457           3 :         list_add(&object->object_list, &mem_pool_free_list);
     458           3 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
     459             : }
     460             : 
     461             : /*
     462             :  * RCU callback to free a kmemleak_object.
     463             :  */
     464      638691 : static void free_object_rcu(struct rcu_head *rcu)
     465             : {
     466      638691 :         struct hlist_node *tmp;
     467      638691 :         struct kmemleak_scan_area *area;
     468      638691 :         struct kmemleak_object *object =
     469      638691 :                 container_of(rcu, struct kmemleak_object, rcu);
     470             : 
     471             :         /*
     472             :          * Once use_count is 0 (guaranteed by put_object), there is no other
     473             :          * code accessing this object, hence no need for locking.
     474             :          */
     475     1292624 :         hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
     476       12148 :                 hlist_del(&area->node);
     477       12148 :                 kmem_cache_free(scan_area_cache, area);
     478             :         }
     479      640238 :         mem_pool_free(object);
     480      639560 : }
     481             : 
     482             : /*
     483             :  * Decrement the object use_count. Once the count is 0, free the object using
     484             :  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
     485             :  * delete_object() path, the delayed RCU freeing ensures that there is no
     486             :  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
     487             :  * is also possible.
     488             :  */
     489      665432 : static void put_object(struct kmemleak_object *object)
     490             : {
     491     1330981 :         if (!atomic_dec_and_test(&object->use_count))
     492             :                 return;
     493             : 
     494             :         /* should only get here after delete_object was called */
     495      641141 :         WARN_ON(object->flags & OBJECT_ALLOCATED);
     496             : 
     497             :         /*
     498             :          * It may be too early for the RCU callbacks, however, there is no
     499             :          * concurrent object_list traversal when !object_cache and all objects
     500             :          * came from the memory pool. Free the object directly.
     501             :          */
     502      641141 :         if (object_cache)
     503      641138 :                 call_rcu(&object->rcu, free_object_rcu);
     504             :         else
     505           3 :                 free_object_rcu(&object->rcu);
     506             : }
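A condensed sketch of the reference-counting pattern described above (illustrative only; find_and_get_object() below is the in-tree combination of these steps):

    rcu_read_lock();
    object = lookup_object(ptr, 0);        /* kmemleak_lock held in practice */
    if (object && !get_object(object))     /* use_count already 0: being freed */
            object = NULL;
    rcu_read_unlock();
    if (object) {
            /* ... use the object, typically under object->lock ... */
            put_object(object);            /* may schedule free_object_rcu() */
    }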
     507             : 
     508             : /*
     509             :  * Look up an object in the object search tree and increase its use_count.
     510             :  */
     511       24406 : static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
     512             : {
     513       24406 :         unsigned long flags;
     514       24406 :         struct kmemleak_object *object;
     515             : 
     516       24406 :         rcu_read_lock();
     517       24406 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
     518       24406 :         object = lookup_object(ptr, alias);
     519       24406 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
     520             : 
     521             :         /* check whether the object is still available */
     522       24406 :         if (object && !get_object(object))
     523           0 :                 object = NULL;
     524       24406 :         rcu_read_unlock();
     525             : 
     526       24406 :         return object;
     527             : }
     528             : 
     529             : /*
     530             :  * Remove an object from the object_tree_root and object_list. Must be called
     531             :  * with the kmemleak_lock held _if_ kmemleak is still enabled.
     532             :  */
     533      641147 : static void __remove_object(struct kmemleak_object *object)
     534             : {
     535      641147 :         rb_erase(&object->rb_node, &object_tree_root);
     536      641147 :         list_del_rcu(&object->object_list);
     537      641147 : }
     538             : 
     539             : /*
     540             :  * Look up an object in the object search tree and remove it from both
     541             :  * object_tree_root and object_list. The returned object's use_count should be
     542             :  * at least 1, as initially set by create_object().
     543             :  */
     544      640997 : static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
     545             : {
     546      640997 :         unsigned long flags;
     547      640997 :         struct kmemleak_object *object;
     548             : 
     549      640997 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
     550      641151 :         object = lookup_object(ptr, alias);
     551      641151 :         if (object)
     552      641147 :                 __remove_object(object);
     553      641151 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
     554             : 
     555      641145 :         return object;
     556             : }
     557             : 
     558             : /*
     559             :  * Save stack trace to the given array of MAX_TRACE size.
     560             :  */
     561      720971 : static int __save_stack_trace(unsigned long *trace)
     562             : {
     563      720971 :         return stack_trace_save(trace, MAX_TRACE, 2);
     564             : }
     565             : 
     566             : /*
     567             :  * Create the metadata (struct kmemleak_object) corresponding to an allocated
     568             :  * memory block and add it to the object_list and object_tree_root.
     569             :  */
     570      721229 : static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
     571             :                                              int min_count, gfp_t gfp)
     572             : {
     573      721229 :         unsigned long flags;
     574      721229 :         struct kmemleak_object *object, *parent;
     575      721229 :         struct rb_node **link, *rb_parent;
     576      721229 :         unsigned long untagged_ptr;
     577             : 
     578      721229 :         object = mem_pool_alloc(gfp);
     579      721284 :         if (!object) {
     580           0 :                 pr_warn("Cannot allocate a kmemleak_object structure\n");
     581           0 :                 kmemleak_disable();
     582           0 :                 return NULL;
     583             :         }
     584             : 
     585      721284 :         INIT_LIST_HEAD(&object->object_list);
     586      721284 :         INIT_LIST_HEAD(&object->gray_list);
     587      721284 :         INIT_HLIST_HEAD(&object->area_list);
     588      721284 :         raw_spin_lock_init(&object->lock);
     589      721126 :         atomic_set(&object->use_count, 1);
     590      720971 :         object->flags = OBJECT_ALLOCATED;
     591      720971 :         object->pointer = ptr;
     592      720971 :         object->size = size;
     593      720971 :         object->excess_ref = 0;
     594      720971 :         object->min_count = min_count;
     595      720971 :         object->count = 0;                   /* white color initially */
     596      720971 :         object->jiffies = jiffies;
     597      720971 :         object->checksum = 0;
     598             : 
     599             :         /* task information */
     600      720971 :         if (in_irq()) {
     601           0 :                 object->pid = 0;
     602           0 :                 strncpy(object->comm, "hardirq", sizeof(object->comm));
     603      720971 :         } else if (in_serving_softirq()) {
     604        1609 :                 object->pid = 0;
     605        1609 :                 strncpy(object->comm, "softirq", sizeof(object->comm));
     606             :         } else {
     607      719362 :                 object->pid = current->pid;
     608             :                 /*
     609             :                  * There is a small chance of a race with set_task_comm(),
     610             :                  * however using get_task_comm() here may cause locking
     611             :                  * dependency issues with current->alloc_lock. In the worst
      612             :                  * case, the recorded comm string may be inaccurate.
     613             :                  */
     614      719362 :                 strncpy(object->comm, current->comm, sizeof(object->comm));
     615             :         }
     616             : 
     617             :         /* kernel backtrace */
     618      720971 :         object->trace_len = __save_stack_trace(object->trace);
     619             : 
     620      721223 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
     621             : 
     622      721449 :         untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
     623      721449 :         min_addr = min(min_addr, untagged_ptr);
     624      721449 :         max_addr = max(max_addr, untagged_ptr + size);
     625      721449 :         link = &object_tree_root.rb_node;
     626      721449 :         rb_parent = NULL;
     627    15960215 :         while (*link) {
     628    15238766 :                 rb_parent = *link;
     629    15238766 :                 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
     630    15238766 :                 if (ptr + size <= parent->pointer)
     631     5005027 :                         link = &parent->rb_node.rb_left;
     632    10233739 :                 else if (parent->pointer + parent->size <= ptr)
     633    10233739 :                         link = &parent->rb_node.rb_right;
     634             :                 else {
     635           0 :                         kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
     636             :                                       ptr);
     637             :                         /*
     638             :                          * No need for parent->lock here since "parent" cannot
     639             :                          * be freed while the kmemleak_lock is held.
     640             :                          */
     641           0 :                         dump_object_info(parent);
     642           0 :                         kmem_cache_free(object_cache, object);
     643           0 :                         object = NULL;
     644           0 :                         goto out;
     645             :                 }
     646             :         }
     647      721449 :         rb_link_node(&object->rb_node, rb_parent, link);
     648      721449 :         rb_insert_color(&object->rb_node, &object_tree_root);
     649             : 
     650      721449 :         list_add_tail_rcu(&object->object_list, &object_list);
     651      721449 : out:
     652      721449 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
     653      721449 :         return object;
     654             : }
     655             : 
     656             : /*
     657             :  * Mark the object as not allocated and schedule RCU freeing via put_object().
     658             :  */
     659      641140 : static void __delete_object(struct kmemleak_object *object)
     660             : {
     661      641140 :         unsigned long flags;
     662             : 
     663      641140 :         WARN_ON(!(object->flags & OBJECT_ALLOCATED));
     664      641140 :         WARN_ON(atomic_read(&object->use_count) < 1);
     665             : 
     666             :         /*
     667             :          * Locking here also ensures that the corresponding memory block
     668             :          * cannot be freed when it is being scanned.
     669             :          */
     670      641140 :         raw_spin_lock_irqsave(&object->lock, flags);
     671      641131 :         object->flags &= ~OBJECT_ALLOCATED;
     672      641131 :         raw_spin_unlock_irqrestore(&object->lock, flags);
     673      641035 :         put_object(object);
     674      640816 : }
     675             : 
     676             : /*
     677             :  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
     678             :  * delete it.
     679             :  */
     680      640996 : static void delete_object_full(unsigned long ptr)
     681             : {
     682      640996 :         struct kmemleak_object *object;
     683             : 
     684      640996 :         object = find_and_remove_object(ptr, 0);
     685      641139 :         if (!object) {
     686             : #ifdef DEBUG
     687             :                 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
     688             :                               ptr);
     689             : #endif
     690             :                 return;
     691             :         }
     692      641139 :         __delete_object(object);
     693             : }
     694             : 
     695             : /*
     696             :  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
     697             :  * delete it. If the memory block is partially freed, the function may create
     698             :  * additional metadata for the remaining parts of the block.
     699             :  */
     700           6 : static void delete_object_part(unsigned long ptr, size_t size)
     701             : {
     702           6 :         struct kmemleak_object *object;
     703           6 :         unsigned long start, end;
     704             : 
     705           6 :         object = find_and_remove_object(ptr, 1);
     706           6 :         if (!object) {
     707             : #ifdef DEBUG
     708             :                 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
     709             :                               ptr, size);
     710             : #endif
     711             :                 return;
     712             :         }
     713             : 
     714             :         /*
     715             :          * Create one or two objects that may result from the memory block
     716             :          * split. Note that partial freeing is only done by free_bootmem() and
     717             :          * this happens before kmemleak_init() is called.
     718             :          */
     719           2 :         start = object->pointer;
     720           2 :         end = object->pointer + object->size;
     721           2 :         if (ptr > start)
     722           0 :                 create_object(start, ptr - start, object->min_count,
     723             :                               GFP_KERNEL);
     724           2 :         if (ptr + size < end)
     725           0 :                 create_object(ptr + size, end - ptr - size, object->min_count,
     726             :                               GFP_KERNEL);
     727             : 
     728           2 :         __delete_object(object);
     729             : }
     730             : 
     731           2 : static void __paint_it(struct kmemleak_object *object, int color)
     732             : {
     733           2 :         object->min_count = color;
     734           2 :         if (color == KMEMLEAK_BLACK)
     735           1 :                 object->flags |= OBJECT_NO_SCAN;
     736           0 : }
     737             : 
     738           2 : static void paint_it(struct kmemleak_object *object, int color)
     739             : {
     740           2 :         unsigned long flags;
     741             : 
     742           2 :         raw_spin_lock_irqsave(&object->lock, flags);
     743           2 :         __paint_it(object, color);
     744           2 :         raw_spin_unlock_irqrestore(&object->lock, flags);
     745           2 : }
     746             : 
     747           2 : static void paint_ptr(unsigned long ptr, int color)
     748             : {
     749           2 :         struct kmemleak_object *object;
     750             : 
     751           2 :         object = find_and_get_object(ptr, 0);
     752           2 :         if (!object) {
     753           0 :                 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
     754             :                               ptr,
     755             :                               (color == KMEMLEAK_GREY) ? "Grey" :
     756             :                               (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
     757           0 :                 return;
     758             :         }
     759           2 :         paint_it(object, color);
     760           2 :         put_object(object);
     761             : }
     762             : 
     763             : /*
     764             :  * Mark an object permanently as gray-colored so that it can no longer be
     765             :  * reported as a leak. This is used in general to mark a false positive.
     766             :  */
     767           1 : static void make_gray_object(unsigned long ptr)
     768             : {
     769           1 :         paint_ptr(ptr, KMEMLEAK_GREY);
     770           1 : }
     771             : 
     772             : /*
     773             :  * Mark the object as black-colored so that it is ignored from scans and
     774             :  * reporting.
     775             :  */
     776           1 : static void make_black_object(unsigned long ptr)
     777             : {
     778           1 :         paint_ptr(ptr, KMEMLEAK_BLACK);
     779           1 : }
     780             : 
     781             : /*
     782             :  * Add a scanning area to the object. If at least one such area is added,
     783             :  * kmemleak will only scan these ranges rather than the whole memory block.
     784             :  */
     785       12194 : static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
     786             : {
     787       12194 :         unsigned long flags;
     788       12194 :         struct kmemleak_object *object;
     789       12194 :         struct kmemleak_scan_area *area = NULL;
     790             : 
     791       12194 :         object = find_and_get_object(ptr, 1);
     792       12194 :         if (!object) {
     793           0 :                 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
     794             :                               ptr);
     795           0 :                 return;
     796             :         }
     797             : 
     798       12194 :         if (scan_area_cache)
     799       12194 :                 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
     800             : 
     801       12194 :         raw_spin_lock_irqsave(&object->lock, flags);
     802       12194 :         if (!area) {
     803           0 :                 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
     804             :                 /* mark the object for full scan to avoid false positives */
     805           0 :                 object->flags |= OBJECT_FULL_SCAN;
     806           0 :                 goto out_unlock;
     807             :         }
     808       12194 :         if (size == SIZE_MAX) {
     809       12194 :                 size = object->pointer + object->size - ptr;
     810           0 :         } else if (ptr + size > object->pointer + object->size) {
     811           0 :                 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
     812           0 :                 dump_object_info(object);
     813           0 :                 kmem_cache_free(scan_area_cache, area);
     814           0 :                 goto out_unlock;
     815             :         }
     816             : 
     817       12194 :         INIT_HLIST_NODE(&area->node);
     818       12194 :         area->start = ptr;
     819       12194 :         area->size = size;
     820             : 
     821       12194 :         hlist_add_head(&area->node, &object->area_list);
     822       12194 : out_unlock:
     823       12194 :         raw_spin_unlock_irqrestore(&object->lock, flags);
     824       12194 :         put_object(object);
     825             : }
     826             : 
     827             : /*
     828             :  * Any surplus references (object already gray) to 'ptr' are passed to
     829             :  * 'excess_ref'. This is used in the vmalloc() case where a pointer to
     830             :  * vm_struct may be used as an alternative reference to the vmalloc'ed object
     831             :  * (see free_thread_stack()).
     832             :  */
     833       12191 : static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
     834             : {
     835       12191 :         unsigned long flags;
     836       12191 :         struct kmemleak_object *object;
     837             : 
     838       12191 :         object = find_and_get_object(ptr, 0);
     839       12191 :         if (!object) {
     840           0 :                 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
     841             :                               ptr);
     842           0 :                 return;
     843             :         }
     844             : 
     845       12191 :         raw_spin_lock_irqsave(&object->lock, flags);
     846       12191 :         object->excess_ref = excess_ref;
     847       12191 :         raw_spin_unlock_irqrestore(&object->lock, flags);
     848       12191 :         put_object(object);
     849             : }
     850             : 
     851             : /*
      852             :  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
      853             :  * pointer. Such an object will not be scanned by kmemleak, but references
      854             :  * to it are still searched.
     855             :  */
     856           0 : static void object_no_scan(unsigned long ptr)
     857             : {
     858           0 :         unsigned long flags;
     859           0 :         struct kmemleak_object *object;
     860             : 
     861           0 :         object = find_and_get_object(ptr, 0);
     862           0 :         if (!object) {
     863           0 :                 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
     864           0 :                 return;
     865             :         }
     866             : 
     867           0 :         raw_spin_lock_irqsave(&object->lock, flags);
     868           0 :         object->flags |= OBJECT_NO_SCAN;
     869           0 :         raw_spin_unlock_irqrestore(&object->lock, flags);
     870           0 :         put_object(object);
     871             : }
     872             : 
     873             : /**
     874             :  * kmemleak_alloc - register a newly allocated object
     875             :  * @ptr:        pointer to beginning of the object
     876             :  * @size:       size of the object
     877             :  * @min_count:  minimum number of references to this object. If during memory
     878             :  *              scanning a number of references less than @min_count is found,
     879             :  *              the object is reported as a memory leak. If @min_count is 0,
     880             :  *              the object is never reported as a leak. If @min_count is -1,
     881             :  *              the object is ignored (not scanned and not reported as a leak)
     882             :  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
     883             :  *
     884             :  * This function is called from the kernel allocators when a new object
     885             :  * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
     886             :  */
     887      700495 : void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
     888             :                           gfp_t gfp)
     889             : {
     890      700495 :         pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
     891             : 
     892      700495 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
     893      700506 :                 create_object((unsigned long)ptr, size, min_count, gfp);
     894      700652 : }
     895             : EXPORT_SYMBOL_GPL(kmemleak_alloc);
     896             : 
     897             : /**
     898             :  * kmemleak_alloc_percpu - register a newly allocated __percpu object
     899             :  * @ptr:        __percpu pointer to beginning of the object
     900             :  * @size:       size of the object
     901             :  * @gfp:        flags used for kmemleak internal memory allocations
     902             :  *
     903             :  * This function is called from the kernel percpu allocator when a new object
     904             :  * (memory block) is allocated (alloc_percpu).
     905             :  */
     906        2175 : void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
     907             :                                  gfp_t gfp)
     908             : {
     909        2175 :         unsigned int cpu;
     910             : 
     911        2175 :         pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
     912             : 
     913             :         /*
     914             :          * Percpu allocations are only scanned and not reported as leaks
     915             :          * (min_count is set to 0).
     916             :          */
     917        2175 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
     918       10875 :                 for_each_possible_cpu(cpu)
     919        8700 :                         create_object((unsigned long)per_cpu_ptr(ptr, cpu),
     920             :                                       size, 0, gfp);
     921        2175 : }
     922             : EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
     923             : 
     924             : /**
     925             :  * kmemleak_vmalloc - register a newly vmalloc'ed object
     926             :  * @area:       pointer to vm_struct
     927             :  * @size:       size of the object
     928             :  * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
     929             :  *
     930             :  * This function is called from the vmalloc() kernel allocator when a new
     931             :  * object (memory block) is allocated.
     932             :  */
     933       12225 : void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
     934             : {
     935       12225 :         pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
     936             : 
     937             :         /*
     938             :          * A min_count = 2 is needed because vm_struct contains a reference to
     939             :          * the virtual address of the vmalloc'ed block.
     940             :          */
     941       12225 :         if (kmemleak_enabled) {
     942       12225 :                 create_object((unsigned long)area->addr, size, 2, gfp);
     943       12225 :                 object_set_excess_ref((unsigned long)area,
     944       12225 :                                       (unsigned long)area->addr);
     945             :         }
     946       12225 : }
     947             : EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
     948             : 
     949             : /**
     950             :  * kmemleak_free - unregister a previously registered object
     951             :  * @ptr:        pointer to beginning of the object
     952             :  *
     953             :  * This function is called from the kernel allocators when an object (memory
     954             :  * block) is freed (kmem_cache_free, kfree, vfree etc.).
     955             :  */
     956      634752 : void __ref kmemleak_free(const void *ptr)
     957             : {
     958      634752 :         pr_debug("%s(0x%p)\n", __func__, ptr);
     959             : 
     960      634752 :         if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
     961      634757 :                 delete_object_full((unsigned long)ptr);
     962      634579 : }
     963             : EXPORT_SYMBOL_GPL(kmemleak_free);
     964             : 
     965             : /**
     966             :  * kmemleak_free_part - partially unregister a previously registered object
      967             :  * @ptr:        pointer to the beginning of, or inside, the object. This also
     968             :  *              represents the start of the range to be freed
     969             :  * @size:       size to be unregistered
     970             :  *
     971             :  * This function is called when only a part of a memory block is freed
     972             :  * (usually from the bootmem allocator).
     973             :  */
     974           6 : void __ref kmemleak_free_part(const void *ptr, size_t size)
     975             : {
     976           6 :         pr_debug("%s(0x%p)\n", __func__, ptr);
     977             : 
     978           6 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
     979           6 :                 delete_object_part((unsigned long)ptr, size);
     980           6 : }
     981             : EXPORT_SYMBOL_GPL(kmemleak_free_part);
     982             : 
     983             : /**
     984             :  * kmemleak_free_percpu - unregister a previously registered __percpu object
     985             :  * @ptr:        __percpu pointer to beginning of the object
     986             :  *
     987             :  * This function is called from the kernel percpu allocator when an object
     988             :  * (memory block) is freed (free_percpu).
     989             :  */
     990        1564 : void __ref kmemleak_free_percpu(const void __percpu *ptr)
     991             : {
     992        1564 :         unsigned int cpu;
     993             : 
     994        1564 :         pr_debug("%s(0x%p)\n", __func__, ptr);
     995             : 
     996        1564 :         if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
     997        7820 :                 for_each_possible_cpu(cpu)
     998        6256 :                         delete_object_full((unsigned long)per_cpu_ptr(ptr,
     999             :                                                                       cpu));
    1000        1564 : }
    1001             : EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
    1002             : 
    1003             : /**
    1004             :  * kmemleak_update_trace - update object allocation stack trace
    1005             :  * @ptr:        pointer to beginning of the object
    1006             :  *
    1007             :  * Override the object allocation stack trace for cases where the actual
    1008             :  * allocation place is not always useful.
    1009             :  */
    1010           0 : void __ref kmemleak_update_trace(const void *ptr)
    1011             : {
    1012           0 :         struct kmemleak_object *object;
    1013           0 :         unsigned long flags;
    1014             : 
    1015           0 :         pr_debug("%s(0x%p)\n", __func__, ptr);
    1016             : 
    1017           0 :         if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
    1018             :                 return;
    1019             : 
    1020           0 :         object = find_and_get_object((unsigned long)ptr, 1);
    1021           0 :         if (!object) {
    1022             : #ifdef DEBUG
    1023             :                 kmemleak_warn("Updating stack trace for unknown object at %p\n",
    1024             :                               ptr);
    1025             : #endif
    1026             :                 return;
    1027             :         }
    1028             : 
    1029           0 :         raw_spin_lock_irqsave(&object->lock, flags);
    1030           0 :         object->trace_len = __save_stack_trace(object->trace);
    1031           0 :         raw_spin_unlock_irqrestore(&object->lock, flags);
    1032             : 
    1033           0 :         put_object(object);
    1034             : }
    1035             : EXPORT_SYMBOL(kmemleak_update_trace);
    1036             : 
    1037             : /**
    1038             :  * kmemleak_not_leak - mark an allocated object as false positive
    1039             :  * @ptr:        pointer to beginning of the object
    1040             :  *
    1041             :  * Calling this function on an object will cause the memory block to no longer
    1042             :  * be reported as a leak and to always be scanned.
    1043             :  */
    1044           1 : void __ref kmemleak_not_leak(const void *ptr)
    1045             : {
    1046           1 :         pr_debug("%s(0x%p)\n", __func__, ptr);
    1047             : 
    1048           1 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
    1049           1 :                 make_gray_object((unsigned long)ptr);
    1050           1 : }
    1051             : EXPORT_SYMBOL(kmemleak_not_leak);
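
A typical use, sketched with hypothetical driver code: a buffer whose only reference is handed to hardware (e.g. programmed into a device register), so no pointer to it is visible during scanning; marking it as a false positive silences the report while its contents are still scanned.

        #include <linux/errno.h>
        #include <linux/slab.h>
        #include <linux/kmemleak.h>

        static int example_dma_setup(void)
        {
                void *buf = kmalloc(256, GFP_KERNEL);

                if (!buf)
                        return -ENOMEM;
                /* ... hand buf's address to the device only ... */
                kmemleak_not_leak(buf); /* suppress the false positive */
                return 0;
        }
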
    1052             : 
    1053             : /**
    1054             :  * kmemleak_ignore - ignore an allocated object
    1055             :  * @ptr:        pointer to beginning of the object
    1056             :  *
    1057             :  * Calling this function on an object will cause the memory block to be
    1058             :  * ignored (not scanned and not reported as a leak). This is usually done when
    1059             :  * it is known that the corresponding block is not a leak and does not contain
    1060             :  * any references to other allocated memory blocks.
    1061             :  */
    1062           1 : void __ref kmemleak_ignore(const void *ptr)
    1063             : {
    1064           1 :         pr_debug("%s(0x%p)\n", __func__, ptr);
    1065             : 
    1066           1 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
    1067           1 :                 make_black_object((unsigned long)ptr);
    1068           1 : }
    1069             : EXPORT_SYMBOL(kmemleak_ignore);
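
By contrast with kmemleak_not_leak() above, ignoring an object also excludes its contents from scanning. A hypothetical example: a table of raw register values that cannot contain kernel pointers and should not be reported.

        #include <linux/errno.h>
        #include <linux/slab.h>
        #include <linux/types.h>
        #include <linux/kmemleak.h>

        static u32 *example_regs;       /* hypothetical raw register dump */

        static int example_regs_init(void)
        {
                example_regs = kmalloc_array(64, sizeof(*example_regs),
                                             GFP_KERNEL);
                if (!example_regs)
                        return -ENOMEM;
                kmemleak_ignore(example_regs);  /* no scan, no report */
                return 0;
        }
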
    1070             : 
    1071             : /**
    1072             :  * kmemleak_scan_area - limit the range to be scanned in an allocated object
    1073             :  * @ptr:        pointer to the beginning of, or inside, the object. This also
    1074             :  *              represents the start of the scan area
    1075             :  * @size:       size of the scan area
    1076             :  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
    1077             :  *
    1078             :  * This function is used when it is known that only certain parts of an object
    1079             :  * contain references to other objects. Kmemleak will only scan these areas,
    1080             :  * reducing the number of false negatives.
    1081             :  */
    1082       12220 : void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
    1083             : {
    1084       12220 :         pr_debug("%s(0x%p)\n", __func__, ptr);
    1085             : 
    1086       12220 :         if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
    1087       12220 :                 add_scan_area((unsigned long)ptr, size, gfp);
    1088       12220 : }
    1089             : EXPORT_SYMBOL(kmemleak_scan_area);
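
A sketch of the intended use (hypothetical structure): when only a small header of a large object carries pointers, restricting the scan to that header stops random payload bytes from being misread as references.

        #include <linux/slab.h>
        #include <linux/list.h>
        #include <linux/types.h>
        #include <linux/kmemleak.h>

        struct example_obj {
                struct list_head link;  /* the only pointers live here */
                u8 payload[4096];       /* raw data, no kernel pointers */
        };

        static struct example_obj *example_obj_alloc(void)
        {
                struct example_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

                if (obj)
                        /* scan just the pointer-bearing header */
                        kmemleak_scan_area(&obj->link, sizeof(obj->link),
                                           GFP_KERNEL);
                return obj;
        }
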
    1090             : 
    1091             : /**
    1092             :  * kmemleak_no_scan - do not scan an allocated object
    1093             :  * @ptr:        pointer to beginning of the object
    1094             :  *
    1095             :  * This function notifies kmemleak not to scan the given memory block. Useful
    1096             :  * in situations where it is known that the given object does not contain any
    1097             :  * references to other objects. Kmemleak will not scan such objects, reducing
    1098             :  * the number of false negatives.
    1099             :  */
    1100           0 : void __ref kmemleak_no_scan(const void *ptr)
    1101             : {
    1102           0 :         pr_debug("%s(0x%p)\n", __func__, ptr);
    1103             : 
    1104           0 :         if (kmemleak_enabled && ptr && !IS_ERR(ptr))
    1105           0 :                 object_no_scan((unsigned long)ptr);
    1106           0 : }
    1107             : EXPORT_SYMBOL(kmemleak_no_scan);
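
Note the difference from kmemleak_ignore(): a no-scan object is still tracked and can still be reported if it becomes unreferenced. A hypothetical pixel-buffer allocator:

        #include <linux/slab.h>
        #include <linux/kmemleak.h>

        static void *example_frame_alloc(size_t size)
        {
                void *frame = kmalloc(size, GFP_KERNEL);    /* pure data */

                if (frame)
                        /* skip its contents, but keep leak reporting */
                        kmemleak_no_scan(frame);
                return frame;
        }
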
    1108             : 
    1109             : /**
    1110             :  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
    1111             :  *                       address argument
    1112             :  * @phys:       physical address of the object
    1113             :  * @size:       size of the object
    1114             :  * @min_count:  minimum number of references to this object.
    1115             :  *              See kmemleak_alloc()
    1116             :  * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
    1117             :  */
    1118         362 : void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
    1119             :                                gfp_t gfp)
    1120             : {
    1121         362 :         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
    1122         362 :                 kmemleak_alloc(__va(phys), size, min_count, gfp);
    1123         362 : }
    1124             : EXPORT_SYMBOL(kmemleak_alloc_phys);
    1125             : 
    1126             : /**
    1127             :  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
    1128             :  *                           physical address argument
    1129             :  * @phys:       physical address of the beginning of, or inside, an object. This
    1130             :  *              also represents the start of the range to be freed
    1131             :  * @size:       size to be unregistered
    1132             :  */
    1133           6 : void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
    1134             : {
    1135           6 :         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
    1136           6 :                 kmemleak_free_part(__va(phys), size);
    1137           6 : }
    1138             : EXPORT_SYMBOL(kmemleak_free_part_phys);
    1139             : 
    1140             : /**
    1141             :  * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
    1142             :  *                          address argument
    1143             :  * @phys:       physical address of the object
    1144             :  */
    1145           0 : void __ref kmemleak_not_leak_phys(phys_addr_t phys)
    1146             : {
    1147           0 :         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
    1148           0 :                 kmemleak_not_leak(__va(phys));
    1149           0 : }
    1150             : EXPORT_SYMBOL(kmemleak_not_leak_phys);
    1151             : 
    1152             : /**
    1153             :  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
    1154             :  *                        address argument
    1155             :  * @phys:       physical address of the object
    1156             :  */
    1157           0 : void __ref kmemleak_ignore_phys(phys_addr_t phys)
    1158             : {
    1159           0 :         if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
    1160           0 :                 kmemleak_ignore(__va(phys));
    1161           0 : }
    1162             : EXPORT_SYMBOL(kmemleak_ignore_phys);
    1163             : 
    1164             : /*
    1165             :  * Update an object's checksum and return true if it was modified.
    1166             :  */
    1167           0 : static bool update_checksum(struct kmemleak_object *object)
    1168             : {
    1169           0 :         u32 old_csum = object->checksum;
    1170             : 
    1171           0 :         kasan_disable_current();
    1172           0 :         kcsan_disable_current();
    1173           0 :         object->checksum = crc32(0, (void *)object->pointer, object->size);
    1174           0 :         kasan_enable_current();
    1175           0 :         kcsan_enable_current();
    1176             : 
    1177           0 :         return object->checksum != old_csum;
    1178             : }
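
The checksum is how kmemleak notices that a still-unreferenced (white) object was modified between scans and deserves another pass before being reported. A stripped-down restatement of the idea (hypothetical helper, without the KASAN/KCSAN guards above):

        #include <linux/crc32.h>
        #include <linux/types.h>

        static bool example_block_modified(const void *ptr, size_t size,
                                           u32 *last_csum)
        {
                u32 csum = crc32(0, ptr, size);
                bool changed = csum != *last_csum;

                *last_csum = csum;      /* remember for the next scan */
                return changed;
        }
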
    1179             : 
    1180             : /*
    1181             :  * Update an object's references. object->lock must be held by the caller.
    1182             :  */
    1183           0 : static void update_refs(struct kmemleak_object *object)
    1184             : {
    1185           0 :         if (!color_white(object)) {
    1186             :                 /* non-orphan, ignored or new */
    1187             :                 return;
    1188             :         }
    1189             : 
    1190             :         /*
    1191             :          * Increase the object's reference count (number of pointers to the
    1192             :          * memory block). If this count reaches the required minimum, the
    1193             :          * object's color will become gray and it will be added to the
    1194             :          * gray_list.
    1195             :          */
    1196           0 :         object->count++;
    1197           0 :         if (color_gray(object)) {
    1198             :                 /* put_object() called when removing from gray_list */
    1199           0 :                 WARN_ON(!get_object(object));
    1200           0 :                 list_add_tail(&object->gray_list, &gray_list);
    1201             :         }
    1202             : }
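
For orientation, the tri-color scheme used throughout this file, restated as a rough sketch (hypothetical helper, not the exact color_white()/color_gray() predicates):

        #include <stdbool.h>

        enum example_color { EX_WHITE, EX_GRAY, EX_BLACK };

        static enum example_color example_classify(bool painted_black,
                                                   int count, int min_count)
        {
                if (painted_black)
                        return EX_BLACK;        /* ignored: never reported */
                if (count >= min_count)
                        return EX_GRAY;         /* referenced: queue for scanning */
                return EX_WHITE;                /* candidate leak */
        }
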
    1203             : 
    1204             : /*
    1205             :  * Memory scanning is a long process and it needs to be interruptible. This
    1206             :  * function checks whether such an interrupt condition has occurred.
    1207             :  */
    1208           0 : static int scan_should_stop(void)
    1209             : {
    1210           0 :         if (!kmemleak_enabled)
    1211             :                 return 1;
    1212             : 
    1213             :         /*
    1214             :          * This function may be called from either process or kthread context,
    1215             :          * hence the need to check for both stop conditions.
    1216             :          */
    1217           0 :         if (current->mm)
    1218           0 :                 return signal_pending(current);
    1219             :         else
    1220           0 :                 return kthread_should_stop();
    1221             : 
    1222             :         return 0;
    1223             : }
    1224             : 
    1225             : /*
    1226             :  * Scan a memory block (exclusive range) for valid pointers and add those
    1227             :  * found to the gray list.
    1228             :  */
    1229           0 : static void scan_block(void *_start, void *_end,
    1230             :                        struct kmemleak_object *scanned)
    1231             : {
    1232           0 :         unsigned long *ptr;
    1233           0 :         unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
    1234           0 :         unsigned long *end = _end - (BYTES_PER_POINTER - 1);
    1235           0 :         unsigned long flags;
    1236           0 :         unsigned long untagged_ptr;
    1237             : 
    1238           0 :         raw_spin_lock_irqsave(&kmemleak_lock, flags);
    1239           0 :         for (ptr = start; ptr < end; ptr++) {
    1240           0 :                 struct kmemleak_object *object;
    1241           0 :                 unsigned long pointer;
    1242           0 :                 unsigned long excess_ref;
    1243             : 
    1244           0 :                 if (scan_should_stop())
    1245             :                         break;
    1246             : 
    1247           0 :                 kasan_disable_current();
    1248           0 :                 pointer = *ptr;
    1249           0 :                 kasan_enable_current();
    1250             : 
    1251           0 :                 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
    1252           0 :                 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
    1253           0 :                         continue;
    1254             : 
    1255             :                 /*
    1256             :                  * No need for get_object() here since we hold kmemleak_lock.
    1257             :                  * object->use_count cannot be dropped to 0 while the object
    1258             :                  * is still present in object_tree_root and object_list
    1259             :                  * (with updates protected by kmemleak_lock).
    1260             :                  */
    1261           0 :                 object = lookup_object(pointer, 1);
    1262           0 :                 if (!object)
    1263           0 :                         continue;
    1264           0 :                 if (object == scanned)
    1265             :                         /* self referenced, ignore */
    1266           0 :                         continue;
    1267             : 
    1268             :                 /*
    1269             :                  * Avoid the lockdep recursive warning on object->lock being
    1270             :                  * previously acquired in scan_object(). These locks are
    1271             :                  * enclosed by scan_mutex.
    1272             :                  */
    1273           0 :                 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
    1274             :                 /* only pass surplus references (object already gray) */
    1275           0 :                 if (color_gray(object)) {
    1276           0 :                         excess_ref = object->excess_ref;
    1277             :                         /* no need for update_refs() if object already gray */
    1278             :                 } else {
    1279           0 :                         excess_ref = 0;
    1280           0 :                         update_refs(object);
    1281             :                 }
    1282           0 :                 raw_spin_unlock(&object->lock);
    1283             : 
    1284           0 :                 if (excess_ref) {
    1285           0 :                         object = lookup_object(excess_ref, 0);
    1286           0 :                         if (!object)
    1287           0 :                                 continue;
    1288           0 :                         if (object == scanned)
    1289             :                                 /* circular reference, ignore */
    1290           0 :                                 continue;
    1291           0 :                         raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
    1292           0 :                         update_refs(object);
    1293           0 :                         raw_spin_unlock(&object->lock);
    1294             :                 }
    1295             :         }
    1296           0 :         raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
    1297           0 : }
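
The core of the scan is conservative pointer matching: every aligned word in the range is a candidate reference. A simplified, lock-free restatement (hypothetical helper; the real code above additionally looks each value up in the object tree and updates reference counts):

        #include <stdint.h>
        #include <stddef.h>

        static size_t count_pointer_candidates(const void *start,
                                               const void *end,
                                               uintptr_t min_addr,
                                               uintptr_t max_addr)
        {
                const uintptr_t *ptr = start;   /* assumed word-aligned */
                size_t n = 0;

                while ((const char *)(ptr + 1) <= (const char *)end) {
                        uintptr_t value = *ptr++;

                        if (value >= min_addr && value < max_addr)
                                n++;    /* scan_block() would lookup_object() */
                }
                return n;
        }
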
    1298             : 
    1299             : /*
    1300             :  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
    1301             :  */
    1302             : #ifdef CONFIG_SMP
    1303           0 : static void scan_large_block(void *start, void *end)
    1304             : {
    1305           0 :         void *next;
    1306             : 
    1307           0 :         while (start < end) {
    1308           0 :                 next = min(start + MAX_SCAN_SIZE, end);
    1309           0 :                 scan_block(start, next, NULL);
    1310           0 :                 start = next;
    1311           0 :                 cond_resched();
    1312             :         }
    1313           0 : }
    1314             : #endif
    1315             : 
    1316             : /*
    1317             :  * Scan the memory block corresponding to a kmemleak_object. The caller must
    1318             :  * ensure that object->use_count >= 1.
    1319             :  */
    1320           0 : static void scan_object(struct kmemleak_object *object)
    1321             : {
    1322           0 :         struct kmemleak_scan_area *area;
    1323           0 :         unsigned long flags;
    1324             : 
    1325             :         /*
    1326             :          * Once the object->lock is acquired, the corresponding memory block
    1327             :          * cannot be freed (the same lock is acquired in delete_object).
    1328             :          */
    1329           0 :         raw_spin_lock_irqsave(&object->lock, flags);
    1330           0 :         if (object->flags & OBJECT_NO_SCAN)
    1331           0 :                 goto out;
    1332           0 :         if (!(object->flags & OBJECT_ALLOCATED))
    1333             :                 /* already freed object */
    1334           0 :                 goto out;
    1335           0 :         if (hlist_empty(&object->area_list) ||
    1336           0 :             object->flags & OBJECT_FULL_SCAN) {
    1337           0 :                 void *start = (void *)object->pointer;
    1338           0 :                 void *end = (void *)(object->pointer + object->size);
    1339           0 :                 void *next;
    1340             : 
    1341           0 :                 do {
    1342           0 :                         next = min(start + MAX_SCAN_SIZE, end);
    1343           0 :                         scan_block(start, next, object);
    1344             : 
    1345           0 :                         start = next;
    1346           0 :                         if (start >= end)
    1347             :                                 break;
    1348             : 
    1349           0 :                         raw_spin_unlock_irqrestore(&object->lock, flags);
    1350           0 :                         cond_resched();
    1351           0 :                         raw_spin_lock_irqsave(&object->lock, flags);
    1352           0 :                 } while (object->flags & OBJECT_ALLOCATED);
    1353             :         } else
    1354           0 :                 hlist_for_each_entry(area, &object->area_list, node)
    1355           0 :                         scan_block((void *)area->start,
    1356           0 :                                    (void *)(area->start + area->size),
    1357             :                                    object);
    1358           0 : out:
    1359           0 :         raw_spin_unlock_irqrestore(&object->lock, flags);
    1360           0 : }
    1361             : 
    1362             : /*
    1363             :  * Scan the objects already referenced (gray objects). As these are scanned,
    1364             :  * more become referenced; if there are no memory leaks, all objects get scanned.
    1365             :  */
    1366           0 : static void scan_gray_list(void)
    1367             : {
    1368           0 :         struct kmemleak_object *object, *tmp;
    1369             : 
    1370             :         /*
    1371             :          * The list traversal is safe for both tail additions and removals
    1372             :          * from inside the loop. The kmemleak objects cannot be freed from
    1373             :          * outside the loop because their use_count was incremented.
    1374             :          */
    1375           0 :         object = list_entry(gray_list.next, typeof(*object), gray_list);
    1376           0 :         while (&object->gray_list != &gray_list) {
    1377           0 :                 cond_resched();
    1378             : 
    1379             :                 /* may add new objects to the list */
    1380           0 :                 if (!scan_should_stop())
    1381           0 :                         scan_object(object);
    1382             : 
    1383           0 :                 tmp = list_entry(object->gray_list.next, typeof(*object),
    1384             :                                  gray_list);
    1385             : 
    1386             :                 /* remove the object from the list and release it */
    1387           0 :                 list_del(&object->gray_list);
    1388           0 :                 put_object(object);
    1389             : 
    1390           0 :                 object = tmp;
    1391             :         }
    1392           0 :         WARN_ON(!list_empty(&gray_list));
    1393           0 : }
    1394             : 
    1395             : /*
    1396             :  * Scan data sections and all the referenced memory blocks allocated via the
    1397             :  * kernel's standard allocators. This function must be called with the
    1398             :  * scan_mutex held.
    1399             :  */
    1400           0 : static void kmemleak_scan(void)
    1401             : {
    1402           0 :         unsigned long flags;
    1403           0 :         struct kmemleak_object *object;
    1404           0 :         int i;
    1405           0 :         int new_leaks = 0;
    1406             : 
    1407           0 :         jiffies_last_scan = jiffies;
    1408             : 
    1409             :         /* prepare the kmemleak_object structures */
    1410           0 :         rcu_read_lock();
    1411           0 :         list_for_each_entry_rcu(object, &object_list, object_list) {
    1412           0 :                 raw_spin_lock_irqsave(&object->lock, flags);
    1413             : #ifdef DEBUG
    1414             :                 /*
    1415             :                  * With a few exceptions there should be a maximum of
    1416             :                  * 1 reference to any object at this point.
    1417             :                  */
    1418             :                 if (atomic_read(&object->use_count) > 1) {
    1419             :                         pr_debug("object->use_count = %d\n",
    1420             :                                  atomic_read(&object->use_count));
    1421             :                         dump_object_info(object);
    1422             :                 }
    1423             : #endif
    1424             :                 /* reset the reference count (whiten the object) */
    1425           0 :                 object->count = 0;
    1426           0 :                 if (color_gray(object) && get_object(object))
    1427           0 :                         list_add_tail(&object->gray_list, &gray_list);
    1428             : 
    1429           0 :                 raw_spin_unlock_irqrestore(&object->lock, flags);
    1430             :         }
    1431           0 :         rcu_read_unlock();
    1432             : 
    1433             : #ifdef CONFIG_SMP
    1434             :         /* per-cpu sections scanning */
    1435           0 :         for_each_possible_cpu(i)
    1436           0 :                 scan_large_block(__per_cpu_start + per_cpu_offset(i),
    1437           0 :                                  __per_cpu_end + per_cpu_offset(i));
    1438             : #endif
    1439             : 
    1440             :         /*
    1441             :          * Struct page scanning for each node.
    1442             :          */
    1443           0 :         get_online_mems();
    1444           0 :         for_each_online_node(i) {
    1445           0 :                 unsigned long start_pfn = node_start_pfn(i);
    1446           0 :                 unsigned long end_pfn = node_end_pfn(i);
    1447           0 :                 unsigned long pfn;
    1448             : 
    1449           0 :                 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
    1450           0 :                         struct page *page = pfn_to_online_page(pfn);
    1451             : 
    1452           0 :                         if (!page)
    1453           0 :                                 continue;
    1454             : 
    1455             :                         /* only scan pages belonging to this node */
    1456           0 :                         if (page_to_nid(page) != i)
    1457           0 :                                 continue;
    1458             :                         /* only scan if page is in use */
    1459           0 :                         if (page_count(page) == 0)
    1460           0 :                                 continue;
    1461           0 :                         scan_block(page, page + 1, NULL);
    1462           0 :                         if (!(pfn & 63))
    1463           0 :                                 cond_resched();
    1464             :                 }
    1465             :         }
    1466           0 :         put_online_mems();
    1467             : 
    1468             :         /*
    1469             :          * Scanning the task stacks (may introduce false negatives).
    1470             :          */
    1471           0 :         if (kmemleak_stack_scan) {
    1472           0 :                 struct task_struct *p, *g;
    1473             : 
    1474           0 :                 rcu_read_lock();
    1475           0 :                 for_each_process_thread(g, p) {
    1476           0 :                         void *stack = try_get_task_stack(p);
    1477           0 :                         if (stack) {
    1478           0 :                                 scan_block(stack, stack + THREAD_SIZE, NULL);
    1479           0 :                                 put_task_stack(p);
    1480             :                         }
    1481             :                 }
    1482           0 :                 rcu_read_unlock();
    1483             :         }
    1484             : 
    1485             :         /*
    1486             :          * Scan the objects already referenced from the sections scanned
    1487             :          * above.
    1488             :          */
    1489           0 :         scan_gray_list();
    1490             : 
    1491             :         /*
    1492             :          * Check for new or unreferenced objects modified since the previous
    1493             :          * scan and color them gray until the next scan.
    1494             :          */
    1495           0 :         rcu_read_lock();
    1496           0 :         list_for_each_entry_rcu(object, &object_list, object_list) {
    1497           0 :                 raw_spin_lock_irqsave(&object->lock, flags);
    1498           0 :                 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
    1499           0 :                     && update_checksum(object) && get_object(object)) {
    1500             :                         /* color it gray temporarily */
    1501           0 :                         object->count = object->min_count;
    1502           0 :                         list_add_tail(&object->gray_list, &gray_list);
    1503             :                 }
    1504           0 :                 raw_spin_unlock_irqrestore(&object->lock, flags);
    1505             :         }
    1506           0 :         rcu_read_unlock();
    1507             : 
    1508             :         /*
    1509             :          * Re-scan the gray list for modified unreferenced objects.
    1510             :          */
    1511           0 :         scan_gray_list();
    1512             : 
    1513             :         /*
    1514             :          * If scanning was stopped do not report any new unreferenced objects.
    1515             :          */
    1516           0 :         if (scan_should_stop())
    1517             :                 return;
    1518             : 
    1519             :         /*
    1520             :          * Scanning result reporting.
    1521             :          */
    1522           0 :         rcu_read_lock();
    1523           0 :         list_for_each_entry_rcu(object, &object_list, object_list) {
    1524           0 :                 raw_spin_lock_irqsave(&object->lock, flags);
    1525           0 :                 if (unreferenced_object(object) &&
    1526           0 :                     !(object->flags & OBJECT_REPORTED)) {
    1527           0 :                         object->flags |= OBJECT_REPORTED;
    1528             : 
    1529           0 :                         if (kmemleak_verbose)
    1530           0 :                                 print_unreferenced(NULL, object);
    1531             : 
    1532           0 :                         new_leaks++;
    1533             :                 }
    1534           0 :                 raw_spin_unlock_irqrestore(&object->lock, flags);
    1535             :         }
    1536           0 :         rcu_read_unlock();
    1537             : 
    1538           0 :         if (new_leaks) {
    1539           0 :                 kmemleak_found_leaks = true;
    1540             : 
    1541           0 :                 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
    1542             :                         new_leaks);
    1543             :         }
    1544             : 
    1545             : }
    1546             : 
    1547             : /*
    1548             :  * Thread function performing automatic memory scanning. Unreferenced objects
    1549             :  * at the end of a memory scan are reported, but only the first time.
    1550             :  */
    1551           0 : static int kmemleak_scan_thread(void *arg)
    1552             : {
    1553           0 :         static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
    1554             : 
    1555           0 :         pr_info("Automatic memory scanning thread started\n");
    1556           0 :         set_user_nice(current, 10);
    1557             : 
    1558             :         /*
    1559             :          * Wait before the first scan to allow the system to fully initialize.
    1560             :          */
    1561           0 :         if (first_run) {
    1562           0 :                 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
    1563           0 :                 first_run = 0;
    1564           0 :                 while (timeout && !kthread_should_stop())
    1565           0 :                         timeout = schedule_timeout_interruptible(timeout);
    1566             :         }
    1567             : 
    1568           0 :         while (!kthread_should_stop()) {
    1569           0 :                 signed long timeout = jiffies_scan_wait;
    1570             : 
    1571           0 :                 mutex_lock(&scan_mutex);
    1572           0 :                 kmemleak_scan();
    1573           0 :                 mutex_unlock(&scan_mutex);
    1574             : 
    1575             :                 /* wait before the next scan */
    1576           0 :                 while (timeout && !kthread_should_stop())
    1577           0 :                         timeout = schedule_timeout_interruptible(timeout);
    1578             :         }
    1579             : 
    1580           0 :         pr_info("Automatic memory scanning thread ended\n");
    1581             : 
    1582           0 :         return 0;
    1583             : }
    1584             : 
    1585             : /*
    1586             :  * Start the automatic memory scanning thread. This function must be called
    1587             :  * with the scan_mutex held.
    1588             :  */
    1589           0 : static void start_scan_thread(void)
    1590             : {
    1591           0 :         if (scan_thread)
    1592             :                 return;
    1593           0 :         scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
    1594           0 :         if (IS_ERR(scan_thread)) {
    1595           0 :                 pr_warn("Failed to create the scan thread\n");
    1596           0 :                 scan_thread = NULL;
    1597             :         }
    1598             : }
    1599             : 
    1600             : /*
    1601             :  * Stop the automatic memory scanning thread.
    1602             :  */
    1603           0 : static void stop_scan_thread(void)
    1604             : {
    1605           0 :         if (scan_thread) {
    1606           0 :                 kthread_stop(scan_thread);
    1607           0 :                 scan_thread = NULL;
    1608             :         }
    1609           0 : }
    1610             : 
    1611             : /*
    1612             :  * Iterate over the object_list and return the first valid object at or after
    1613             :  * the required position with its use_count incremented. The function triggers
    1614             :  * a memory scan when the pos argument points to the first position.
    1615             :  */
    1616           0 : static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
    1617             : {
    1618           0 :         struct kmemleak_object *object;
    1619           0 :         loff_t n = *pos;
    1620           0 :         int err;
    1621             : 
    1622           0 :         err = mutex_lock_interruptible(&scan_mutex);
    1623           0 :         if (err < 0)
    1624           0 :                 return ERR_PTR(err);
    1625             : 
    1626           0 :         rcu_read_lock();
    1627           0 :         list_for_each_entry_rcu(object, &object_list, object_list) {
    1628           0 :                 if (n-- > 0)
    1629           0 :                         continue;
    1630           0 :                 if (get_object(object))
    1631           0 :                         goto out;
    1632             :         }
    1633             :         object = NULL;
    1634             : out:
    1635             :         return object;
    1636             : }
    1637             : 
    1638             : /*
    1639             :  * Return the next object in the object_list. The function decrements the
    1640             :  * use_count of the previous object and increases that of the next one.
    1641             :  */
    1642           0 : static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    1643             : {
    1644           0 :         struct kmemleak_object *prev_obj = v;
    1645           0 :         struct kmemleak_object *next_obj = NULL;
    1646           0 :         struct kmemleak_object *obj = prev_obj;
    1647             : 
    1648           0 :         ++(*pos);
    1649             : 
    1650           0 :         list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
    1651           0 :                 if (get_object(obj)) {
    1652             :                         next_obj = obj;
    1653             :                         break;
    1654             :                 }
    1655             :         }
    1656             : 
    1657           0 :         put_object(prev_obj);
    1658           0 :         return next_obj;
    1659             : }
    1660             : 
    1661             : /*
    1662             :  * Decrement the use_count of the last object returned, if any.
    1663             :  */
    1664           0 : static void kmemleak_seq_stop(struct seq_file *seq, void *v)
    1665             : {
    1666           0 :         if (!IS_ERR(v)) {
    1667             :                 /*
    1668             :                  * kmemleak_seq_start may return ERR_PTR if the scan_mutex
    1669             :                  * waiting was interrupted, so only release it if !IS_ERR.
    1670             :                  */
    1671           0 :                 rcu_read_unlock();
    1672           0 :                 mutex_unlock(&scan_mutex);
    1673           0 :                 if (v)
    1674           0 :                         put_object(v);
    1675             :         }
    1676           0 : }
    1677             : 
    1678             : /*
    1679             :  * Print the information for an unreferenced object to the seq file.
    1680             :  */
    1681           0 : static int kmemleak_seq_show(struct seq_file *seq, void *v)
    1682             : {
    1683           0 :         struct kmemleak_object *object = v;
    1684           0 :         unsigned long flags;
    1685             : 
    1686           0 :         raw_spin_lock_irqsave(&object->lock, flags);
    1687           0 :         if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
    1688           0 :                 print_unreferenced(seq, object);
    1689           0 :         raw_spin_unlock_irqrestore(&object->lock, flags);
    1690           0 :         return 0;
    1691             : }
    1692             : 
    1693             : static const struct seq_operations kmemleak_seq_ops = {
    1694             :         .start = kmemleak_seq_start,
    1695             :         .next  = kmemleak_seq_next,
    1696             :         .stop  = kmemleak_seq_stop,
    1697             :         .show  = kmemleak_seq_show,
    1698             : };
    1699             : 
    1700           0 : static int kmemleak_open(struct inode *inode, struct file *file)
    1701             : {
    1702           0 :         return seq_open(file, &kmemleak_seq_ops);
    1703             : }
    1704             : 
    1705           0 : static int dump_str_object_info(const char *str)
    1706             : {
    1707           0 :         unsigned long flags;
    1708           0 :         struct kmemleak_object *object;
    1709           0 :         unsigned long addr;
    1710             : 
    1711           0 :         if (kstrtoul(str, 0, &addr))
    1712             :                 return -EINVAL;
    1713           0 :         object = find_and_get_object(addr, 0);
    1714           0 :         if (!object) {
    1715           0 :                 pr_info("Unknown object at 0x%08lx\n", addr);
    1716           0 :                 return -EINVAL;
    1717             :         }
    1718             : 
    1719           0 :         raw_spin_lock_irqsave(&object->lock, flags);
    1720           0 :         dump_object_info(object);
    1721           0 :         raw_spin_unlock_irqrestore(&object->lock, flags);
    1722             : 
    1723           0 :         put_object(object);
    1724           0 :         return 0;
    1725             : }
    1726             : 
    1727             : /*
    1728             :  * We use grey instead of black to ensure we can do future scans on the same
    1729             :  * objects. If we did not do future scans, these black objects could
    1730             :  * potentially contain references to newly allocated objects in the future and
    1731             :  * we'd end up with false positives.
    1732             :  */
    1733           0 : static void kmemleak_clear(void)
    1734             : {
    1735           0 :         struct kmemleak_object *object;
    1736           0 :         unsigned long flags;
    1737             : 
    1738           0 :         rcu_read_lock();
    1739           0 :         list_for_each_entry_rcu(object, &object_list, object_list) {
    1740           0 :                 raw_spin_lock_irqsave(&object->lock, flags);
    1741           0 :                 if ((object->flags & OBJECT_REPORTED) &&
    1742           0 :                     unreferenced_object(object))
    1743           0 :                         __paint_it(object, KMEMLEAK_GREY);
    1744           0 :                 raw_spin_unlock_irqrestore(&object->lock, flags);
    1745             :         }
    1746           0 :         rcu_read_unlock();
    1747             : 
    1748           0 :         kmemleak_found_leaks = false;
    1749           0 : }
    1750             : 
    1751             : static void __kmemleak_do_cleanup(void);
    1752             : 
    1753             : /*
    1754             :  * File write operation to configure kmemleak at run-time. The following
    1755             :  * commands can be written to the /sys/kernel/debug/kmemleak file:
    1756             :  *   off        - disable kmemleak (irreversible)
    1757             :  *   stack=on   - enable the task stacks scanning
    1758             :  *   stack=off  - disable the task stacks scanning
    1759             :  *   scan=on    - start the automatic memory scanning thread
    1760             :  *   scan=off   - stop the automatic memory scanning thread
    1761             :  *   scan=...   - set the automatic memory scanning period in seconds (0 to
    1762             :  *                disable it)
    1763             :  *   scan       - trigger a memory scan
    1764             :  *   clear      - mark all currently reported unreferenced kmemleak objects
    1765             :  *                as grey so they are no longer printed, or free all kmemleak
    1766             :  *                objects if kmemleak has been disabled.
    1767             :  *   dump=...   - dump information about the object found at the given address
    1768             :  */
    1769           0 : static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
    1770             :                               size_t size, loff_t *ppos)
    1771             : {
    1772           0 :         char buf[64];
    1773           0 :         int buf_size;
    1774           0 :         int ret;
    1775             : 
    1776           0 :         buf_size = min(size, (sizeof(buf) - 1));
    1777           0 :         if (strncpy_from_user(buf, user_buf, buf_size) < 0)
    1778             :                 return -EFAULT;
    1779           0 :         buf[buf_size] = 0;
    1780             : 
    1781           0 :         ret = mutex_lock_interruptible(&scan_mutex);
    1782           0 :         if (ret < 0)
    1783           0 :                 return ret;
    1784             : 
    1785           0 :         if (strncmp(buf, "clear", 5) == 0) {
    1786           0 :                 if (kmemleak_enabled)
    1787           0 :                         kmemleak_clear();
    1788             :                 else
    1789           0 :                         __kmemleak_do_cleanup();
    1790           0 :                 goto out;
    1791             :         }
    1792             : 
    1793           0 :         if (!kmemleak_enabled) {
    1794           0 :                 ret = -EPERM;
    1795           0 :                 goto out;
    1796             :         }
    1797             : 
    1798           0 :         if (strncmp(buf, "off", 3) == 0)
    1799           0 :                 kmemleak_disable();
    1800           0 :         else if (strncmp(buf, "stack=on", 8) == 0)
    1801           0 :                 kmemleak_stack_scan = 1;
    1802           0 :         else if (strncmp(buf, "stack=off", 9) == 0)
    1803           0 :                 kmemleak_stack_scan = 0;
    1804           0 :         else if (strncmp(buf, "scan=on", 7) == 0)
    1805           0 :                 start_scan_thread();
    1806           0 :         else if (strncmp(buf, "scan=off", 8) == 0)
    1807           0 :                 stop_scan_thread();
    1808           0 :         else if (strncmp(buf, "scan=", 5) == 0) {
    1809           0 :                 unsigned long secs;
    1810             : 
    1811           0 :                 ret = kstrtoul(buf + 5, 0, &secs);
    1812           0 :                 if (ret < 0)
    1813           0 :                         goto out;
    1814           0 :                 stop_scan_thread();
    1815           0 :                 if (secs) {
    1816           0 :                         jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
    1817           0 :                         start_scan_thread();
    1818             :                 }
    1819           0 :         } else if (strncmp(buf, "scan", 4) == 0)
    1820           0 :                 kmemleak_scan();
    1821           0 :         else if (strncmp(buf, "dump=", 5) == 0)
    1822           0 :                 ret = dump_str_object_info(buf + 5);
    1823             :         else
    1824             :                 ret = -EINVAL;
    1825             : 
    1826           0 : out:
    1827           0 :         mutex_unlock(&scan_mutex);
    1828           0 :         if (ret < 0)
    1829           0 :                 return ret;
    1830             : 
    1831             :         /* ignore the rest of the buffer, only one command at a time */
    1832           0 :         *ppos += size;
    1833           0 :         return size;
    1834             : }
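
From userspace these commands are plain writes to the debugfs file, e.g. "echo scan > /sys/kernel/debug/kmemleak". An equivalent C sketch (hypothetical helper):

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        static int kmemleak_command(const char *cmd)
        {
                int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
                ssize_t len = (ssize_t)strlen(cmd);
                int ret = 0;

                if (fd < 0)
                        return -1;
                if (write(fd, cmd, len) != len)
                        ret = -1;       /* e.g. rejected once kmemleak is off */
                close(fd);
                return ret;
        }
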
    1835             : 
    1836             : static const struct file_operations kmemleak_fops = {
    1837             :         .owner          = THIS_MODULE,
    1838             :         .open           = kmemleak_open,
    1839             :         .read           = seq_read,
    1840             :         .write          = kmemleak_write,
    1841             :         .llseek         = seq_lseek,
    1842             :         .release        = seq_release,
    1843             : };
    1844             : 
    1845           0 : static void __kmemleak_do_cleanup(void)
    1846             : {
    1847           0 :         struct kmemleak_object *object, *tmp;
    1848             : 
    1849             :         /*
    1850             :          * Kmemleak has already been disabled; there is no need for RCU list
    1851             :          * traversal or for holding kmemleak_lock.
    1852             :          */
    1853           0 :         list_for_each_entry_safe(object, tmp, &object_list, object_list) {
    1854           0 :                 __remove_object(object);
    1855           0 :                 __delete_object(object);
    1856             :         }
    1857           0 : }
    1858             : 
    1859             : /*
    1860             :  * Stop the memory scanning thread and free the kmemleak internal objects if
    1861             :  * no memory leaks were found (otherwise, kmemleak may still have some useful
    1862             :  * information on memory leaks).
    1863             :  */
    1864           0 : static void kmemleak_do_cleanup(struct work_struct *work)
    1865             : {
    1866           0 :         stop_scan_thread();
    1867             : 
    1868           0 :         mutex_lock(&scan_mutex);
    1869             :         /*
    1870             :          * Once the scan thread is guaranteed to have stopped, it is safe to
    1871             :          * no longer track object freeing. The ordering between stopping the
    1872             :          * scan thread and the memory accesses below is guaranteed by the
    1873             :          * kthread_stop() function.
    1874             :          */
    1875           0 :         kmemleak_free_enabled = 0;
    1876           0 :         mutex_unlock(&scan_mutex);
    1877             : 
    1878           0 :         if (!kmemleak_found_leaks)
    1879           0 :                 __kmemleak_do_cleanup();
    1880             :         else
    1881           0 :                 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
    1882           0 : }
    1883             : 
    1884             : static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
    1885             : 
    1886             : /*
    1887             :  * Disable kmemleak. No memory allocation/freeing will be traced once this
    1888             :  * function is called. Disabling kmemleak is an irreversible operation.
    1889             :  */
    1890           0 : static void kmemleak_disable(void)
    1891             : {
    1892             :         /* atomically check whether it was already invoked */
    1893           0 :         if (cmpxchg(&kmemleak_error, 0, 1))
    1894             :                 return;
    1895             : 
    1896             :         /* stop any memory operation tracing */
    1897           0 :         kmemleak_enabled = 0;
    1898             : 
    1899             :         /* check whether it is too early for a kernel thread */
    1900           0 :         if (kmemleak_initialized)
    1901           0 :                 schedule_work(&cleanup_work);
    1902             :         else
    1903           0 :                 kmemleak_free_enabled = 0;
    1904             : 
    1905           0 :         pr_info("Kernel memory leak detector disabled\n");
    1906             : }
    1907             : 
    1908             : /*
    1909             :  * Allow boot-time kmemleak disabling (enabled by default).
    1910             :  */
    1911           0 : static int __init kmemleak_boot_config(char *str)
    1912             : {
    1913           0 :         if (!str)
    1914             :                 return -EINVAL;
    1915           0 :         if (strcmp(str, "off") == 0)
    1916           0 :                 kmemleak_disable();
    1917           0 :         else if (strcmp(str, "on") == 0)
    1918           0 :                 kmemleak_skip_disable = 1;
    1919             :         else
    1920             :                 return -EINVAL;
    1921             :         return 0;
    1922             : }
    1923             : early_param("kmemleak", kmemleak_boot_config);
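
The matching kernel command-line usage:

        kmemleak=off    # disable kmemleak at boot (irreversible)
        kmemleak=on     # keep kmemleak enabled when
                        # CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
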
    1924             : 
    1925             : /*
    1926             :  * Kmemleak initialization.
    1927             :  */
    1928           1 : void __init kmemleak_init(void)
    1929             : {
    1930             : #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
    1931             :         if (!kmemleak_skip_disable) {
    1932             :                 kmemleak_disable();
    1933             :                 return;
    1934             :         }
    1935             : #endif
    1936             : 
    1937           1 :         if (kmemleak_error)
    1938             :                 return;
    1939             : 
    1940           1 :         jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
    1941           1 :         jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
    1942             : 
    1943           1 :         object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
    1944           1 :         scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
    1945             : 
    1946             :         /* register the data/bss sections */
    1947           1 :         create_object((unsigned long)_sdata, _edata - _sdata,
    1948             :                       KMEMLEAK_GREY, GFP_ATOMIC);
    1949           1 :         create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
    1950             :                       KMEMLEAK_GREY, GFP_ATOMIC);
    1951             :         /* only register .data..ro_after_init if not within .data */
    1952           1 :         if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
    1953           1 :                 create_object((unsigned long)__start_ro_after_init,
    1954           1 :                               __end_ro_after_init - __start_ro_after_init,
    1955             :                               KMEMLEAK_GREY, GFP_ATOMIC);
    1956             : }
    1957             : 
    1958             : /*
    1959             :  * Late initialization function.
    1960             :  */
    1961           1 : static int __init kmemleak_late_init(void)
    1962             : {
    1963           1 :         kmemleak_initialized = 1;
    1964             : 
    1965           1 :         debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
    1966             : 
    1967           1 :         if (kmemleak_error) {
    1968             :                 /*
    1969             :                  * Some error occurred and kmemleak was disabled. There is a
    1970             :                  * small chance that kmemleak_disable() was called immediately
    1971             :                  * after setting kmemleak_initialized and we may end up with
    1972             :                  * two clean-up threads but serialized by scan_mutex.
    1973             :                  */
    1974           0 :                 schedule_work(&cleanup_work);
    1975           0 :                 return -ENOMEM;
    1976             :         }
    1977             : 
    1978           1 :         if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
    1979             :                 mutex_lock(&scan_mutex);
    1980             :                 start_scan_thread();
    1981             :                 mutex_unlock(&scan_mutex);
    1982             :         }
    1983             : 
    1984           1 :         pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
    1985             :                 mem_pool_free_count);
    1986             : 
    1987           1 :         return 0;
    1988             : }
    1989             : late_initcall(kmemleak_late_init);

Generated by: LCOV version 1.14