LCOV - code coverage report
Current view: top level - mm - slab_common.c (source / functions)
Test: landlock.info
Date: 2021-04-22 12:43:58
                  Hit    Total    Coverage
Lines:            177      384      46.1 %
Functions:         20       40      50.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Slab allocator functions that are independent of the allocator strategy
       4             :  *
       5             :  * (C) 2012 Christoph Lameter <cl@linux.com>
       6             :  */
       7             : #include <linux/slab.h>
       8             : 
       9             : #include <linux/mm.h>
      10             : #include <linux/poison.h>
      11             : #include <linux/interrupt.h>
      12             : #include <linux/memory.h>
      13             : #include <linux/cache.h>
      14             : #include <linux/compiler.h>
      15             : #include <linux/kfence.h>
      16             : #include <linux/module.h>
      17             : #include <linux/cpu.h>
      18             : #include <linux/uaccess.h>
      19             : #include <linux/seq_file.h>
      20             : #include <linux/proc_fs.h>
      21             : #include <linux/debugfs.h>
      22             : #include <linux/kasan.h>
      23             : #include <asm/cacheflush.h>
      24             : #include <asm/tlbflush.h>
      25             : #include <asm/page.h>
      26             : #include <linux/memcontrol.h>
      27             : 
      28             : #define CREATE_TRACE_POINTS
      29             : #include <trace/events/kmem.h>
      30             : 
      31             : #include "internal.h"
      32             : 
      33             : #include "slab.h"
      34             : 
      35             : enum slab_state slab_state;
      36             : LIST_HEAD(slab_caches);
      37             : DEFINE_MUTEX(slab_mutex);
      38             : struct kmem_cache *kmem_cache;
      39             : 
      40             : #ifdef CONFIG_HARDENED_USERCOPY
      41             : bool usercopy_fallback __ro_after_init =
      42             :                 IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
      43             : module_param(usercopy_fallback, bool, 0400);
      44             : MODULE_PARM_DESC(usercopy_fallback,
      45             :                 "WARN instead of reject usercopy whitelist violations");
      46             : #endif
      47             : 
      48             : static LIST_HEAD(slab_caches_to_rcu_destroy);
      49             : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
      50             : static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
      51             :                     slab_caches_to_rcu_destroy_workfn);
      52             : 
      53             : /*
      54             :  * Set of flags that will prevent slab merging
      55             :  */
      56             : #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
      57             :                 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
      58             :                 SLAB_FAILSLAB | kasan_never_merge())
      59             : 
      60             : #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
      61             :                          SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
      62             : 
      63             : /*
       64             :  * Merge control. If this is set, no merging of slab caches will occur.
      65             :  */
      66             : static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
      67             : 
      68           0 : static int __init setup_slab_nomerge(char *str)
      69             : {
      70           0 :         slab_nomerge = true;
      71           0 :         return 1;
      72             : }
      73             : 
      74             : #ifdef CONFIG_SLUB
      75             : __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
      76             : #endif
      77             : 
      78             : __setup("slab_nomerge", setup_slab_nomerge);
      79             : 
      80             : /*
      81             :  * Determine the size of a slab object
      82             :  */
      83           0 : unsigned int kmem_cache_size(struct kmem_cache *s)
      84             : {
      85           0 :         return s->object_size;
      86             : }
      87             : EXPORT_SYMBOL(kmem_cache_size);
      88             : 
      89             : #ifdef CONFIG_DEBUG_VM
      90         117 : static int kmem_cache_sanity_check(const char *name, unsigned int size)
      91             : {
      92         117 :         if (!name || in_interrupt() || size < sizeof(void *) ||
      93             :                 size > KMALLOC_MAX_SIZE) {
      94           0 :                 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
      95           0 :                 return -EINVAL;
      96             :         }
      97             : 
      98         117 :         WARN_ON(strchr(name, ' '));     /* It confuses parsers */
      99             :         return 0;
     100             : }
     101             : #else
     102             : static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
     103             : {
     104             :         return 0;
     105             : }
     106             : #endif
     107             : 
     108           0 : void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
     109             : {
     110           0 :         size_t i;
     111             : 
     112           0 :         for (i = 0; i < nr; i++) {
     113           0 :                 if (s)
     114           0 :                         kmem_cache_free(s, p[i]);
     115             :                 else
     116           0 :                         kfree(p[i]);
     117             :         }
     118           0 : }
     119             : 
     120           0 : int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
     121             :                                                                 void **p)
     122             : {
     123           0 :         size_t i;
     124             : 
     125           0 :         for (i = 0; i < nr; i++) {
     126           0 :                 void *x = p[i] = kmem_cache_alloc(s, flags);
     127           0 :                 if (!x) {
     128           0 :                         __kmem_cache_free_bulk(s, i, p);
     129           0 :                         return 0;
     130             :                 }
     131             :         }
     132           0 :         return i;
     133             : }
     134             : 
     135             : /*
     136             :  * Figure out what the alignment of the objects will be given a set of
     137             :  * flags, a user specified alignment and the size of the objects.
     138             :  */
     139         145 : static unsigned int calculate_alignment(slab_flags_t flags,
     140             :                 unsigned int align, unsigned int size)
     141             : {
     142             :         /*
      143             :          * If the user wants hardware cache aligned objects, follow that
      144             :          * suggestion if the object is sufficiently large.
      145             :          *
      146             :          * The hardware cache alignment cannot override the specified
      147             :          * alignment though. If that is greater, then use it.
     148             :          */
     149         145 :         if (flags & SLAB_HWCACHE_ALIGN) {
     150          44 :                 unsigned int ralign;
     151             : 
     152          44 :                 ralign = cache_line_size();
     153          45 :                 while (size <= ralign / 2)
     154             :                         ralign /= 2;
     155          44 :                 align = max(align, ralign);
     156             :         }
     157             : 
     158         145 :         if (align < ARCH_SLAB_MINALIGN)
     159             :                 align = ARCH_SLAB_MINALIGN;
     160             : 
     161         145 :         return ALIGN(align, sizeof(void *));
     162             : }
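/*
 * Editor's note -- an illustrative trace, not part of the measured source.
 * Assuming a 64-byte cache line, 8-byte pointers and ARCH_SLAB_MINALIGN <= 8:
 * a 20-byte object created with SLAB_HWCACHE_ALIGN and align == 0 starts
 * with ralign = 64, which is halved once (20 <= 32) to 32; the loop stops
 * because 20 <= 16 is false.  The result is ALIGN(max(0, 32), 8) == 32,
 * so two objects share a cache line but no object straddles one.
 */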
     163             : 
     164             : /*
     165             :  * Find a mergeable slab cache
     166             :  */
     167         145 : int slab_unmergeable(struct kmem_cache *s)
     168             : {
     169         145 :         if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
     170         145 :                 return 1;
     171             : 
     172           0 :         if (s->ctor)
     173             :                 return 1;
     174             : 
     175           0 :         if (s->usersize)
     176             :                 return 1;
     177             : 
     178             :         /*
     179             :          * We may have set a slab to be unmergeable during bootstrap.
     180             :          */
     181           0 :         if (s->refcount < 0)
     182           0 :                 return 1;
     183             : 
     184             :         return 0;
     185             : }
     186             : 
     187         109 : struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
     188             :                 slab_flags_t flags, const char *name, void (*ctor)(void *))
     189             : {
     190         109 :         struct kmem_cache *s;
     191             : 
     192         109 :         if (slab_nomerge)
     193             :                 return NULL;
     194             : 
     195           0 :         if (ctor)
     196             :                 return NULL;
     197             : 
     198           0 :         size = ALIGN(size, sizeof(void *));
     199           0 :         align = calculate_alignment(flags, align, size);
     200           0 :         size = ALIGN(size, align);
     201           0 :         flags = kmem_cache_flags(size, flags, name);
     202             : 
     203           0 :         if (flags & SLAB_NEVER_MERGE)
     204             :                 return NULL;
     205             : 
     206           0 :         list_for_each_entry_reverse(s, &slab_caches, list) {
     207           0 :                 if (slab_unmergeable(s))
     208           0 :                         continue;
     209             : 
     210           0 :                 if (size > s->size)
     211           0 :                         continue;
     212             : 
     213           0 :                 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
     214           0 :                         continue;
     215             :                 /*
     216             :                  * Check if alignment is compatible.
     217             :                  * Courtesy of Adrian Drzewiecki
     218             :                  */
     219           0 :                 if ((s->size & ~(align - 1)) != s->size)
     220           0 :                         continue;
     221             : 
     222           0 :                 if (s->size - size >= sizeof(void *))
     223           0 :                         continue;
     224             : 
     225             :                 if (IS_ENABLED(CONFIG_SLAB) && align &&
     226             :                         (align > s->align || s->align % align))
     227             :                         continue;
     228             : 
     229             :                 return s;
     230             :         }
     231             :         return NULL;
     232             : }
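/*
 * Editor's note -- an illustrative trace under assumed values, not part of
 * the measured source.  On a 64-bit arch with ARCH_SLAB_MINALIGN <= 8, a
 * request for size 40, align 0, flags 0 and no ctor is normalized to
 * size = 40, align = 8.  An existing cache is reused only if it passes
 * slab_unmergeable(), its SLAB_MERGE_SAME bits match, its s->size is a
 * multiple of 8, and it wastes less than sizeof(void *) bytes per object
 * (s->size - 40 < 8) -- which here means an s->size of exactly 40.
 */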
     233             : 
     234         117 : static struct kmem_cache *create_cache(const char *name,
     235             :                 unsigned int object_size, unsigned int align,
     236             :                 slab_flags_t flags, unsigned int useroffset,
     237             :                 unsigned int usersize, void (*ctor)(void *),
     238             :                 struct kmem_cache *root_cache)
     239             : {
     240         117 :         struct kmem_cache *s;
     241         117 :         int err;
     242             : 
     243         117 :         if (WARN_ON(useroffset + usersize > object_size))
     244           0 :                 useroffset = usersize = 0;
     245             : 
     246         117 :         err = -ENOMEM;
     247         117 :         s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
     248         117 :         if (!s)
     249           0 :                 goto out;
     250             : 
     251         117 :         s->name = name;
     252         117 :         s->size = s->object_size = object_size;
     253         117 :         s->align = align;
     254         117 :         s->ctor = ctor;
     255         117 :         s->useroffset = useroffset;
     256         117 :         s->usersize = usersize;
     257             : 
     258         117 :         err = __kmem_cache_create(s, flags);
     259         117 :         if (err)
     260           0 :                 goto out_free_cache;
     261             : 
     262         117 :         s->refcount = 1;
     263         117 :         list_add(&s->list, &slab_caches);
     264         117 : out:
     265         117 :         if (err)
     266           0 :                 return ERR_PTR(err);
     267             :         return s;
     268             : 
     269           0 : out_free_cache:
     270           0 :         kmem_cache_free(kmem_cache, s);
     271           0 :         goto out;
     272             : }
     273             : 
     274             : /**
     275             :  * kmem_cache_create_usercopy - Create a cache with a region suitable
     276             :  * for copying to userspace
     277             :  * @name: A string which is used in /proc/slabinfo to identify this cache.
     278             :  * @size: The size of objects to be created in this cache.
     279             :  * @align: The required alignment for the objects.
     280             :  * @flags: SLAB flags
     281             :  * @useroffset: Usercopy region offset
     282             :  * @usersize: Usercopy region size
     283             :  * @ctor: A constructor for the objects.
     284             :  *
      285             :  * Cannot be called within an interrupt, but can be interrupted.
     286             :  * The @ctor is run when new pages are allocated by the cache.
     287             :  *
     288             :  * The flags are
     289             :  *
     290             :  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
     291             :  * to catch references to uninitialised memory.
     292             :  *
     293             :  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
     294             :  * for buffer overruns.
     295             :  *
     296             :  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
     297             :  * cacheline.  This can be beneficial if you're counting cycles as closely
     298             :  * as davem.
     299             :  *
     300             :  * Return: a pointer to the cache on success, NULL on failure.
     301             :  */
     302             : struct kmem_cache *
     303         117 : kmem_cache_create_usercopy(const char *name,
     304             :                   unsigned int size, unsigned int align,
     305             :                   slab_flags_t flags,
     306             :                   unsigned int useroffset, unsigned int usersize,
     307             :                   void (*ctor)(void *))
     308             : {
     309         117 :         struct kmem_cache *s = NULL;
     310         117 :         const char *cache_name;
     311         117 :         int err;
     312             : 
     313         117 :         mutex_lock(&slab_mutex);
     314             : 
     315         117 :         err = kmem_cache_sanity_check(name, size);
     316         117 :         if (err) {
     317           0 :                 goto out_unlock;
     318             :         }
     319             : 
     320             :         /* Refuse requests with allocator specific flags */
     321         117 :         if (flags & ~SLAB_FLAGS_PERMITTED) {
     322           0 :                 err = -EINVAL;
     323           0 :                 goto out_unlock;
     324             :         }
     325             : 
     326             :         /*
      327             :          * Some allocators will constrain the set of valid flags to a subset
     328             :          * of all flags. We expect them to define CACHE_CREATE_MASK in this
     329             :          * case, and we'll just provide them with a sanitized version of the
     330             :          * passed flags.
     331             :          */
     332         117 :         flags &= CACHE_CREATE_MASK;
     333             : 
      334             :         /* Fail closed on bad usersize or useroffset values. */
     335         117 :         if (WARN_ON(!usersize && useroffset) ||
     336         234 :             WARN_ON(size < usersize || size - usersize < useroffset))
     337             :                 usersize = useroffset = 0;
     338             : 
     339         117 :         if (!usersize)
     340         109 :                 s = __kmem_cache_alias(name, size, align, flags, ctor);
     341         109 :         if (s)
     342           0 :                 goto out_unlock;
     343             : 
     344         117 :         cache_name = kstrdup_const(name, GFP_KERNEL);
     345         117 :         if (!cache_name) {
     346           0 :                 err = -ENOMEM;
     347           0 :                 goto out_unlock;
     348             :         }
     349             : 
     350         159 :         s = create_cache(cache_name, size,
     351             :                          calculate_alignment(flags, align, size),
     352             :                          flags, useroffset, usersize, ctor, NULL);
     353         117 :         if (IS_ERR(s)) {
     354           0 :                 err = PTR_ERR(s);
     355           0 :                 kfree_const(cache_name);
     356             :         }
     357             : 
     358         117 : out_unlock:
     359         117 :         mutex_unlock(&slab_mutex);
     360             : 
     361         117 :         if (err) {
     362           0 :                 if (flags & SLAB_PANIC)
     363           0 :                         panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
     364             :                                 name, err);
     365             :                 else {
     366           0 :                         pr_warn("kmem_cache_create(%s) failed with error %d\n",
     367             :                                 name, err);
     368           0 :                         dump_stack();
     369             :                 }
     370           0 :                 return NULL;
     371             :         }
     372             :         return s;
     373             : }
     374             : EXPORT_SYMBOL(kmem_cache_create_usercopy);
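/*
 * Editor's sketch of a typical caller (hypothetical 'foo' names, not part
 * of the measured source): only the 'key' member may be copied to or from
 * user space, so the usercopy region is restricted to it and, with
 * CONFIG_HARDENED_USERCOPY, copies touching 'private' are flagged.
 */
struct foo {
	u32 key;
	void *private;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0,
						SLAB_HWCACHE_ALIGN,
						offsetof(struct foo, key),
						sizeof_field(struct foo, key),
						NULL);
	return foo_cachep ? 0 : -ENOMEM;
}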
     375             : 
     376             : /**
     377             :  * kmem_cache_create - Create a cache.
     378             :  * @name: A string which is used in /proc/slabinfo to identify this cache.
     379             :  * @size: The size of objects to be created in this cache.
     380             :  * @align: The required alignment for the objects.
     381             :  * @flags: SLAB flags
     382             :  * @ctor: A constructor for the objects.
     383             :  *
      384             :  * Cannot be called within an interrupt, but can be interrupted.
     385             :  * The @ctor is run when new pages are allocated by the cache.
     386             :  *
     387             :  * The flags are
     388             :  *
     389             :  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
     390             :  * to catch references to uninitialised memory.
     391             :  *
     392             :  * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
     393             :  * for buffer overruns.
     394             :  *
     395             :  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
     396             :  * cacheline.  This can be beneficial if you're counting cycles as closely
     397             :  * as davem.
     398             :  *
     399             :  * Return: a pointer to the cache on success, NULL on failure.
     400             :  */
     401             : struct kmem_cache *
     402         104 : kmem_cache_create(const char *name, unsigned int size, unsigned int align,
     403             :                 slab_flags_t flags, void (*ctor)(void *))
     404             : {
     405         104 :         return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
     406             :                                           ctor);
     407             : }
     408             : EXPORT_SYMBOL(kmem_cache_create);
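/*
 * Editor's sketch of the usual cache lifecycle (hypothetical 'bar' names,
 * not part of the measured source).
 */
struct bar {
	unsigned long id;
	int value;
};

static struct kmem_cache *bar_cachep;

static int bar_demo(void)
{
	struct bar *b;

	bar_cachep = kmem_cache_create("bar", sizeof(struct bar), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!bar_cachep)
		return -ENOMEM;

	b = kmem_cache_alloc(bar_cachep, GFP_KERNEL);
	if (b)
		kmem_cache_free(bar_cachep, b);

	/* Valid only once every object has been freed back to the cache. */
	kmem_cache_destroy(bar_cachep);
	return 0;
}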
     409             : 
     410           0 : static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
     411             : {
     412           0 :         LIST_HEAD(to_destroy);
     413           0 :         struct kmem_cache *s, *s2;
     414             : 
     415             :         /*
     416             :          * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
     417             :          * @slab_caches_to_rcu_destroy list.  The slab pages are freed
      418             :  * through RCU and the associated kmem_cache is dereferenced
     419             :          * while freeing the pages, so the kmem_caches should be freed only
     420             :          * after the pending RCU operations are finished.  As rcu_barrier()
     421             :          * is a pretty slow operation, we batch all pending destructions
     422             :          * asynchronously.
     423             :          */
     424           0 :         mutex_lock(&slab_mutex);
     425           0 :         list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
     426           0 :         mutex_unlock(&slab_mutex);
     427             : 
     428           0 :         if (list_empty(&to_destroy))
     429           0 :                 return;
     430             : 
     431           0 :         rcu_barrier();
     432             : 
     433           0 :         list_for_each_entry_safe(s, s2, &to_destroy, list) {
     434           0 :                 kfence_shutdown_cache(s);
     435             : #ifdef SLAB_SUPPORTS_SYSFS
     436           0 :                 sysfs_slab_release(s);
     437             : #else
     438             :                 slab_kmem_cache_release(s);
     439             : #endif
     440             :         }
     441             : }
     442             : 
     443           0 : static int shutdown_cache(struct kmem_cache *s)
     444             : {
     445             :         /* free asan quarantined objects */
     446           0 :         kasan_cache_shutdown(s);
     447             : 
     448           0 :         if (__kmem_cache_shutdown(s) != 0)
     449             :                 return -EBUSY;
     450             : 
     451           0 :         list_del(&s->list);
     452             : 
     453           0 :         if (s->flags & SLAB_TYPESAFE_BY_RCU) {
     454             : #ifdef SLAB_SUPPORTS_SYSFS
     455           0 :                 sysfs_slab_unlink(s);
     456             : #endif
     457           0 :                 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
     458           0 :                 schedule_work(&slab_caches_to_rcu_destroy_work);
     459             :         } else {
     460           0 :                 kfence_shutdown_cache(s);
     461             : #ifdef SLAB_SUPPORTS_SYSFS
     462           0 :                 sysfs_slab_unlink(s);
     463           0 :                 sysfs_slab_release(s);
     464             : #else
     465             :                 slab_kmem_cache_release(s);
     466             : #endif
     467             :         }
     468             : 
     469             :         return 0;
     470             : }
     471             : 
     472           0 : void slab_kmem_cache_release(struct kmem_cache *s)
     473             : {
     474           0 :         __kmem_cache_release(s);
     475           0 :         kfree_const(s->name);
     476           0 :         kmem_cache_free(kmem_cache, s);
     477           0 : }
     478             : 
     479           0 : void kmem_cache_destroy(struct kmem_cache *s)
     480             : {
     481           0 :         int err;
     482             : 
     483           0 :         if (unlikely(!s))
     484             :                 return;
     485             : 
     486           0 :         mutex_lock(&slab_mutex);
     487             : 
     488           0 :         s->refcount--;
     489           0 :         if (s->refcount)
     490           0 :                 goto out_unlock;
     491             : 
     492           0 :         err = shutdown_cache(s);
     493           0 :         if (err) {
     494           0 :                 pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
     495             :                        s->name);
     496           0 :                 dump_stack();
     497             :         }
     498           0 : out_unlock:
     499           0 :         mutex_unlock(&slab_mutex);
     500             : }
     501             : EXPORT_SYMBOL(kmem_cache_destroy);
     502             : 
     503             : /**
     504             :  * kmem_cache_shrink - Shrink a cache.
     505             :  * @cachep: The cache to shrink.
     506             :  *
     507             :  * Releases as many slabs as possible for a cache.
     508             :  * To help debugging, a zero exit status indicates all slabs were released.
     509             :  *
     510             :  * Return: %0 if all slabs were released, non-zero otherwise
     511             :  */
     512           0 : int kmem_cache_shrink(struct kmem_cache *cachep)
     513             : {
     514           0 :         int ret;
     515             : 
     516             : 
     517           0 :         kasan_cache_shrink(cachep);
     518           0 :         ret = __kmem_cache_shrink(cachep);
     519             : 
     520           0 :         return ret;
     521             : }
     522             : EXPORT_SYMBOL(kmem_cache_shrink);
     523             : 
     524         376 : bool slab_is_available(void)
     525             : {
     526         376 :         return slab_state >= UP;
     527             : }
     528             : 
     529             : /**
     530             :  * kmem_valid_obj - does the pointer reference a valid slab object?
     531             :  * @object: pointer to query.
     532             :  *
     533             :  * Return: %true if the pointer is to a not-yet-freed object from
     534             :  * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
     535             :  * is to an already-freed object, and %false otherwise.
     536             :  */
     537           0 : bool kmem_valid_obj(void *object)
     538             : {
     539           0 :         struct page *page;
     540             : 
     541             :         /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
     542           0 :         if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
     543           0 :                 return false;
     544           0 :         page = virt_to_head_page(object);
     545           0 :         return PageSlab(page);
     546             : }
     547             : 
     548             : /**
     549             :  * kmem_dump_obj - Print available slab provenance information
     550             :  * @object: slab object for which to find provenance information.
     551             :  *
     552             :  * This function uses pr_cont(), so that the caller is expected to have
     553             :  * printed out whatever preamble is appropriate.  The provenance information
     554             :  * depends on the type of object and on how much debugging is enabled.
     555             :  * For a slab-cache object, the fact that it is a slab object is printed,
     556             :  * and, if available, the slab name, return address, and stack trace from
     557             :  * the allocation of that object.
     558             :  *
     559             :  * This function will splat if passed a pointer to a non-slab object.
     560             :  * If you are not sure what type of object you have, you should instead
     561             :  * use mem_dump_obj().
     562             :  */
     563           0 : void kmem_dump_obj(void *object)
     564             : {
     565           0 :         char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
     566           0 :         int i;
     567           0 :         struct page *page;
     568           0 :         unsigned long ptroffset;
     569           0 :         struct kmem_obj_info kp = { };
     570             : 
     571           0 :         if (WARN_ON_ONCE(!virt_addr_valid(object)))
     572           0 :                 return;
     573           0 :         page = virt_to_head_page(object);
     574           0 :         if (WARN_ON_ONCE(!PageSlab(page))) {
     575           0 :                 pr_cont(" non-slab memory.\n");
     576           0 :                 return;
     577             :         }
     578           0 :         kmem_obj_info(&kp, object, page);
     579           0 :         if (kp.kp_slab_cache)
     580           0 :                 pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
     581             :         else
     582           0 :                 pr_cont(" slab%s", cp);
     583           0 :         if (kp.kp_objp)
     584           0 :                 pr_cont(" start %px", kp.kp_objp);
     585           0 :         if (kp.kp_data_offset)
     586           0 :                 pr_cont(" data offset %lu", kp.kp_data_offset);
     587           0 :         if (kp.kp_objp) {
     588           0 :                 ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
     589           0 :                 pr_cont(" pointer offset %lu", ptroffset);
     590             :         }
     591           0 :         if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
     592           0 :                 pr_cont(" size %u", kp.kp_slab_cache->usersize);
     593           0 :         if (kp.kp_ret)
     594           0 :                 pr_cont(" allocated at %pS\n", kp.kp_ret);
     595             :         else
     596           0 :                 pr_cont("\n");
     597           0 :         for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
     598           0 :                 if (!kp.kp_stack[i])
     599             :                         break;
     600           0 :                 pr_info("    %pS\n", kp.kp_stack[i]);
     601             :         }
     602             : }
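/*
 * Editor's sketch of a caller (hypothetical helper, not part of the
 * measured source): kmem_dump_obj() only continues the current line via
 * pr_cont(), so print a preamble first, and fall back to mem_dump_obj()
 * when the pointer might not be a slab object.
 */
static void report_object(void *ptr)
{
	if (kmem_valid_obj(ptr)) {
		pr_info("suspect object %px:", ptr);
		kmem_dump_obj(ptr);
	} else {
		mem_dump_obj(ptr);
	}
}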
     603             : 
     604             : #ifndef CONFIG_SLOB
     605             : /* Create a cache during boot when no slab services are available yet */
     606          28 : void __init create_boot_cache(struct kmem_cache *s, const char *name,
     607             :                 unsigned int size, slab_flags_t flags,
     608             :                 unsigned int useroffset, unsigned int usersize)
     609             : {
     610          28 :         int err;
     611          28 :         unsigned int align = ARCH_KMALLOC_MINALIGN;
     612             : 
     613          28 :         s->name = name;
     614          28 :         s->size = s->object_size = size;
     615             : 
     616             :         /*
     617             :          * For power of two sizes, guarantee natural alignment for kmalloc
     618             :          * caches, regardless of SL*B debugging options.
     619             :          */
     620          56 :         if (is_power_of_2(size))
     621          22 :                 align = max(align, size);
     622          28 :         s->align = calculate_alignment(flags, align, size);
     623             : 
     624          28 :         s->useroffset = useroffset;
     625          28 :         s->usersize = usersize;
     626             : 
     627          28 :         err = __kmem_cache_create(s, flags);
     628             : 
     629          28 :         if (err)
     630           0 :                 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
     631             :                                         name, size, err);
     632             : 
     633          28 :         s->refcount = -1;    /* Exempt from merging for now */
     634          28 : }
     635             : 
     636          26 : struct kmem_cache *__init create_kmalloc_cache(const char *name,
     637             :                 unsigned int size, slab_flags_t flags,
     638             :                 unsigned int useroffset, unsigned int usersize)
     639             : {
     640          26 :         struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
     641             : 
     642          26 :         if (!s)
     643           0 :                 panic("Out of memory when creating slab %s\n", name);
     644             : 
     645          26 :         create_boot_cache(s, name, size, flags, useroffset, usersize);
     646          26 :         kasan_cache_create_kmalloc(s);
     647          26 :         list_add(&s->list, &slab_caches);
     648          26 :         s->refcount = 1;
     649          26 :         return s;
     650             : }
     651             : 
     652             : struct kmem_cache *
     653             : kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
     654             : { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
     655             : EXPORT_SYMBOL(kmalloc_caches);
     656             : 
     657             : /*
      658             :  * Conversion table for small slab sizes / 8 to the index in the
     659             :  * kmalloc array. This is necessary for slabs < 192 since we have non power
     660             :  * of two cache sizes there. The size of larger slabs can be determined using
     661             :  * fls.
     662             :  */
     663             : static u8 size_index[24] __ro_after_init = {
     664             :         3,      /* 8 */
     665             :         4,      /* 16 */
     666             :         5,      /* 24 */
     667             :         5,      /* 32 */
     668             :         6,      /* 40 */
     669             :         6,      /* 48 */
     670             :         6,      /* 56 */
     671             :         6,      /* 64 */
     672             :         1,      /* 72 */
     673             :         1,      /* 80 */
     674             :         1,      /* 88 */
     675             :         1,      /* 96 */
     676             :         7,      /* 104 */
     677             :         7,      /* 112 */
     678             :         7,      /* 120 */
     679             :         7,      /* 128 */
     680             :         2,      /* 136 */
     681             :         2,      /* 144 */
     682             :         2,      /* 152 */
     683             :         2,      /* 160 */
     684             :         2,      /* 168 */
     685             :         2,      /* 176 */
     686             :         2,      /* 184 */
     687             :         2       /* 192 */
     688             : };
     689             : 
     690       54739 : static inline unsigned int size_index_elem(unsigned int bytes)
     691             : {
     692       54739 :         return (bytes - 1) / 8;
     693             : }
     694             : 
     695             : /*
     696             :  * Find the kmem_cache structure that serves a given size of
     697             :  * allocation
     698             :  */
     699       65491 : struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
     700             : {
     701       65491 :         unsigned int index;
     702             : 
     703       65491 :         if (size <= 192) {
     704       54739 :                 if (!size)
     705             :                         return ZERO_SIZE_PTR;
     706             : 
     707       54739 :                 index = size_index[size_index_elem(size)];
     708             :         } else {
     709       10752 :                 if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
     710             :                         return NULL;
     711       10752 :                 index = fls(size - 1);
     712             :         }
     713             : 
     714       65491 :         return kmalloc_caches[kmalloc_type(flags)][index];
     715             : }
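/*
 * Editor's note -- an illustrative trace, not part of the measured source.
 * A 24-byte request reaching kmalloc_slab() takes the small-size path:
 * index = size_index[(24 - 1) / 8] = size_index[2] = 5, i.e. kmalloc-32.
 * A 300-byte request takes the fls() path: fls(299) = 9, i.e. kmalloc-512.
 */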
     716             : 
     717             : #ifdef CONFIG_ZONE_DMA
     718             : #define INIT_KMALLOC_INFO(__size, __short_size)                 \
     719             : {                                                               \
     720             :         .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,    \
     721             :         .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,        \
     722             :         .name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,        \
     723             :         .size = __size,                                         \
     724             : }
     725             : #else
     726             : #define INIT_KMALLOC_INFO(__size, __short_size)                 \
     727             : {                                                               \
     728             :         .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,    \
     729             :         .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,        \
     730             :         .size = __size,                                         \
     731             : }
     732             : #endif
     733             : 
     734             : /*
     735             :  * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
     736             :  * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
     737             :  * kmalloc-67108864.
     738             :  */
     739             : const struct kmalloc_info_struct kmalloc_info[] __initconst = {
     740             :         INIT_KMALLOC_INFO(0, 0),
     741             :         INIT_KMALLOC_INFO(96, 96),
     742             :         INIT_KMALLOC_INFO(192, 192),
     743             :         INIT_KMALLOC_INFO(8, 8),
     744             :         INIT_KMALLOC_INFO(16, 16),
     745             :         INIT_KMALLOC_INFO(32, 32),
     746             :         INIT_KMALLOC_INFO(64, 64),
     747             :         INIT_KMALLOC_INFO(128, 128),
     748             :         INIT_KMALLOC_INFO(256, 256),
     749             :         INIT_KMALLOC_INFO(512, 512),
     750             :         INIT_KMALLOC_INFO(1024, 1k),
     751             :         INIT_KMALLOC_INFO(2048, 2k),
     752             :         INIT_KMALLOC_INFO(4096, 4k),
     753             :         INIT_KMALLOC_INFO(8192, 8k),
     754             :         INIT_KMALLOC_INFO(16384, 16k),
     755             :         INIT_KMALLOC_INFO(32768, 32k),
     756             :         INIT_KMALLOC_INFO(65536, 64k),
     757             :         INIT_KMALLOC_INFO(131072, 128k),
     758             :         INIT_KMALLOC_INFO(262144, 256k),
     759             :         INIT_KMALLOC_INFO(524288, 512k),
     760             :         INIT_KMALLOC_INFO(1048576, 1M),
     761             :         INIT_KMALLOC_INFO(2097152, 2M),
     762             :         INIT_KMALLOC_INFO(4194304, 4M),
     763             :         INIT_KMALLOC_INFO(8388608, 8M),
     764             :         INIT_KMALLOC_INFO(16777216, 16M),
     765             :         INIT_KMALLOC_INFO(33554432, 32M),
     766             :         INIT_KMALLOC_INFO(67108864, 64M)
     767             : };
     768             : 
     769             : /*
     770             :  * Patch up the size_index table if we have strange large alignment
     771             :  * requirements for the kmalloc array. This is only the case for
     772             :  * MIPS it seems. The standard arches will not generate any code here.
     773             :  *
     774             :  * Largest permitted alignment is 256 bytes due to the way we
     775             :  * handle the index determination for the smaller caches.
     776             :  *
     777             :  * Make sure that nothing crazy happens if someone starts tinkering
     778             :  * around with ARCH_KMALLOC_MINALIGN
     779             :  */
     780           1 : void __init setup_kmalloc_cache_index_table(void)
     781             : {
     782           1 :         unsigned int i;
     783             : 
     784           1 :         BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
     785             :                 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
     786             : 
     787           1 :         for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
     788             :                 unsigned int elem = size_index_elem(i);
     789             : 
     790             :                 if (elem >= ARRAY_SIZE(size_index))
     791             :                         break;
     792             :                 size_index[elem] = KMALLOC_SHIFT_LOW;
     793             :         }
     794             : 
     795           1 :         if (KMALLOC_MIN_SIZE >= 64) {
     796             :                 /*
     797             :                  * The 96 byte size cache is not used if the alignment
      798             :                  * is 64 bytes.
     799             :                  */
     800             :                 for (i = 64 + 8; i <= 96; i += 8)
     801             :                         size_index[size_index_elem(i)] = 7;
     802             : 
     803             :         }
     804             : 
     805           1 :         if (KMALLOC_MIN_SIZE >= 128) {
     806             :                 /*
     807             :                  * The 192 byte sized cache is not used if the alignment
      808             :                  * is 128 bytes. Redirect kmalloc to use the 256 byte cache
     809             :                  * instead.
     810             :                  */
     811             :                 for (i = 128 + 8; i <= 192; i += 8)
     812             :                         size_index[size_index_elem(i)] = 8;
     813             :         }
     814           1 : }
     815             : 
     816             : static void __init
     817          26 : new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
     818             : {
     819          26 :         if (type == KMALLOC_RECLAIM)
     820          13 :                 flags |= SLAB_RECLAIM_ACCOUNT;
     821             : 
     822          26 :         kmalloc_caches[type][idx] = create_kmalloc_cache(
     823             :                                         kmalloc_info[idx].name[type],
     824             :                                         kmalloc_info[idx].size, flags, 0,
     825             :                                         kmalloc_info[idx].size);
     826          26 : }
     827             : 
     828             : /*
     829             :  * Create the kmalloc array. Some of the regular kmalloc arrays
     830             :  * may already have been created because they were needed to
     831             :  * enable allocations for slab creation.
     832             :  */
     833           1 : void __init create_kmalloc_caches(slab_flags_t flags)
     834             : {
     835           1 :         int i;
     836           1 :         enum kmalloc_cache_type type;
     837             : 
     838           3 :         for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
     839          24 :                 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
     840          22 :                         if (!kmalloc_caches[type][i])
     841          22 :                                 new_kmalloc_cache(i, type, flags);
     842             : 
     843             :                         /*
      844             :                          * Caches that are not a power-of-two size.
      845             :                          * These have to be created immediately after the
      846             :                          * earlier power-of-two caches.
     847             :                          */
     848          22 :                         if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
     849           2 :                                         !kmalloc_caches[type][1])
     850           2 :                                 new_kmalloc_cache(1, type, flags);
     851          22 :                         if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
     852           2 :                                         !kmalloc_caches[type][2])
     853           2 :                                 new_kmalloc_cache(2, type, flags);
     854             :                 }
     855             :         }
     856             : 
     857             :         /* Kmalloc array is now usable */
     858           1 :         slab_state = UP;
     859             : 
     860             : #ifdef CONFIG_ZONE_DMA
     861             :         for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
     862             :                 struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
     863             : 
     864             :                 if (s) {
     865             :                         kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
     866             :                                 kmalloc_info[i].name[KMALLOC_DMA],
     867             :                                 kmalloc_info[i].size,
     868             :                                 SLAB_CACHE_DMA | flags, 0,
     869             :                                 kmalloc_info[i].size);
     870             :                 }
     871             :         }
     872             : #endif
     873           1 : }
     874             : #endif /* !CONFIG_SLOB */
     875             : 
     876           0 : gfp_t kmalloc_fix_flags(gfp_t flags)
     877             : {
     878           0 :         gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
     879             : 
     880           0 :         flags &= ~GFP_SLAB_BUG_MASK;
     881           0 :         pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
     882             :                         invalid_mask, &invalid_mask, flags, &flags);
     883           0 :         dump_stack();
     884             : 
     885           0 :         return flags;
     886             : }
     887             : 
     888             : /*
     889             :  * To avoid unnecessary overhead, we pass through large allocation requests
     890             :  * directly to the page allocator. We use __GFP_COMP, because we will need to
     891             :  * know the allocation order to free the pages properly in kfree.
     892             :  */
     893          25 : void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
     894             : {
     895          25 :         void *ret = NULL;
     896          25 :         struct page *page;
     897             : 
     898          25 :         if (unlikely(flags & GFP_SLAB_BUG_MASK))
     899           0 :                 flags = kmalloc_fix_flags(flags);
     900             : 
     901          25 :         flags |= __GFP_COMP;
     902          25 :         page = alloc_pages(flags, order);
     903          25 :         if (likely(page)) {
     904          25 :                 ret = page_address(page);
     905          25 :                 mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
     906          25 :                                       PAGE_SIZE << order);
     907             :         }
     908          25 :         ret = kasan_kmalloc_large(ret, size, flags);
     909             :         /* As ret might get tagged, call kmemleak hook after KASAN. */
     910          25 :         kmemleak_alloc(ret, size, 1, flags);
     911          25 :         return ret;
     912             : }
     913             : EXPORT_SYMBOL(kmalloc_order);
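/*
 * Editor's note -- an illustrative trace, not part of the measured source.
 * Assuming 4 KiB pages and SLUB's default KMALLOC_MAX_CACHE_SIZE of two
 * pages, kmalloc(64 * 1024, GFP_KERNEL) bypasses the kmalloc caches and
 * lands here with order = get_order(65536) = 4: one 16-page compound
 * allocation that kfree() later returns straight to the page allocator.
 */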
     914             : 
     915             : #ifdef CONFIG_TRACING
     916          25 : void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
     917             : {
     918          25 :         void *ret = kmalloc_order(size, flags, order);
     919          25 :         trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
     920          25 :         return ret;
     921             : }
     922             : EXPORT_SYMBOL(kmalloc_order_trace);
     923             : #endif
     924             : 
     925             : #ifdef CONFIG_SLAB_FREELIST_RANDOM
     926             : /* Randomize a generic freelist */
     927             : static void freelist_randomize(struct rnd_state *state, unsigned int *list,
     928             :                                unsigned int count)
     929             : {
     930             :         unsigned int rand;
     931             :         unsigned int i;
     932             : 
     933             :         for (i = 0; i < count; i++)
     934             :                 list[i] = i;
     935             : 
     936             :         /* Fisher-Yates shuffle */
     937             :         for (i = count - 1; i > 0; i--) {
     938             :                 rand = prandom_u32_state(state);
     939             :                 rand %= (i + 1);
     940             :                 swap(list[i], list[rand]);
     941             :         }
     942             : }
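/*
 * Editor's note -- an illustrative trace, not part of the measured source.
 * For count = 4 the list starts as {0, 1, 2, 3}; the loop then swaps
 * index 3 with a random index in [0, 3], index 2 with one in [0, 2], and
 * index 1 with one in [0, 1], producing each of the 4! permutations with
 * equal probability for an unbiased generator.
 */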
     943             : 
     944             : /* Create a random sequence per cache */
     945             : int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
     946             :                                     gfp_t gfp)
     947             : {
     948             :         struct rnd_state state;
     949             : 
     950             :         if (count < 2 || cachep->random_seq)
     951             :                 return 0;
     952             : 
     953             :         cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
     954             :         if (!cachep->random_seq)
     955             :                 return -ENOMEM;
     956             : 
     957             :         /* Get best entropy at this stage of boot */
     958             :         prandom_seed_state(&state, get_random_long());
     959             : 
     960             :         freelist_randomize(&state, cachep->random_seq, count);
     961             :         return 0;
     962             : }
     963             : 
     964             : /* Destroy the per-cache random freelist sequence */
     965             : void cache_random_seq_destroy(struct kmem_cache *cachep)
     966             : {
     967             :         kfree(cachep->random_seq);
     968             :         cachep->random_seq = NULL;
     969             : }
     970             : #endif /* CONFIG_SLAB_FREELIST_RANDOM */
     971             : 
     972             : #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
     973             : #ifdef CONFIG_SLAB
     974             : #define SLABINFO_RIGHTS (0600)
     975             : #else
     976             : #define SLABINFO_RIGHTS (0400)
     977             : #endif
     978             : 
     979           0 : static void print_slabinfo_header(struct seq_file *m)
     980             : {
     981             :         /*
     982             :          * Output format version, so at least we can change it
     983             :          * without _too_ many complaints.
     984             :          */
     985             : #ifdef CONFIG_DEBUG_SLAB
     986             :         seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
     987             : #else
     988           0 :         seq_puts(m, "slabinfo - version: 2.1\n");
     989             : #endif
     990           0 :         seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
     991           0 :         seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
     992           0 :         seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
     993             : #ifdef CONFIG_DEBUG_SLAB
     994             :         seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
     995             :         seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
     996             : #endif
     997           0 :         seq_putc(m, '\n');
     998           0 : }
     999             : 
    1000           0 : void *slab_start(struct seq_file *m, loff_t *pos)
    1001             : {
    1002           0 :         mutex_lock(&slab_mutex);
    1003           0 :         return seq_list_start(&slab_caches, *pos);
    1004             : }
    1005             : 
    1006           0 : void *slab_next(struct seq_file *m, void *p, loff_t *pos)
    1007             : {
    1008           0 :         return seq_list_next(p, &slab_caches, pos);
    1009             : }
    1010             : 
    1011           0 : void slab_stop(struct seq_file *m, void *p)
    1012             : {
    1013           0 :         mutex_unlock(&slab_mutex);
    1014           0 : }
    1015             : 
    1016           0 : static void cache_show(struct kmem_cache *s, struct seq_file *m)
    1017             : {
    1018           0 :         struct slabinfo sinfo;
    1019             : 
    1020           0 :         memset(&sinfo, 0, sizeof(sinfo));
    1021           0 :         get_slabinfo(s, &sinfo);
    1022             : 
    1023           0 :         seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
    1024             :                    s->name, sinfo.active_objs, sinfo.num_objs, s->size,
    1025           0 :                    sinfo.objects_per_slab, (1 << sinfo.cache_order));
    1026             : 
    1027           0 :         seq_printf(m, " : tunables %4u %4u %4u",
    1028             :                    sinfo.limit, sinfo.batchcount, sinfo.shared);
    1029           0 :         seq_printf(m, " : slabdata %6lu %6lu %6lu",
    1030             :                    sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
    1031           0 :         slabinfo_show_stats(m, s);
    1032           0 :         seq_putc(m, '\n');
    1033           0 : }
    1034             : 
    1035           0 : static int slab_show(struct seq_file *m, void *p)
    1036             : {
    1037           0 :         struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
    1038             : 
    1039           0 :         if (p == slab_caches.next)
    1040           0 :                 print_slabinfo_header(m);
    1041           0 :         cache_show(s, m);
    1042           0 :         return 0;
    1043             : }
    1044             : 
    1045           0 : void dump_unreclaimable_slab(void)
    1046             : {
    1047           0 :         struct kmem_cache *s;
    1048           0 :         struct slabinfo sinfo;
    1049             : 
    1050             :         /*
     1051             :          * Acquiring slab_mutex here is risky: we would rather not sleep
     1052             :          * in the OOM path. However, walking slab_caches without holding
     1053             :          * the mutex risks a crash if the list changes underneath us.
     1054             :          * Use mutex_trylock() to protect the list traversal, and dump
     1055             :          * nothing if the mutex cannot be taken.
    1056             :          */
    1057           0 :         if (!mutex_trylock(&slab_mutex)) {
    1058           0 :                 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
    1059           0 :                 return;
    1060             :         }
    1061             : 
    1062           0 :         pr_info("Unreclaimable slab info:\n");
    1063           0 :         pr_info("Name                      Used          Total\n");
    1064             : 
    1065           0 :         list_for_each_entry(s, &slab_caches, list) {
    1066           0 :                 if (s->flags & SLAB_RECLAIM_ACCOUNT)
    1067           0 :                         continue;
    1068             : 
    1069           0 :                 get_slabinfo(s, &sinfo);
    1070             : 
    1071           0 :                 if (sinfo.num_objs > 0)
    1072           0 :                         pr_info("%-17s %10luKB %10luKB\n", s->name,
    1073             :                                 (sinfo.active_objs * s->size) / 1024,
    1074             :                                 (sinfo.num_objs * s->size) / 1024);
    1075             :         }
    1076           0 :         mutex_unlock(&slab_mutex);
    1077             : }
    1078             : 
    1079             : #if defined(CONFIG_MEMCG_KMEM)
    1080             : int memcg_slab_show(struct seq_file *m, void *p)
    1081             : {
    1082             :         /*
    1083             :          * Deprecated.
     1084             :          * Please take a look at tools/cgroup/slabinfo.py instead.
    1085             :          */
    1086             :         return 0;
    1087             : }
    1088             : #endif
    1089             : 
    1090             : /*
    1091             :  * slabinfo_op - iterator that generates /proc/slabinfo
    1092             :  *
    1093             :  * Output layout:
    1094             :  * cache-name
    1095             :  * num-active-objs
    1096             :  * total-objs
    1097             :  * object size
    1098             :  * num-active-slabs
    1099             :  * total-slabs
    1100             :  * num-pages-per-slab
    1101             :  * + further values on SMP and with statistics enabled
    1102             :  */
    1103             : static const struct seq_operations slabinfo_op = {
    1104             :         .start = slab_start,
    1105             :         .next = slab_next,
    1106             :         .stop = slab_stop,
    1107             :         .show = slab_show,
    1108             : };
    1109             : 
    1110           0 : static int slabinfo_open(struct inode *inode, struct file *file)
    1111             : {
    1112           0 :         return seq_open(file, &slabinfo_op);
    1113             : }
    1114             : 
    1115             : static const struct proc_ops slabinfo_proc_ops = {
    1116             :         .proc_flags     = PROC_ENTRY_PERMANENT,
    1117             :         .proc_open      = slabinfo_open,
    1118             :         .proc_read      = seq_read,
    1119             :         .proc_write     = slabinfo_write,
    1120             :         .proc_lseek     = seq_lseek,
    1121             :         .proc_release   = seq_release,
    1122             : };
    1123             : 
    1124           1 : static int __init slab_proc_init(void)
    1125             : {
    1126           1 :         proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
    1127           1 :         return 0;
    1128             : }
    1129             : module_init(slab_proc_init);
    1130             : 
    1131             : #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
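
Putting the pieces above together: slab_start()/slab_next()/slab_stop() walk slab_caches under slab_mutex, slab_show() prints the header once and then one line per cache, and slabinfo_proc_ops exposes the result as /proc/slabinfo. As a rough, hypothetical illustration only (userspace code, not part of this file), a consumer of that interface could parse the first three columns described in the slabinfo_op comment like this:

/*
 * Hypothetical userspace sketch (not kernel code): read /proc/slabinfo,
 * skip the two header lines emitted by print_slabinfo_header(), and parse
 * the first three columns (name, <active_objs>, <num_objs>). Reading the
 * file typically requires the privileges granted by SLABINFO_RIGHTS.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/slabinfo", "r");

        if (!f) {
                perror("fopen /proc/slabinfo");
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f)) {
                char name[64];
                unsigned long active_objs, num_objs;

                /* "slabinfo - version: 2.1" and "# name ..." are headers. */
                if (line[0] == '#' || strncmp(line, "slabinfo", 8) == 0)
                        continue;
                if (sscanf(line, "%63s %lu %lu", name, &active_objs, &num_objs) == 3)
                        printf("%-20s %lu/%lu objects active\n",
                               name, active_objs, num_objs);
        }
        fclose(f);
        return EXIT_SUCCESS;
}
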
    1132             : 
    1133         228 : static __always_inline void *__do_krealloc(const void *p, size_t new_size,
    1134             :                                            gfp_t flags)
    1135             : {
    1136         228 :         void *ret;
    1137         228 :         size_t ks;
    1138             : 
    1139             :         /* Don't use instrumented ksize to allow precise KASAN poisoning. */
    1140         228 :         if (likely(!ZERO_OR_NULL_PTR(p))) {
    1141         440 :                 if (!kasan_check_byte(p))
    1142             :                         return NULL;
    1143         220 :                 ks = kfence_ksize(p) ?: __ksize(p);
    1144             :         } else
    1145             :                 ks = 0;
    1146             : 
    1147             :         /* If the object still fits, repoison it precisely. */
    1148         228 :         if (ks >= new_size) {
    1149          92 :                 p = kasan_krealloc((void *)p, new_size, flags);
    1150          92 :                 return (void *)p;
    1151             :         }
    1152             : 
    1153         136 :         ret = kmalloc_track_caller(new_size, flags);
    1154         136 :         if (ret && p) {
    1155             :                 /* Disable KASAN checks as the object's redzone is accessed. */
    1156         128 :                 kasan_disable_current();
    1157         128 :                 memcpy(ret, kasan_reset_tag(p), ks);
    1158         128 :                 kasan_enable_current();
    1159             :         }
    1160             : 
    1161             :         return ret;
    1162             : }
    1163             : 
    1164             : /**
    1165             :  * krealloc - reallocate memory. The contents will remain unchanged.
    1166             :  * @p: object to reallocate memory for.
    1167             :  * @new_size: how many bytes of memory are required.
    1168             :  * @flags: the type of memory to allocate.
    1169             :  *
    1170             :  * The contents of the object pointed to are preserved up to the
    1171             :  * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
    1172             :  * If @p is %NULL, krealloc() behaves exactly like kmalloc().  If @new_size
    1173             :  * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
    1174             :  *
    1175             :  * Return: pointer to the allocated memory or %NULL in case of error
    1176             :  */
    1177         228 : void *krealloc(const void *p, size_t new_size, gfp_t flags)
    1178             : {
    1179         228 :         void *ret;
    1180             : 
    1181         228 :         if (unlikely(!new_size)) {
    1182           0 :                 kfree(p);
    1183           0 :                 return ZERO_SIZE_PTR;
    1184             :         }
    1185             : 
    1186         228 :         ret = __do_krealloc(p, new_size, flags);
    1187         228 :         if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
    1188         136 :                 kfree(p);
    1189             : 
    1190             :         return ret;
    1191             : }
    1192             : EXPORT_SYMBOL(krealloc);
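
A minimal usage sketch of the semantics documented above, with made-up identifiers and sizes: a NULL @p behaves like kmalloc(), growing preserves the existing contents, and @new_size == 0 frees the object and returns ZERO_SIZE_PTR. Note the temporary pointer when growing, so the original buffer is not leaked if krealloc() fails:

/* Illustrative sketch only; identifiers and sizes are hypothetical. */
#include <linux/kernel.h>
#include <linux/slab.h>

static int krealloc_usage_sketch(void)
{
        u32 *buf, *bigger;

        /* @p == NULL: behaves exactly like kmalloc(). */
        buf = krealloc(NULL, 16 * sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf[0] = 0xdeadbeef;

        /* Growing preserves the contents up to the old size. */
        bigger = krealloc(buf, 64 * sizeof(*buf), GFP_KERNEL);
        if (!bigger) {
                kfree(buf);             /* krealloc() did not free @buf */
                return -ENOMEM;
        }
        buf = bigger;
        WARN_ON(buf[0] != 0xdeadbeef);

        /* @new_size == 0: the object is freed, ZERO_SIZE_PTR is returned. */
        buf = krealloc(buf, 0, GFP_KERNEL);
        WARN_ON(buf != ZERO_SIZE_PTR);

        return 0;
}
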
    1193             : 
    1194             : /**
    1195             :  * kfree_sensitive - Clear sensitive information in memory before freeing
    1196             :  * @p: object to free memory of
    1197             :  *
     1198             :  * The memory of the object @p points to is zeroed before it is freed.
    1199             :  * If @p is %NULL, kfree_sensitive() does nothing.
    1200             :  *
     1201             :  * Note: this function zeroes the whole allocated buffer, which can be a
     1202             :  * good deal bigger than the size originally requested from kmalloc(), so
     1203             :  * be careful when using it in performance-sensitive code.
    1204             :  */
    1205           1 : void kfree_sensitive(const void *p)
    1206             : {
    1207           1 :         size_t ks;
    1208           1 :         void *mem = (void *)p;
    1209             : 
    1210           1 :         ks = ksize(mem);
    1211           1 :         if (ks)
    1212           1 :                 memzero_explicit(mem, ks);
    1213           1 :         kfree(mem);
    1214           1 : }
    1215             : EXPORT_SYMBOL(kfree_sensitive);
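
A typical use of kfree_sensitive() is releasing key material. The struct and function names below are hypothetical; this is a minimal sketch of the intent, not code from this file:

/* Illustrative sketch only; struct and function names are hypothetical. */
#include <linux/slab.h>
#include <linux/types.h>

struct session_key {
        u8 bytes[32];
};

static void drop_session_key(struct session_key *key)
{
        /*
         * The whole underlying allocation (which may be larger than
         * sizeof(*key), see ksize()) is zeroed before the slab object
         * is freed, so no key material lingers in freed memory.
         */
        kfree_sensitive(key);
}
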
    1216             : 
    1217             : /**
    1218             :  * ksize - get the actual amount of memory allocated for a given object
    1219             :  * @objp: Pointer to the object
    1220             :  *
    1221             :  * kmalloc may internally round up allocations and return more memory
    1222             :  * than requested. ksize() can be used to determine the actual amount of
    1223             :  * memory allocated. The caller may use this additional memory, even though
    1224             :  * a smaller amount of memory was initially specified with the kmalloc call.
    1225             :  * The caller must guarantee that objp points to a valid object previously
    1226             :  * allocated with either kmalloc() or kmem_cache_alloc(). The object
     1227             :  * must not be freed for the duration of the call.
    1228             :  *
    1229             :  * Return: size of the actual memory used by @objp in bytes
    1230             :  */
    1231        9861 : size_t ksize(const void *objp)
    1232             : {
    1233        9861 :         size_t size;
    1234             : 
    1235             :         /*
    1236             :          * We need to first check that the pointer to the object is valid, and
     1237             :          * only then unpoison the memory. The report printed from ksize() is
     1238             :          * more useful than one printed later, when the behaviour could be
     1239             :          * undefined due to a potential use-after-free or double-free.
    1240             :          *
    1241             :          * We use kasan_check_byte(), which is supported for the hardware
    1242             :          * tag-based KASAN mode, unlike kasan_check_read/write().
    1243             :          *
     1244             :          * If the pointed-to memory is invalid, we return 0 to avoid users of
    1245             :          * ksize() writing to and potentially corrupting the memory region.
    1246             :          *
    1247             :          * We want to perform the check before __ksize(), to avoid potentially
    1248             :          * crashing in __ksize() due to accessing invalid metadata.
    1249             :          */
    1250       19722 :         if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
    1251           0 :                 return 0;
    1252             : 
    1253        9861 :         size = kfence_ksize(objp) ?: __ksize(objp);
    1254             :         /*
     1255             :          * We assume that ksize() callers may use the whole allocated area,
     1256             :          * so we need to unpoison it.
    1257             :          */
    1258        9861 :         kasan_unpoison_range(objp, size);
    1259        9861 :         return size;
    1260             : }
    1261             : EXPORT_SYMBOL(ksize);
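
As a hedged sketch of the contract documented above (the helper name and parameters are made up): a caller can allocate, ask ksize() how much memory was actually granted, and then legitimately use the whole reported size:

/* Illustrative sketch only; the helper and its parameters are hypothetical. */
#include <linux/slab.h>

static char *alloc_log_buffer(size_t wanted, size_t *usable)
{
        char *buf = kmalloc(wanted, GFP_KERNEL);

        if (!buf)
                return NULL;
        /*
         * kmalloc() may round the request up to the next kmalloc cache
         * size; ksize() reports how much of the allocation the caller
         * may actually use.
         */
        *usable = ksize(buf);
        return buf;
}
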
    1262             : 
    1263             : /* Tracepoints definitions. */
    1264             : EXPORT_TRACEPOINT_SYMBOL(kmalloc);
    1265             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
    1266             : EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
    1267             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
    1268             : EXPORT_TRACEPOINT_SYMBOL(kfree);
    1269             : EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
    1270             : 
    1271     1413833 : int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
    1272             : {
    1273     1413833 :         if (__should_failslab(s, gfpflags))
    1274             :                 return -ENOMEM;
    1275     1413833 :         return 0;
    1276             : }
    1277             : ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
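
should_failslab() exists as a separate, non-inlined function precisely so that fault injection and the error-injection framework can override its return value. The sketch below only illustrates how a caller inside mm/ (where the declaration is visible) would honour an injected failure; it is not the real pre-allocation hook:

/*
 * Illustrative sketch only, not the real allocation path. Assumes the
 * should_failslab() declaration is visible, as it is from within mm/.
 */
#include <linux/slab.h>

static void *checked_cache_alloc(struct kmem_cache *s, gfp_t flags)
{
        /*
         * should_failslab() normally returns 0; failslab fault injection,
         * or an error-injection override permitted by ALLOW_ERROR_INJECTION
         * above, can make it return -ENOMEM, in which case the allocation
         * is skipped entirely.
         */
        if (should_failslab(s, flags))
                return NULL;
        return kmem_cache_alloc(s, flags);
}
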

Generated by: LCOV version 1.14