LCOV - code coverage report
Current view: top level - mm/kasan - common.c (source / functions)
Test:         landlock.info
Date:         2021-04-22 12:43:58

                         Hit    Total    Coverage
        Lines:           175      205      85.4 %
        Functions:        29       32      90.6 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * This file contains common KASAN code.
       4             :  *
       5             :  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
       6             :  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
       7             :  *
       8             :  * Some code borrowed from https://github.com/xairy/kasan-prototype by
       9             :  *        Andrey Konovalov <andreyknvl@gmail.com>
      10             :  */
      11             : 
      12             : #include <linux/export.h>
      13             : #include <linux/init.h>
      14             : #include <linux/kasan.h>
      15             : #include <linux/kernel.h>
      16             : #include <linux/linkage.h>
      17             : #include <linux/memblock.h>
      18             : #include <linux/memory.h>
      19             : #include <linux/mm.h>
      20             : #include <linux/module.h>
      21             : #include <linux/printk.h>
      22             : #include <linux/sched.h>
      23             : #include <linux/sched/task_stack.h>
      24             : #include <linux/slab.h>
      25             : #include <linux/stacktrace.h>
      26             : #include <linux/string.h>
      27             : #include <linux/types.h>
      28             : #include <linux/bug.h>
      29             : 
      30             : #include "kasan.h"
      31             : #include "../slab.h"
      32             : 
      33     3443996 : depot_stack_handle_t kasan_save_stack(gfp_t flags)
      34             : {
      35     3443996 :         unsigned long entries[KASAN_STACK_DEPTH];
      36     3443996 :         unsigned int nr_entries;
      37             : 
      38     3443996 :         nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
      39     3449340 :         nr_entries = filter_irq_stacks(entries, nr_entries);
      40     3451479 :         return stack_depot_save(entries, nr_entries, flags);
      41             : }
      42             : 
      43     2723744 : void kasan_set_track(struct kasan_track *track, gfp_t flags)
      44             : {
      45     1263736 :         track->pid = current->pid;
      46     2723744 :         track->stack = kasan_save_stack(flags);
      47     1461220 : }
      48             : 
      49             : #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
      50       74161 : void kasan_enable_current(void)
      51             : {
      52       74161 :         current->kasan_depth++;
      53       74161 : }
      54             : 
      55       74161 : void kasan_disable_current(void)
      56             : {
      57       74161 :         current->kasan_depth--;
      58       74161 : }
      59             : #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
      60             : 
      61       10035 : void __kasan_unpoison_range(const void *address, size_t size)
      62             : {
      63       10035 :         kasan_unpoison(address, size);
      64       10035 : }
      65             : 
      66             : #if CONFIG_KASAN_STACK
      67             : /* Unpoison the entire stack for a task. */
      68          10 : void kasan_unpoison_task_stack(struct task_struct *task)
      69             : {
      70          10 :         void *base = task_stack_page(task);
      71             : 
      72          10 :         kasan_unpoison(base, THREAD_SIZE);
      73          10 : }
      74             : 
      75             : /* Unpoison the stack for the current task beyond a watermark sp value. */
      76           0 : asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
      77             : {
      78             :         /*
      79             :          * Calculate the task stack base address.  Avoid using 'current'
      80             :          * because this function is called by early resume code which hasn't
      81             :          * yet set up the percpu register (%gs).
      82             :          */
      83           0 :         void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
      84             : 
      85           0 :         kasan_unpoison(base, watermark - base);
      86           0 : }
      87             : #endif /* CONFIG_KASAN_STACK */
      88             : 
      89             : /*
      90             :  * Only allow cache merging when stack collection is disabled and no metadata
      91             :  * is present.
      92             :  */
      93           0 : slab_flags_t __kasan_never_merge(void)
      94             : {
      95           0 :         if (kasan_stack_collection_enabled())
      96           0 :                 return SLAB_KASAN;
      97             :         return 0;
      98             : }
      99             : 
     100      195394 : void __kasan_alloc_pages(struct page *page, unsigned int order)
     101             : {
     102      195394 :         u8 tag;
     103      195394 :         unsigned long i;
     104             : 
     105      195394 :         if (unlikely(PageHighMem(page)))
     106             :                 return;
     107             : 
     108      195394 :         tag = kasan_random_tag();
     109      195394 :         for (i = 0; i < (1 << order); i++)
     110             :                 page_kasan_tag_set(page + i, tag);
     111      195394 :         kasan_unpoison(page_address(page), PAGE_SIZE << order);
     112             : }
     113             : 
     114      150606 : void __kasan_free_pages(struct page *page, unsigned int order)
     115             : {
     116      150606 :         if (likely(!PageHighMem(page)))
     117      150606 :                 kasan_poison(page_address(page), PAGE_SIZE << order,
     118             :                              KASAN_FREE_PAGE);
     119      150555 : }
     120             : 
     121             : /*
     122             :  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
     123             :  * For larger allocations larger redzones are used.
     124             :  */
     125         145 : static inline unsigned int optimal_redzone(unsigned int object_size)
     126             : {
     127         145 :         return
     128         145 :                 object_size <= 64        - 16   ? 16 :
     129             :                 object_size <= 128       - 32   ? 32 :
     130             :                 object_size <= 512       - 64   ? 64 :
     131             :                 object_size <= 4096      - 128  ? 128 :
     132             :                 object_size <= (1 << 14) - 256  ? 256 :
     133             :                 object_size <= (1 << 15) - 512  ? 512 :
     134             :                 object_size <= (1 << 16) - 1024 ? 1024 : 2048;
     135             : }
     136             : 
     137         145 : void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
     138             :                           slab_flags_t *flags)
     139             : {
     140         145 :         unsigned int ok_size;
     141         145 :         unsigned int optimal_size;
     142             : 
     143             :         /*
     144             :          * SLAB_KASAN is used to mark caches as ones that are sanitized by
     145             :          * KASAN. Currently this flag is used in two places:
     146             :          * 1. In slab_ksize() when calculating the size of the accessible
     147             :          *    memory within the object.
     148             :          * 2. In slab_common.c to prevent merging of sanitized caches.
     149             :          */
     150         145 :         *flags |= SLAB_KASAN;
     151             : 
     152         145 :         if (!kasan_stack_collection_enabled())
     153             :                 return;
     154             : 
     155         145 :         ok_size = *size;
     156             : 
     157             :         /* Add alloc meta into redzone. */
     158         145 :         cache->kasan_info.alloc_meta_offset = *size;
     159         145 :         *size += sizeof(struct kasan_alloc_meta);
     160             : 
     161             :         /*
     162             :          * If alloc meta doesn't fit, don't add it.
     163             :          * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
     164             :          * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
     165             :          * larger sizes.
     166             :          */
     167         145 :         if (*size > KMALLOC_MAX_SIZE) {
     168           0 :                 cache->kasan_info.alloc_meta_offset = 0;
     169           0 :                 *size = ok_size;
     170             :                 /* Continue, since free meta might still fit. */
     171             :         }
     172             : 
     173             :         /* Only the generic mode uses free meta or flexible redzones. */
     174         145 :         if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
     175             :                 cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
     176             :                 return;
     177             :         }
     178             : 
     179             :         /*
     180             :          * Add free meta into redzone when it's not possible to store
     181             :          * it in the object. This is the case when:
     182             :          * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
     183             :          *    be touched after it was freed, or
     184             :          * 2. Object has a constructor, which means it's expected to
     185             :          *    retain its content until the next allocation, or
     186             :          * 3. Object is too small.
     187             :          * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
     188             :          */
     189         145 :         if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
     190         128 :             cache->object_size < sizeof(struct kasan_free_meta)) {
     191          19 :                 ok_size = *size;
     192             : 
     193          19 :                 cache->kasan_info.free_meta_offset = *size;
     194          19 :                 *size += sizeof(struct kasan_free_meta);
     195             : 
     196             :                 /* If free meta doesn't fit, don't add it. */
     197          19 :                 if (*size > KMALLOC_MAX_SIZE) {
     198           0 :                         cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
     199           0 :                         *size = ok_size;
     200             :                 }
     201             :         }
     202             : 
     203             :         /* Calculate size with optimal redzone. */
     204         145 :         optimal_size = cache->object_size + optimal_redzone(cache->object_size);
     205             :         /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
     206         145 :         if (optimal_size > KMALLOC_MAX_SIZE)
     207             :                 optimal_size = KMALLOC_MAX_SIZE;
     208             :         /* Use optimal size if the size with added metas is not large enough. */
     209         145 :         if (*size < optimal_size)
     210         121 :                 *size = optimal_size;
     211             : }
     212             : 
     213          26 : void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
     214             : {
     215          26 :         cache->kasan_info.is_kmalloc = true;
     216          26 : }
     217             : 
     218           0 : size_t __kasan_metadata_size(struct kmem_cache *cache)
     219             : {
     220           0 :         if (!kasan_stack_collection_enabled())
     221             :                 return 0;
     222           0 :         return (cache->kasan_info.alloc_meta_offset ?
     223           0 :                 sizeof(struct kasan_alloc_meta) : 0) +
     224           0 :                 (cache->kasan_info.free_meta_offset ?
     225           0 :                 sizeof(struct kasan_free_meta) : 0);
     226             : }
     227             : 
     228     2675649 : struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
     229             :                                               const void *object)
     230             : {
     231      727345 :         if (!cache->kasan_info.alloc_meta_offset)
     232             :                 return NULL;
     233     2675649 :         return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
     234             : }
     235             : 
     236             : #ifdef CONFIG_KASAN_GENERIC
     237     2521771 : struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
     238             :                                             const void *object)
     239             : {
     240     2521771 :         BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
     241     2521771 :         if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
     242             :                 return NULL;
     243     2521771 :         return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
     244             : }
     245             : #endif
     246             : 
     247       28183 : void __kasan_poison_slab(struct page *page)
     248             : {
     249       28183 :         unsigned long i;
     250             : 
     251      131833 :         for (i = 0; i < compound_nr(page); i++)
     252      103650 :                 page_kasan_tag_reset(page + i);
     253       28183 :         kasan_poison(page_address(page), page_size(page),
     254             :                      KASAN_KMALLOC_REDZONE);
     255       28183 : }
     256             : 
     257       21341 : void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
     258             : {
     259       21341 :         kasan_unpoison(object, cache->object_size);
     260       21341 : }
     261             : 
     262       20697 : void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
     263             : {
     264       20697 :         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
     265             :                         KASAN_KMALLOC_REDZONE);
     266       20698 : }
     267             : 
     268             : /*
     269             :  * This function assigns a tag to an object considering the following:
     270             :  * 1. A cache might have a constructor, which might save a pointer to a slab
     271             :  *    object somewhere (e.g. in the object itself). We preassign a tag for
     272             :  *    each object in caches with constructors during slab creation and reuse
     273             :  *    the same tag each time a particular object is allocated.
     274             :  * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
     275             :  *    accessed after being freed. We preassign tags for objects in these
     276             :  *    caches as well.
     277             :  * 3. For SLAB allocator we can't preassign tags randomly since the freelist
     278             :  *    is stored as an array of indexes instead of a linked list. Assign tags
     279             :  *    based on objects indexes, so that objects that are next to each other
     280             :  *    get different tags.
     281             :  */
     282     1961815 : static inline u8 assign_tag(struct kmem_cache *cache,
     283             :                                         const void *object, bool init)
     284             : {
     285     1961815 :         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
     286     1961815 :                 return 0xff;
     287             : 
     288             :         /*
     289             :          * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
     290             :          * set, assign a tag when the object is being allocated (init == false).
     291             :          */
     292             :         if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
     293             :                 return init ? KASAN_TAG_KERNEL : kasan_random_tag();
     294             : 
     295             :         /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
     296             : #ifdef CONFIG_SLAB
     297             :         /* For SLAB assign tags based on the object index in the freelist. */
     298             :         return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
     299             : #else
     300             :         /*
     301             :          * For SLUB assign a random tag during slab creation, otherwise reuse
     302             :          * the already assigned tag.
     303             :          */
     304             :         return init ? kasan_random_tag() : get_tag(object);
     305             : #endif
     306             : }
     307             : 
     308      488296 : void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
     309             :                                                 const void *object)
     310             : {
     311      488296 :         struct kasan_alloc_meta *alloc_meta;
     312             : 
     313      488296 :         if (kasan_stack_collection_enabled()) {
     314      488296 :                 alloc_meta = kasan_get_alloc_meta(cache, object);
     315      488296 :                 if (alloc_meta)
     316      488296 :                         __memset(alloc_meta, 0, sizeof(*alloc_meta));
     317             :         }
     318             : 
     319             :         /* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
     320      488300 :         object = set_tag(object, assign_tag(cache, object, true));
     321             : 
     322      488300 :         return (void *)object;
     323             : }
     324             : 
     325     1306150 : static inline bool ____kasan_slab_free(struct kmem_cache *cache,
     326             :                                 void *object, unsigned long ip, bool quarantine)
     327             : {
     328     1306150 :         u8 tag;
     329     1306150 :         void *tagged_object;
     330             : 
     331     1306150 :         tag = get_tag(object);
     332     1306150 :         tagged_object = object;
     333     1306150 :         object = kasan_reset_tag(object);
     334             : 
     335     1306150 :         if (is_kfence_address(object))
     336             :                 return false;
     337             : 
     338     1306150 :         if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
     339             :             object)) {
     340           0 :                 kasan_report_invalid_free(tagged_object, ip);
     341           0 :                 return true;
     342             :         }
     343             : 
     344             :         /* RCU slabs could be legally used after free within the RCU period */
     345     1306698 :         if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
     346             :                 return false;
     347             : 
     348     1264548 :         if (!kasan_byte_accessible(tagged_object)) {
     349           0 :                 kasan_report_invalid_free(tagged_object, ip);
     350           0 :                 return true;
     351             :         }
     352             : 
     353     1264679 :         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
     354             :                         KASAN_KMALLOC_FREE);
     355             : 
     356     1264293 :         if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
     357             :                 return false;
     358             : 
     359     1264109 :         if (kasan_stack_collection_enabled())
     360     1264109 :                 kasan_set_free_info(cache, object, tag);
     361             : 
     362     1265369 :         return kasan_quarantine_put(cache, object);
     363             : }
     364             : 
     365     1305955 : bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
     366             : {
     367     1305955 :         return ____kasan_slab_free(cache, object, ip, true);
     368             : }
     369             : 
     370          16 : static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
     371             : {
     372          16 :         if (ptr != page_address(virt_to_head_page(ptr))) {
     373           0 :                 kasan_report_invalid_free(ptr, ip);
     374           0 :                 return true;
     375             :         }
     376             : 
     377          16 :         if (!kasan_byte_accessible(ptr)) {
     378           0 :                 kasan_report_invalid_free(ptr, ip);
     379           0 :                 return true;
     380             :         }
     381             : 
     382             :         /*
     383             :          * The object will be poisoned by kasan_free_pages() or
     384             :          * kasan_slab_free_mempool().
     385             :          */
     386             : 
     387             :         return false;
     388             : }
     389             : 
     390          16 : void __kasan_kfree_large(void *ptr, unsigned long ip)
     391             : {
     392          16 :         ____kasan_kfree_large(ptr, ip);
     393          16 : }
     394             : 
     395         298 : void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
     396             : {
     397         298 :         struct page *page;
     398             : 
     399         298 :         page = virt_to_head_page(ptr);
     400             : 
     401             :         /*
     402             :          * Even though this function is only called for kmem_cache_alloc and
     403             :          * kmalloc backed mempool allocations, those allocations can still be
     404             :          * !PageSlab() when the size provided to kmalloc is larger than
     405             :          * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
     406             :          */
     407         298 :         if (unlikely(!PageSlab(page))) {
     408           0 :                 if (____kasan_kfree_large(ptr, ip))
     409             :                         return;
     410           0 :                 kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE);
     411             :         } else {
     412         298 :                 ____kasan_slab_free(page->slab_cache, ptr, ip, false);
     413             :         }
     414             : }
     415             : 
     416     1571650 : static void set_alloc_info(struct kmem_cache *cache, void *object,
     417             :                                 gfp_t flags, bool is_kmalloc)
     418             : {
     419     1571650 :         struct kasan_alloc_meta *alloc_meta;
     420             : 
     421             :         /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
     422     1571650 :         if (cache->kasan_info.is_kmalloc && !is_kmalloc)
     423             :                 return;
     424             : 
     425     1460008 :         alloc_meta = kasan_get_alloc_meta(cache, object);
     426     1460008 :         if (alloc_meta)
     427     1460008 :                 kasan_set_track(&alloc_meta->alloc_track, flags);
     428             : }
     429             : 
     430     1473723 : void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
     431             :                                         void *object, gfp_t flags)
     432             : {
     433     1473723 :         u8 tag;
     434     1473723 :         void *tagged_object;
     435             : 
     436     1473723 :         if (gfpflags_allow_blocking(flags))
     437     1281649 :                 kasan_quarantine_reduce();
     438             : 
     439     1473515 :         if (unlikely(object == NULL))
     440             :                 return NULL;
     441             : 
     442     1473515 :         if (is_kfence_address(object))
     443             :                 return (void *)object;
     444             : 
     445             :         /*
     446             :          * Generate and assign random tag for tag-based modes.
     447             :          * Tag is ignored in set_tag() for the generic mode.
     448             :          */
     449     1473515 :         tag = assign_tag(cache, object, false);
     450     1473515 :         tagged_object = set_tag(object, tag);
     451             : 
     452             :         /*
     453             :          * Unpoison the whole object.
     454             :          * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
     455             :          */
     456     1473515 :         kasan_unpoison(tagged_object, cache->object_size);
     457             : 
     458             :         /* Save alloc info (if possible) for non-kmalloc() allocations. */
     459     1473238 :         if (kasan_stack_collection_enabled())
     460     1473238 :                 set_alloc_info(cache, (void *)object, flags, false);
     461             : 
     462     1473238 :         return tagged_object;
     463             : }
     464             : 
     465       98273 : static inline void *____kasan_kmalloc(struct kmem_cache *cache,
     466             :                                 const void *object, size_t size, gfp_t flags)
     467             : {
     468       98273 :         unsigned long redzone_start;
     469       98273 :         unsigned long redzone_end;
     470             : 
     471       98273 :         if (gfpflags_allow_blocking(flags))
     472       94252 :                 kasan_quarantine_reduce();
     473             : 
     474       98272 :         if (unlikely(object == NULL))
     475             :                 return NULL;
     476             : 
     477       98272 :         if (is_kfence_address(kasan_reset_tag(object)))
     478             :                 return (void *)object;
     479             : 
     480             :         /*
     481             :          * The object has already been unpoisoned by kasan_slab_alloc() for
     482             :          * kmalloc() or by kasan_krealloc() for krealloc().
     483             :          */
     484             : 
     485             :         /*
     486             :          * The redzone has byte-level precision for the generic mode.
     487             :          * Partially poison the last object granule to cover the unaligned
     488             :          * part of the redzone.
     489             :          */
     490       98272 :         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
     491       98272 :                 kasan_poison_last_granule((void *)object, size);
     492             : 
     493             :         /* Poison the aligned part of the redzone. */
     494       98273 :         redzone_start = round_up((unsigned long)(object + size),
     495             :                                 KASAN_GRANULE_SIZE);
     496       98273 :         redzone_end = round_up((unsigned long)(object + cache->object_size),
     497             :                                 KASAN_GRANULE_SIZE);
     498       98273 :         kasan_poison((void *)redzone_start, redzone_end - redzone_start,
     499             :                            KASAN_KMALLOC_REDZONE);
     500             : 
     501             :         /*
     502             :          * Save alloc info (if possible) for kmalloc() allocations.
     503             :          * This also rewrites the alloc info when called from kasan_krealloc().
     504             :          */
     505       98273 :         if (kasan_stack_collection_enabled())
     506       98273 :                 set_alloc_info(cache, (void *)object, flags, true);
     507             : 
     508             :         /* Keep the tag that was set by kasan_slab_alloc(). */
     509       98273 :         return (void *)object;
     510             : }
     511             : 
     512       98178 : void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
     513             :                                         size_t size, gfp_t flags)
     514             : {
     515       98178 :         return ____kasan_kmalloc(cache, object, size, flags);
     516             : }
     517             : EXPORT_SYMBOL(__kasan_kmalloc);
     518             : 
     519          27 : void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
     520             :                                                 gfp_t flags)
     521             : {
     522          27 :         unsigned long redzone_start;
     523          27 :         unsigned long redzone_end;
     524             : 
     525          27 :         if (gfpflags_allow_blocking(flags))
     526          27 :                 kasan_quarantine_reduce();
     527             : 
     528          27 :         if (unlikely(ptr == NULL))
     529             :                 return NULL;
     530             : 
     531             :         /*
     532             :          * The object has already been unpoisoned by kasan_alloc_pages() for
     533             :          * alloc_pages() or by kasan_krealloc() for krealloc().
     534             :          */
     535             : 
     536             :         /*
     537             :          * The redzone has byte-level precision for the generic mode.
     538             :          * Partially poison the last object granule to cover the unaligned
     539             :          * part of the redzone.
     540             :          */
     541          27 :         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
     542          27 :                 kasan_poison_last_granule(ptr, size);
     543             : 
     544             :         /* Poison the aligned part of the redzone. */
     545          27 :         redzone_start = round_up((unsigned long)(ptr + size),
     546             :                                 KASAN_GRANULE_SIZE);
     547          27 :         redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
     548          27 :         kasan_poison((void *)redzone_start, redzone_end - redzone_start,
     549             :                      KASAN_PAGE_REDZONE);
     550             : 
     551          27 :         return (void *)ptr;
     552             : }
     553             : 
     554          92 : void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
     555             : {
     556          92 :         struct page *page;
     557             : 
     558          92 :         if (unlikely(object == ZERO_SIZE_PTR))
     559             :                 return (void *)object;
     560             : 
     561             :         /*
     562             :          * Unpoison the object's data.
     563             :          * Part of it might already have been unpoisoned, but it's unknown
     564             :          * how big that part is.
     565             :          */
     566          92 :         kasan_unpoison(object, size);
     567             : 
     568          92 :         page = virt_to_head_page(object);
     569             : 
     570             :         /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
     571          92 :         if (unlikely(!PageSlab(page)))
     572           0 :                 return __kasan_kmalloc_large(object, size, flags);
     573             :         else
     574          92 :                 return ____kasan_kmalloc(page->slab_cache, object, size, flags);
     575             : }
     576             : 
     577       10255 : bool __kasan_check_byte(const void *address, unsigned long ip)
     578             : {
     579       10255 :         if (!kasan_byte_accessible(address)) {
     580           0 :                 kasan_report((unsigned long)address, 1, false, ip);
     581           0 :                 return false;
     582             :         }
     583             :         return true;
     584             : }

Generated by: LCOV version 1.14