LCOV code coverage report for include/linux/highmem.h (generated by LCOV 1.14)
Test: landlock.info; Lines: 47 of 64 hit (73.4 %); Functions: 6 of 8 hit (75.0 %)
Date: 2021-04-22 12:43:58

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:       Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);
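
/*
 * Example (illustrative sketch, not mandated by this header): a typical
 * kmap()/kunmap() pair from preemptible task context. "page" is assumed
 * to be a valid struct page the caller holds a reference on.
 *
 *	void *vaddr = kmap(page);
 *
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * The address stays valid, and may be handed to other contexts, until
 * the kunmap().
 */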

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:       Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:       The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);
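
/*
 * Example (illustrative sketch): kmap_to_page() recovers the struct page
 * behind an address obtained from kmap(); "page" is a hypothetical mapped
 * page.
 *
 *	void *vaddr = kmap(page);
 *
 *	WARN_ON(kmap_to_page(vaddr) != page);
 *	kunmap(page);
 */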

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page:       Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
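
/*
 * Example (illustrative sketch): a short-lived local mapping to copy data
 * into a possibly-highmem page. "page", "buf", "offset" and "len" are
 * hypothetical; offset + len must not exceed PAGE_SIZE.
 *
 *	char *vaddr = kmap_local_page(page);
 *
 *	memcpy(vaddr + offset, buf, len);
 *	kunmap_local(vaddr);
 *
 * The pointer must not be stored or handed to another context.
 */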

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:       Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);
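
/*
 * Example (illustrative sketch) of converting the deprecated pattern to
 * kmap_local_page(). Unless the section really depends on pagefaults and
 * preemption being disabled, the replacement is a drop-in:
 *
 *	vaddr = kmap_atomic(page);	before (deprecated)
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr);
 *
 *	vaddr = kmap_local_page(page);	after
 *	clear_page(vaddr);
 *	kunmap_local(vaddr);
 */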

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr:       Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
 * preemption.
 */

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
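
/*
 * Example (illustrative sketch): reporting highmem utilization from a
 * debug printout. The format specifiers follow the return types declared
 * above (unsigned long and unsigned int).
 *
 *	pr_info("highmem: %lu pages total, %u free\n",
 *		totalhigh_pages(), nr_free_highpages());
 */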

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows can
 * be migrated in the future using move_pages() or reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
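
/*
 * Example (illustrative sketch): how an anonymous fault handler might use
 * this helper; "vma" and "vmf" (a struct vm_fault) are hypothetical, and
 * the error handling is reduced to the allocation failure case.
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */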

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
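
/*
 * Example (illustrative sketch): zeroing everything around a sub-page
 * range [from, to) that is about to be written, as a filesystem might do
 * before a partial-page write; "from" and "to" are hypothetical offsets
 * with 0 <= from <= to <= PAGE_SIZE. This zeroes [0, from) and
 * [to, PAGE_SIZE) in one mapping.
 *
 *	zero_user_segments(page, 0, from, to, PAGE_SIZE);
 */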

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
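
/*
 * Example (illustrative sketch): zeroing the tail of a partially valid
 * page, e.g. past EOF; "offset" is a hypothetical byte offset of the
 * first invalid byte.
 *
 *	zero_user(page, offset, PAGE_SIZE - offset);
 */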

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	kunmap_local(to);
}
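
/*
 * Example (illustrative sketch): bouncing a small record through a kernel
 * buffer with the helpers above; "page", "offset" and "sanitize()" are
 * hypothetical, and offset + sizeof(buf) must not exceed PAGE_SIZE (the
 * VM_BUG_ON in the helpers enforces this).
 *
 *	char buf[64];
 *
 *	memcpy_from_page(buf, page, offset, sizeof(buf));
 *	sanitize(buf);
 *	memcpy_to_page(page, offset, buf, sizeof(buf));
 */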

#endif /* _LINUX_HIGHMEM_H */
