LCOV - code coverage report
Current view: top level - include/linux - uaccess.h (source / functions)
Test:      landlock.info
Date:      2021-04-22 12:43:58
Lines:     49 hit / 62 total (79.0 %)
Functions: 0 hit / 0 total (-)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef __LINUX_UACCESS_H__
       3             : #define __LINUX_UACCESS_H__
       4             : 
       5             : #include <linux/fault-inject-usercopy.h>
       6             : #include <linux/instrumented.h>
       7             : #include <linux/minmax.h>
       8             : #include <linux/sched.h>
       9             : #include <linux/thread_info.h>
      10             : 
      11             : #include <asm/uaccess.h>
      12             : 
      13             : #ifdef CONFIG_SET_FS
      14             : /*
      15             :  * Force the uaccess routines to be wired up for actual userspace access,
      16             :  * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
      17             :  * using force_uaccess_end below.
      18             :  */
      19             : static inline mm_segment_t force_uaccess_begin(void)
      20             : {
      21             :         mm_segment_t fs = get_fs();
      22             : 
      23             :         set_fs(USER_DS);
      24             :         return fs;
      25             : }
      26             : 
      27             : static inline void force_uaccess_end(mm_segment_t oldfs)
      28             : {
      29             :         set_fs(oldfs);
      30             : }
      31             : #else /* CONFIG_SET_FS */
      32             : typedef struct {
      33             :         /* empty dummy */
      34             : } mm_segment_t;
      35             : 
      36             : #ifndef TASK_SIZE_MAX
      37             : #define TASK_SIZE_MAX                   TASK_SIZE
      38             : #endif
      39             : 
      40             : #define uaccess_kernel()                (false)
      41             : #define user_addr_max()                 (TASK_SIZE_MAX)
      42             : 
      43        1021 : static inline mm_segment_t force_uaccess_begin(void)
      44             : {
      45        1021 :         return (mm_segment_t) { };
      46             : }
      47             : 
      48           0 : static inline void force_uaccess_end(mm_segment_t oldfs)
      49             : {
      50           0 : }
      51             : #endif /* CONFIG_SET_FS */
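
A minimal usage sketch, assuming a hypothetical helper example_read_user_word() that is not part of this header: code that must perform a genuine userspace access while a set_fs(KERNEL_DS) override may still be in effect brackets the access with this pair. On !CONFIG_SET_FS kernels the pair compiles down to nothing.

        /* Hedged sketch: hypothetical helper, not defined in this header. */
        static int example_read_user_word(u32 __user *uptr, u32 *out)
        {
                mm_segment_t old_fs = force_uaccess_begin();
                int ret = 0;

                if (copy_from_user(out, uptr, sizeof(*out)))
                        ret = -EFAULT;

                force_uaccess_end(old_fs);
                return ret;
        }
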
      52             : 
      53             : /*
      54             :  * Architectures should provide two primitives (raw_copy_{to,from}_user())
      55             :  * and get rid of their private instances of copy_{to,from}_user() and
      56             :  * __copy_{to,from}_user{,_inatomic}().
      57             :  *
      58             :  * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
      59             :  * return the amount left to copy.  They should assume that access_ok() has
      60             :  * already been checked (and succeeded); they should *not* zero-pad anything.
      61             :  * No KASAN or object size checks either - those belong here.
      62             :  *
      63             :  * Both of these functions should attempt to copy size bytes starting at from
      64             :  * into the area starting at to.  They must not fetch or store anything
      65             :  * outside of those areas.  Return value must be between 0 (everything
      66             :  * copied successfully) and size (nothing copied).
      67             :  *
      68             :  * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
      69             :  * at to must become equal to the bytes fetched from the corresponding area
      70             :  * starting at from.  All data past to + size - N must be left unmodified.
      71             :  *
      72             :  * If copying succeeds, the return value must be 0.  If some data cannot be
      73             :  * fetched, it is permitted to copy less than had been fetched; the only
      74             :  * hard requirement is that not storing anything at all (i.e. returning size)
      75             :  * should happen only when nothing could be copied.  In other words, you don't
      76             :  * have to squeeze as much as possible - it is allowed, but not necessary.
      77             :  *
       78             :  * For raw_copy_from_user(), "to" always points to kernel memory, so no
       79             :  * faults should happen on store.  Interpretation of "from" is affected by set_fs().
      80             :  * For raw_copy_to_user() it's the other way round.
      81             :  *
       82             :  * Both can be inlined - it's up to architectures whether they want to bother
       83             :  * with that.  They should not be used directly; they are used to implement
       84             :  * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
      85             :  * that are used instead.  Out of those, __... ones are inlined.  Plain
      86             :  * copy_{to,from}_user() might or might not be inlined.  If you want them
      87             :  * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
      88             :  *
      89             :  * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
      90             :  * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
      91             :  * at all; their callers absolutely must check the return value.
      92             :  *
      93             :  * Biarch ones should also provide raw_copy_in_user() - similar to the above,
      94             :  * but both source and destination are __user pointers (affected by set_fs()
      95             :  * as usual) and both source and destination can trigger faults.
      96             :  */
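
To illustrate the contract only, here is a sketch of what the simplest possible port could provide (assumption: a hypothetical nommu-style architecture where user addresses are directly dereferenceable; no real architecture is implied):

        /* Hypothetical nommu-style port: user memory is plain memory, so the
         * primitive is a memcpy().  No access_ok() (the caller already did it),
         * no zero-padding, and the return value is the number of bytes NOT copied.
         */
        static inline unsigned long
        raw_copy_from_user(void *to, const void __user *from, unsigned long n)
        {
                memcpy(to, (__force const void *)from, n);
                return 0;       /* a faulting port would return the remainder */
        }
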
      97             : 
      98             : static __always_inline __must_check unsigned long
      99           1 : __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
     100             : {
     101           1 :         instrument_copy_from_user(to, from, n);
     102           1 :         check_object_size(to, n, false);
     103           1 :         return raw_copy_from_user(to, from, n);
     104             : }
     105             : 
     106             : static __always_inline __must_check unsigned long
     107        1306 : __copy_from_user(void *to, const void __user *from, unsigned long n)
     108             : {
     109        1306 :         might_fault();
     110        1306 :         if (should_fail_usercopy())
     111             :                 return n;
     112        1306 :         instrument_copy_from_user(to, from, n);
     113        1306 :         check_object_size(to, n, false);
     114        1306 :         return raw_copy_from_user(to, from, n);
     115             : }
     116             : 
     117             : /**
     118             :  * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
     119             :  * @to:   Destination address, in user space.
     120             :  * @from: Source address, in kernel space.
     121             :  * @n:    Number of bytes to copy.
     122             :  *
     123             :  * Context: User context only.
     124             :  *
     125             :  * Copy data from kernel space to user space.  Caller must check
     126             :  * the specified block with access_ok() before calling this function.
      127             :  * The caller should also make sure the user space address is pinned,
      128             :  * so that the copy does not take a page fault and sleep.
     129             :  */
     130             : static __always_inline __must_check unsigned long
     131           0 : __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
     132             : {
     133           0 :         if (should_fail_usercopy())
     134             :                 return n;
     135           0 :         instrument_copy_to_user(to, from, n);
     136           0 :         check_object_size(from, n, true);
     137           0 :         return raw_copy_to_user(to, from, n);
     138             : }
     139             : 
     140             : static __always_inline __must_check unsigned long
     141        2406 : __copy_to_user(void __user *to, const void *from, unsigned long n)
     142             : {
     143        2406 :         might_fault();
     144        2406 :         if (should_fail_usercopy())
     145             :                 return n;
     146        2406 :         instrument_copy_to_user(to, from, n);
     147        2406 :         check_object_size(from, n, true);
     148        2406 :         return raw_copy_to_user(to, from, n);
     149             : }
     150             : 
     151             : #ifdef INLINE_COPY_FROM_USER
     152             : static inline __must_check unsigned long
     153             : _copy_from_user(void *to, const void __user *from, unsigned long n)
     154             : {
     155             :         unsigned long res = n;
     156             :         might_fault();
     157             :         if (!should_fail_usercopy() && likely(access_ok(from, n))) {
     158             :                 instrument_copy_from_user(to, from, n);
     159             :                 res = raw_copy_from_user(to, from, n);
     160             :         }
     161             :         if (unlikely(res))
     162             :                 memset(to + (n - res), 0, res);
     163             :         return res;
     164             : }
     165             : #else
     166             : extern __must_check unsigned long
     167             : _copy_from_user(void *, const void __user *, unsigned long);
     168             : #endif
     169             : 
     170             : #ifdef INLINE_COPY_TO_USER
     171             : static inline __must_check unsigned long
     172             : _copy_to_user(void __user *to, const void *from, unsigned long n)
     173             : {
     174             :         might_fault();
     175             :         if (should_fail_usercopy())
     176             :                 return n;
     177             :         if (access_ok(to, n)) {
     178             :                 instrument_copy_to_user(to, from, n);
     179             :                 n = raw_copy_to_user(to, from, n);
     180             :         }
     181             :         return n;
     182             : }
     183             : #else
     184             : extern __must_check unsigned long
     185             : _copy_to_user(void __user *, const void *, unsigned long);
     186             : #endif
     187             : 
     188             : static __always_inline unsigned long __must_check
     189       98096 : copy_from_user(void *to, const void __user *from, unsigned long n)
     190             : {
     191      107363 :         if (likely(check_copy_size(to, n, false)))
     192       98096 :                 n = _copy_from_user(to, from, n);
     193       98014 :         return n;
     194             : }
     195             : 
     196             : static __always_inline unsigned long __must_check
     197       61881 : copy_to_user(void __user *to, const void *from, unsigned long n)
     198             : {
     199       67483 :         if (likely(check_copy_size(from, n, true)))
     200       61881 :                 n = _copy_to_user(to, from, n);
     201       61703 :         return n;
     202             : }
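
The usual calling convention, as a hedged sketch (example_double_value() is a made-up handler): both helpers return the number of bytes that could not be copied, and callers normally turn any nonzero result into -EFAULT.

        /* Hypothetical handler showing the standard return-value handling. */
        static long example_double_value(u64 __user *uptr)
        {
                u64 val;

                if (copy_from_user(&val, uptr, sizeof(val)))
                        return -EFAULT; /* on a short copy the tail of val was zeroed */

                val *= 2;

                if (copy_to_user(uptr, &val, sizeof(val)))
                        return -EFAULT;

                return 0;
        }
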
     203             : #ifdef CONFIG_COMPAT
     204             : static __always_inline unsigned long __must_check
     205           0 : copy_in_user(void __user *to, const void __user *from, unsigned long n)
     206             : {
     207           0 :         might_fault();
     208           0 :         if (access_ok(to, n) && access_ok(from, n))
     209           0 :                 n = raw_copy_in_user(to, from, n);
     210           0 :         return n;
     211             : }
     212             : #endif
     213             : 
     214             : #ifndef copy_mc_to_kernel
     215             : /*
     216             :  * Without arch opt-in this generic copy_mc_to_kernel() will not handle
     217             :  * #MC (or arch equivalent) during source read.
     218             :  */
     219             : static inline unsigned long __must_check
     220             : copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
     221             : {
     222             :         memcpy(dst, src, cnt);
     223             :         return 0;
     224             : }
     225             : #endif
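
A hedged usage sketch (example_copy_possibly_poisoned() is hypothetical): on an architecture that opts in, a nonzero return is the number of bytes left uncopied after a machine check; with this generic fallback the return is always 0.

        /* Hypothetical caller copying from possibly-poisoned source memory. */
        static int example_copy_possibly_poisoned(void *dst, const void *src, size_t len)
        {
                unsigned long rem = copy_mc_to_kernel(dst, src, len);

                return rem ? -EIO : 0;  /* rem > 0 only if the arch caught an #MC mid-copy */
        }
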
     226             : 
     227      193267 : static __always_inline void pagefault_disabled_inc(void)
     228             : {
     229      193267 :         current->pagefault_disabled++;
     230             : }
     231             : 
     232      193262 : static __always_inline void pagefault_disabled_dec(void)
     233             : {
     234      193262 :         current->pagefault_disabled--;
     235             : }
     236             : 
     237             : /*
     238             :  * These routines enable/disable the pagefault handler. If disabled, it will
     239             :  * not take any locks and go straight to the fixup table.
     240             :  *
     241             :  * User access methods will not sleep when called from a pagefault_disabled()
     242             :  * environment.
     243             :  */
     244      193267 : static inline void pagefault_disable(void)
     245             : {
     246      193267 :         pagefault_disabled_inc();
     247             :         /*
     248             :          * make sure to have issued the store before a pagefault
     249             :          * can hit.
     250             :          */
     251      193267 :         barrier();
     252          98 : }
     253             : 
     254      193261 : static inline void pagefault_enable(void)
     255             : {
     256             :         /*
     257             :          * make sure to issue those last loads/stores before enabling
     258             :          * the pagefault handler again.
     259             :          */
     260      193261 :         barrier();
     261      193261 :         pagefault_disabled_dec();
     262        1644 : }
     263             : 
     264             : /*
     265             :  * Is the pagefault handler disabled? If so, user access methods will not sleep.
     266             :  */
     267      715666 : static inline bool pagefault_disabled(void)
     268             : {
     269      715666 :         return current->pagefault_disabled != 0;
     270             : }
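
A hedged sketch of the common pattern (example_peek_user_atomic() is hypothetical, and access_ok() on src is assumed to have been checked by the caller): disable the pagefault handler, attempt a non-sleeping copy, and let the caller retry with a sleeping copy_from_user() if it faulted.

        /* Hypothetical opportunistic, non-sleeping read of user memory. */
        static int example_peek_user_atomic(void *dst, const void __user *src, size_t len)
        {
                unsigned long rem;

                pagefault_disable();
                rem = __copy_from_user_inatomic(dst, src, len);
                pagefault_enable();

                return rem ? -EFAULT : 0;
        }
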
     271             : 
     272             : /*
     273             :  * The pagefault handler is in general disabled by pagefault_disable() or
     274             :  * when in irq context (via in_atomic()).
     275             :  *
     276             :  * This function should only be used by the fault handlers. Other users should
     277             :  * stick to pagefault_disabled().
     278             :  * Please NEVER use preempt_disable() to disable the fault handler. With
     279             :  * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
     280             :  * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
     281             :  */
     282             : #define faulthandler_disabled() (pagefault_disabled() || in_atomic())
     283             : 
     284             : #ifndef ARCH_HAS_NOCACHE_UACCESS
     285             : 
     286             : static inline __must_check unsigned long
     287             : __copy_from_user_inatomic_nocache(void *to, const void __user *from,
     288             :                                   unsigned long n)
     289             : {
     290             :         return __copy_from_user_inatomic(to, from, n);
     291             : }
     292             : 
     293             : #endif          /* ARCH_HAS_NOCACHE_UACCESS */
     294             : 
     295             : extern __must_check int check_zeroed_user(const void __user *from, size_t size);
     296             : 
     297             : /**
     298             :  * copy_struct_from_user: copy a struct from userspace
     299             :  * @dst:   Destination address, in kernel space. This buffer must be @ksize
     300             :  *         bytes long.
     301             :  * @ksize: Size of @dst struct.
     302             :  * @src:   Source address, in userspace.
     303             :  * @usize: (Alleged) size of @src struct.
     304             :  *
     305             :  * Copies a struct from userspace to kernel space, in a way that guarantees
     306             :  * backwards-compatibility for struct syscall arguments (as long as future
     307             :  * struct extensions are made such that all new fields are *appended* to the
     308             :  * old struct, and zeroed-out new fields have the same meaning as the old
     309             :  * struct).
     310             :  *
     311             :  * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
     312             :  * The recommended usage is something like the following:
     313             :  *
     314             :  *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
     315             :  *   {
     316             :  *      int err;
     317             :  *      struct foo karg = {};
     318             :  *
     319             :  *      if (usize > PAGE_SIZE)
     320             :  *        return -E2BIG;
     321             :  *      if (usize < FOO_SIZE_VER0)
     322             :  *        return -EINVAL;
     323             :  *
     324             :  *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
     325             :  *      if (err)
     326             :  *        return err;
     327             :  *
     328             :  *      // ...
     329             :  *   }
     330             :  *
     331             :  * There are three cases to consider:
     332             :  *  * If @usize == @ksize, then it's copied verbatim.
     333             :  *  * If @usize < @ksize, then the userspace has passed an old struct to a
     334             :  *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
     335             :  *    are to be zero-filled.
     336             :  *  * If @usize > @ksize, then the userspace has passed a new struct to an
     337             :  *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
     338             :  *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
     339             :  *
     340             :  * Returns (in all cases, some data may have been copied):
     341             :  *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
     342             :  *  * -EFAULT: access to userspace failed.
     343             :  */
     344             : static __always_inline __must_check int
     345          82 : copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
     346             :                       size_t usize)
     347             : {
     348          82 :         size_t size = min(ksize, usize);
     349          82 :         size_t rest = max(ksize, usize) - size;
     350             : 
     351             :         /* Deal with trailing bytes. */
     352          82 :         if (usize < ksize) {
     353           0 :                 memset(dst + size, 0, rest);
     354          82 :         } else if (usize > ksize) {
     355           2 :                 int ret = check_zeroed_user(src + size, rest);
     356           2 :                 if (ret <= 0)
     357           1 :                         return ret ?: -E2BIG;
     358             :         }
     359             :         /* Copy the interoperable parts of the struct. */
     360          81 :         if (copy_from_user(dst, src, size))
     361             :                 return -EFAULT;
     362             :         return 0;
     363             : }
     364             : 
     365             : bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
     366             : 
     367             : long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
     368             : long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
     369             : 
     370             : long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
     371             : long notrace copy_to_user_nofault(void __user *dst, const void *src,
     372             :                 size_t size);
     373             : 
     374             : long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
     375             :                 long count);
     376             : 
     377             : long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
     378             :                 long count);
     379             : long strnlen_user_nofault(const void __user *unsafe_addr, long count);
     380             : 
     381             : /**
     382             :  * get_kernel_nofault(): safely attempt to read from a location
     383             :  * @val: read into this variable
     384             :  * @ptr: address to read from
     385             :  *
     386             :  * Returns 0 on success, or -EFAULT.
     387             :  */
     388             : #define get_kernel_nofault(val, ptr) ({                         \
     389             :         const typeof(val) *__gk_ptr = (ptr);                    \
     390             :         copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
     391             : })
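
A hedged usage sketch (example_peek_kernel() is hypothetical): read through a kernel pointer that may be bogus without risking an oops.

        /* Hypothetical probe of a possibly-invalid kernel pointer. */
        static unsigned long example_peek_kernel(const unsigned long *maybe_bad_ptr)
        {
                unsigned long val;

                if (get_kernel_nofault(val, maybe_bad_ptr))
                        return 0;       /* -EFAULT: not mapped or not readable */

                return val;
        }
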
     392             : 
     393             : #ifndef user_access_begin
     394             : #define user_access_begin(ptr,len) access_ok(ptr, len)
     395             : #define user_access_end() do { } while (0)
     396             : #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
     397             : #define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
     398             : #define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
     399             : #define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
     400             : static inline unsigned long user_access_save(void) { return 0UL; }
     401             : static inline void user_access_restore(unsigned long flags) { }
     402             : #endif
     403             : #ifndef user_write_access_begin
     404             : #define user_write_access_begin user_access_begin
     405             : #define user_write_access_end user_access_end
     406             : #endif
     407             : #ifndef user_read_access_begin
     408             : #define user_read_access_begin user_access_begin
     409             : #define user_read_access_end user_access_end
     410             : #endif
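
A hedged sketch of the intended calling pattern (example_fill_user_array() is hypothetical; count is assumed small and already validated): one access_ok() check up front via user_access_begin(), a run of unsafe_put_user() stores that branch to a label on fault, and a matching user_access_end() on every exit path.

        /* Hypothetical helper: fill a small user array with one access_ok() check. */
        static int example_fill_user_array(u32 __user *uarr, u32 start, unsigned int count)
        {
                unsigned int i;

                if (!user_access_begin(uarr, count * sizeof(*uarr)))
                        return -EFAULT;

                for (i = 0; i < count; i++)
                        unsafe_put_user(start + i, &uarr[i], efault);

                user_access_end();
                return 0;

        efault:
                user_access_end();
                return -EFAULT;
        }
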
     411             : 
     412             : #ifdef CONFIG_HARDENED_USERCOPY
     413             : void usercopy_warn(const char *name, const char *detail, bool to_user,
     414             :                    unsigned long offset, unsigned long len);
     415             : void __noreturn usercopy_abort(const char *name, const char *detail,
     416             :                                bool to_user, unsigned long offset,
     417             :                                unsigned long len);
     418             : #endif
     419             : 
     420             : #endif          /* __LINUX_UACCESS_H__ */

Generated by: LCOV version 1.14