/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string,
			   X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string,
			   X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
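
/*
 * Illustrative sketch, not part of the original header: the boot-time
 * patching performed by alternative_call_2() above amounts to the
 * following runtime dispatch, assuming boot_cpu_has() from
 * <asm/cpufeature.h> as the feature test (the function name below is
 * hypothetical):
 *
 *	static unsigned long copy_user_generic_sketch(void *to,
 *			const void *from, unsigned len)
 *	{
 *		if (boot_cpu_has(X86_FEATURE_ERMS))
 *			return copy_user_enhanced_fast_string(to, from, len);
 *		if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *			return copy_user_generic_string(to, from, len);
 *		return copy_user_generic_unrolled(to, from, len);
 *	}
 *
 * The real macro patches the call target once at boot, so no feature
 * bits are tested on the hot copy path.
 */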

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
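
/*
 * Usage sketch, not part of the original header: callers normally reach
 * these raw helpers through copy_from_user()/copy_to_user() from
 * <linux/uaccess.h>, which add the access_ok() range check and return
 * the number of bytes that could not be copied. A hypothetical handler
 * (struct my_args and uptr are made up for illustration):
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 */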

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
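
/*
 * Illustrative note, not part of the original header: the _nocache and
 * _flushcache variants avoid polluting the CPU cache on large copies;
 * the flushcache form additionally ensures the destination lines are
 * written back, which persistent-memory (pmem/DAX) callers rely on for
 * durability. A hypothetical pmem write path (pmem_dst and ubuf are
 * made up for illustration):
 *
 *	if (__copy_from_user_flushcache(pmem_dst, ubuf, len))
 *		return -EFAULT;
 */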
#endif /* _ASM_X86_UACCESS_64_H */