/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
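
/*
 * Illustrative sketch (not part of the original header): why the constant
 * case compares against "limit - size" instead of "addr + size". With a
 * hypothetical addr = ULONG_MAX - 1 and size = 4, "addr + size" would
 * wrap around to 2 and slip past a naive "addr + size <= limit" test,
 * while "addr > limit - size" correctly rejects it (limit - size cannot
 * underflow for a small sizeof()-derived size). The variable-size branch
 * instead detects the wrap explicitly via "addr < size" after adding.
 */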

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));		\
})
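
/*
 * Usage sketch (illustrative only; "uaddr" and "len" are hypothetical
 * arguments, e.g. from an ioctl handler):
 *
 *	if (!access_ok(uaddr, len))
 *		return -EFAULT;
 *	... the range may now be used with __get_user()/__put_user()
 *	    or the unsafe accessors further below ...
 */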

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)

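/*
 * Example expansion (illustrative, not part of the original header):
 * for a hypothetical "u16 *p", __inttype(*p) walks the __typefits()
 * chain until the value fits: sizeof(u16) > sizeof(char), but
 * sizeof(u16) <= sizeof(short), so the result is "unsigned short".
 * A 64-bit value falls through every level to the 0ULL default,
 * i.e. "unsigned long long".
 */
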
/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
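
/*
 * Usage sketch (illustrative only; "arg" is a hypothetical user pointer
 * handed in by a syscall):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */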

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label)			\
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     : "ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
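
/*
 * Usage sketch (illustrative only): the mirror of the get_user() example
 * above, with a hypothetical "uaddr" destination:
 *
 *	if (put_user(42, (int __user *)uaddr))
 *		return -EFAULT;
 */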

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	unsigned char x_u8__;						\
	case 1:								\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval)				\
	__get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xorl %k[output],%k[output]\n"			\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
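
/*
 * Usage sketch (illustrative only): batching several accesses under one
 * STAC/CLAC pair; "uptr" is a hypothetical "u32 __user *" already known
 * to the caller:
 *
 *	u32 lo;
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uptr[0], Efault);
 *	unsafe_put_user(lo + 1, &uptr[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */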

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

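/*
 * Usage sketch (illustrative only): like the single-value unsafe
 * accessors above, unsafe_copy_to_user() must run inside a
 * user_access_begin()/user_access_end() section; "ubuf", "kbuf" and
 * "len" are hypothetical:
 *
 *	if (!user_access_begin(ubuf, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(ubuf, kbuf, len, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */
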
#define HAVE_GET_KERNEL_NOFAULT

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

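/*
 * Usage sketch (illustrative only): these hooks back the generic
 * copy_from_kernel_nofault()/copy_to_kernel_nofault() helpers; a
 * hypothetical direct use against a possibly-unmapped kernel
 * address "kptr":
 *
 *	unsigned long val;
 *
 *	__get_kernel_nofault(&val, kptr, unsigned long, Efault);
 *	return val;
 * Efault:
 *	return 0;
 */
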
#endif /* _ASM_X86_UACCESS_H */