1 #ifndef __i386_UACCESS_H
2 #define __i386_UACCESS_H
/*
 * User space memory access functions
 */
7 #include <linux/errno.h>
8 #include <linux/thread_info.h>
9 #include <linux/prefetch.h>
10 #include <linux/string.h>
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment threshold state consulted by the Intel-optimized movsl copy
 * routines; cacheline-aligned so the SMP read-mostly access stays cheap.
 * NOTE(review): the struct's member line (and the matching #endif for
 * this #ifdef) are missing from this view of the file -- confirm
 * against the full source before editing.
 */
extern struct movsl_mask {
} ____cacheline_aligned_in_smp movsl_mask;
/*
 * __addr_ok(addr): nonzero when @addr lies below the current thread's
 * address-space limit (addr_limit.seg).  This checks the start address
 * only -- it does not verify that a whole range fits below the limit.
 */
#define __addr_ok(addr) \
	((unsigned long __force)(addr) < \
	(current_thread_info()->addr_limit.seg))

/*
 * Trap for unsupported put_user sizes; presumably left without a
 * definition so that a bad size fails at link time -- TODO confirm.
 */
extern void __put_user_bad(void);
/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax, no clobbers.
 *
 * Out-of-line stubs, one per store size; declared as void(void)
 * because they are only ever invoked through the asm below, never
 * called as ordinary C functions.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
/*
 * Dispatch to the size-specific __put_user_<size> stub: the value is
 * tied into %eax ("0" reuses the output register), the pointer goes in
 * %ecx, and the stub's status comes back in %eax as __ret_pu.
 * NOTE(review): relies on a local __ret_pu declared by the enclosing
 * put_user() expansion, whose declaration line is not visible here.
 * The "ebx" clobber is declared despite the "no clobbers" note above --
 * presumably for the stubs' internal use; confirm against the stubs.
 */
#define __put_user_x(size, x, ptr) \
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/*
 * 64-bit variant: constraint "A" passes the value in the %edx:%eax
 * register pair; pointer in %ecx, status returned in %eax.
 */
#define __put_user_8(x, ptr) \
	asm volatile("call __put_user_8" : "=a" (__ret_pu) \
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
/**
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#ifdef CONFIG_X86_WP_WORKS_OK

/*
 * NOTE(review): several interior lines of this macro are missing from
 * this view (the ({ ... }) statement-expression braces, the __ret_pu
 * declaration, the __pu_val assignment, the case labels with their
 * break statements, and the default arm).  The lines below are kept
 * exactly as found; restore the missing lines from the full source.
 */
#define put_user(x, ptr) \
	__typeof__(*(ptr)) __pu_val; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
		__put_user_x(1, __pu_val, ptr); \
		__put_user_x(2, __pu_val, ptr); \
		__put_user_x(4, __pu_val, ptr); \
		__put_user_8(__pu_val, ptr); \
		__put_user_x(X, __pu_val, ptr); \
/*
 * Fallback put_user for CPUs where the WP bit does not protect
 * supervisor-mode writes: bounce the value through a kernel temporary
 * and defer to __copy_to_user_ll().  Presumably this sits under the
 * #else arm of CONFIG_X86_WP_WORKS_OK (the #else line and the
 * surrounding statement-expression lines are missing from this view --
 * confirm against the full source).
 */
#define put_user(x, ptr) \
	__typeof__(*(ptr))__pus_tmp = x; \
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \
				sizeof(*(ptr))) != 0)) \
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/*
 * Perform the sized user store, collecting 0 or -EFAULT in __pu_err.
 * NOTE(review): the surrounding ({ ... }) lines, the __pu_err
 * declaration, and the trailing __pu_err result expression are missing
 * from this view; lines kept as found.
 */
#define __put_user_nocheck(x, ptr, size) \
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
/*
 * Store a 64-bit value to user space as two 32-bit movl stores; input
 * constraint "A" places @x in the %edx:%eax pair.  A fault on either
 * store is redirected through the exception table to a .fixup stub
 * that sets @err to -EFAULT.
 * NOTE(review): the .fixup stub body (labels 3:/4:, the movl of %3
 * into %0, the jmp, and .previous) plus the output-operand line are
 * missing from this view; lines kept as found.
 */
#define __put_user_u64(x, addr, err) \
	asm volatile("1: movl %%eax,0(%2)\n" \
		     "2: movl %%edx,4(%2)\n" \
		     ".section .fixup,\"ax\"\n" \
		     _ASM_EXTABLE(1b, 4b) \
		     _ASM_EXTABLE(2b, 4b) \
		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#ifdef CONFIG_X86_WP_WORKS_OK

/*
 * Dispatch a user store on sizeof: 1/2/4-byte stores use a single
 * __put_user_asm with the matching operand suffix and constraint,
 * 8-byte stores use the two-instruction __put_user_u64 path.
 * NOTE(review): the switch statement itself, its case labels, break
 * statements, and default arm (__put_user_bad) are missing from this
 * view; lines kept as found.
 */
#define __put_user_size(x, ptr, size, retval, errret) \
	__chk_user_ptr(ptr); \
	__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
	__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
	__put_user_asm(x, ptr, retval, "l", "", "ir", errret); \
	__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \

/*
 * Fallback for !CONFIG_X86_WP_WORKS_OK: route every store through a
 * kernel temporary and __copy_to_user_ll().  Presumably under an #else
 * that is missing from this view -- confirm against the full source.
 */
#define __put_user_size(x, ptr, size, retval, errret) \
	__typeof__(*(ptr))__pus_tmp = x; \
	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
/*
 * __m() launders a user pointer into an "m" operand for the asm below.
 * The deliberately oversized dummy struct keeps gcc from making
 * assumptions about how much memory the asm statement may touch.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 *
 * Single guarded store of @x to user @addr; a fault jumps via the
 * exception table to the .fixup stub, which loads @errret into @err.
 * NOTE(review): the fall-through label (2:), the fixup stub body
 * (3: movl %3,%0; jmp 2b), .previous, and the output-operand line
 * (": \"=r\" (err)") are missing from this view; lines kept as found.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("1: mov"itype" %"rtype"1,%2\n" \
		     ".section .fixup,\"ax\"\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
/*
 * Fetch @size bytes from user @ptr into a long-sized temporary, then
 * narrow to the pointee type and assign to @x; the error code lands in
 * __gu_err (0 or -EFAULT).
 * NOTE(review): the surrounding ({ ... }) lines, the __gu_err
 * declaration, and the trailing __gu_err result expression are missing
 * from this view; lines kept as found.
 */
#define __get_user_nocheck(x, ptr, size) \
	unsigned long __gu_val; \
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
/*
 * Dispatch a user load on @size: 1/2/4-byte loads use __get_user_asm
 * with the matching operand suffix; anything else falls through to
 * __get_user_bad(), which presumably fails the build/link -- confirm.
 * NOTE(review): the switch statement, case labels, break statements,
 * and the default label are missing from this view; lines kept as
 * found.
 */
#define __get_user_size(x, ptr, size, retval, errret) \
	__chk_user_ptr(ptr); \
	__get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
	__get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
	__get_user_asm(x, ptr, retval, "l", "", "=r", errret); \
	(x) = __get_user_bad(); \
/*
 * Single guarded load from user @addr into @x.  On a fault the
 * exception table redirects to the .fixup stub, which zeroes the
 * destination register (the xor line below) and sets @err to @errret.
 * NOTE(review): the fall-through label (2:), the fixup stub's error
 * assignment and jmp, and .previous are missing from this view; lines
 * kept as found.
 */
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("1: mov"itype" %2,%"rtype"1\n" \
		     ".section .fixup,\"ax\"\n" \
		     " xor"itype" %"rtype"1,%"rtype"1\n" \
		     _ASM_EXTABLE(1b, 3b) \
		     : "=r" (err), ltype (x) \
		     : "m" (__m(addr)), "i" (errret), "0" (err))
/*
 * Out-of-line copy primitives; each returns the number of bytes NOT
 * copied (0 on success).  Naming, as used by the inline wrappers
 * below: _nozero variants do not zero-pad the destination tail on a
 * fault; _nocache variants presumably use non-temporal stores to avoid
 * cache pollution -- confirm against their definitions.
 */
unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure he pins the user space address
 * so that the we don't result in page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
	/*
	 * NOTE(review): the function's opening brace, the local return
	 * variable, the switch on n with its case labels and returns,
	 * and the closing braces are missing from this view; lines kept
	 * as found.
	 */
	if (__builtin_constant_p(n)) {
		/* Constant small n: emit a single inline store per size. */
		__put_user_size(*(u8 *)from, (u8 __user *)to,
		__put_user_size(*(u16 *)from, (u16 __user *)to,
		__put_user_size(*(u32 *)from, (u32 __user *)to,
	/* General case: out-of-line copy loop. */
	return __copy_to_user_ll(to, from, n);
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
	/*
	 * NOTE(review): the opening brace and at least one preceding
	 * statement (presumably a might_sleep() check, given the
	 * "may sleep" contract above) are missing from this view.
	 */
	return __copy_to_user_inatomic(to, from, n);
/*
 * Atomic-context copy from user space: must not sleep, and on a fault
 * does NOT zero-pad the destination tail (uses the _nozero fallback).
 */
static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
	/* Avoid zeroing the tail if the copy fails..
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 *
	 * NOTE(review): the opening brace, local return variable, switch
	 * with case labels/returns, and closing braces are missing from
	 * this view; lines kept as found.
	 */
	if (__builtin_constant_p(n)) {
		__get_user_size(*(u8 *)to, from, 1, ret, 1);
		__get_user_size(*(u16 *)to, from, 2, ret, 2);
		__get_user_size(*(u32 *)to, from, 4, ret, 4);
	return __copy_from_user_ll_nozero(to, from, n);
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep.  In this case the
 * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
	/*
	 * NOTE(review): the opening brace, local return variable, switch
	 * with case labels/returns, and closing braces are missing from
	 * this view; lines kept as found.  Note the fallback here is the
	 * zero-padding __copy_from_user_ll(), unlike the _inatomic
	 * variant above.
	 */
	if (__builtin_constant_p(n)) {
		__get_user_size(*(u8 *)to, from, 1, ret, 1);
		__get_user_size(*(u16 *)to, from, 2, ret, 2);
		__get_user_size(*(u32 *)to, from, 4, ret, 4);
	return __copy_from_user_ll(to, from, n);
/* Advertise that this arch provides the *_nocache uaccess variants. */
#define ARCH_HAS_NOCACHE_UACCESS

/*
 * Copy from user space while avoiding cache pollution for large
 * copies (delegates to the _nocache low-level routine); small constant
 * sizes still use the ordinary inline loads.
 */
static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
	/*
	 * NOTE(review): the opening brace, local return variable, switch
	 * with case labels/returns, and closing braces are missing from
	 * this view; lines kept as found.
	 */
	if (__builtin_constant_p(n)) {
		__get_user_size(*(u8 *)to, from, 1, ret, 1);
		__get_user_size(*(u16 *)to, from, 2, ret, 2);
		__get_user_size(*(u32 *)to, from, 4, ret, 4);
	return __copy_from_user_ll_nocache(to, from, n);
/*
 * Atomic-context, cache-avoiding copy from user space: no sleeping,
 * no zero-padding of the tail on a fault.
 * NOTE(review): the final parameter line (unsigned long n), opening
 * brace, and closing brace are missing from this view; lines kept as
 * found.
 */
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
	return __copy_from_user_ll_nocache_nozero(to, from, n);
/*
 * Checked copy/string primitives (these perform their own access_ok()
 * style validation, unlike the __-prefixed variants above).
 * NOTE(review): the trailing parameter lines of copy_from_user() and
 * strncpy_from_user() are missing from this view; lines kept as found.
 */
unsigned long __must_check copy_to_user(void __user *to,
					const void *from, unsigned long n);
unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
long __must_check strncpy_from_user(char *dst, const char __user *src,
long __must_check __strncpy_from_user(char *dst,
				      const char __user *src, long count);
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
/* Bounded strlen_user(); see the strlen_user() kernel-doc above. */
long strnlen_user(const char __user *str, long n);
/*
 * Zero @len bytes of user memory; return the number of bytes that
 * could not be cleared (0 on success).  clear_user() validates the
 * range itself; __clear_user() presumably skips that check and
 * requires a prior access_ok() -- confirm against the definitions.
 */
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
476 #endif /* __i386_UACCESS_H */