#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define __addr_ok(addr) (!((unsigned long)(addr) &		\
			   (current_thread_info()->addr_limit.seg)))

#define ARCH_HAS_SEARCH_EXTABLE

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

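/*
 * The out-of-line __put_user_N helpers use a private calling
 * convention (see putuser.S): address in %rcx, value in %rdx, error
 * code returned in %rax, with %r8 clobbered; hence the constraints
 * in the macro below.
 */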
#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "d" (x)		\
		     : "r8")

#define put_user(x, ptr)					\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)					\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)					\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

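/*
 * Illustrative example (names are hypothetical, not from this file):
 * put_user() validates the user address itself, while the __-prefixed
 * variants skip that check and rely on an earlier access_ok().  A
 * typical ioctl handler, with "arg" the user-supplied pointer:
 *
 *	int val = 42;
 *	if (put_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
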
#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	int __pu_err;					\
	typeof(*(ptr)) __user *__pu_addr = (ptr);	\
	switch (size) {					\
	case 1:						\
		__put_user_x(1, __pu_err, x, __pu_addr); \
		break;					\
	case 2:						\
		__put_user_x(2, __pu_err, x, __pu_addr); \
		break;					\
	case 4:						\
		__put_user_x(4, __pu_err, x, __pu_addr); \
		break;					\
	case 8:						\
		__put_user_x(8, __pu_err, x, __pu_addr); \
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
	__pu_err;					\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

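/*
 * __m() casts the user address to a deliberately oversized structure,
 * so the "m" operand in the asm below is treated as covering whatever
 * object is actually accessed, not just a single word.
 */
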
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))

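/*
 * Fault handling above: if the mov at label 1 takes a fault, the
 * exception table entry emitted by _ASM_EXTABLE(1b, 3b) redirects the
 * fault handler to label 3 in .fixup, which stores the error code
 * (operand 3) into the result (operand 0) and jumps back to label 2,
 * just past the faulting instruction.
 */
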
#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

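/*
 * Illustrative example ("kbuf", "ubuf" and "len" are made-up names):
 * copy_from_user() returns the number of bytes it could NOT copy, so
 * any nonzero result should normally be turned into -EFAULT:
 *
 *	char kbuf[64];
 *	if (len > sizeof(kbuf))
 *		return -EINVAL;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
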
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

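/*
 * __copy_from_user() above (and __copy_to_user() below) open-code the
 * transfer when the size is a compile-time constant, so small fixed
 * copies become one or two inline movs instead of a call to
 * copy_user_generic().  The 10- and 16-byte cases are done as two
 * movs, bailing out early if the first half faults.
 */
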
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}

	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

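/*
 * zerorest selects the fault semantics: nonzero means zero-fill the
 * uncopied tail of the destination after a fault, which is what the
 * sleeping wrapper below wants; the inatomic wrapper passes 0 and
 * leaves the tail untouched.
 */
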
static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */