x86: pass argument to putuser_64 functions in ax register.
[linux-2.6] include/asm-x86/uaccess_64.h
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

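/*
 * Illustrative sketch (not part of this header; routine and buffer names
 * are hypothetical): the classic pattern for widening the address limit
 * so a routine that expects a __user pointer can temporarily be fed a
 * kernel buffer:
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        err = some_read_routine((char __user *)kernel_buf, len);
 *        set_fs(old_fs);
 */
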
#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &                      \
                           (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)                                      \
({                                                                      \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("# range_ok\n\r"                                            \
            "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"         \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
              "g" (current_thread_info()->addr_limit.seg));             \
        flag;                                                           \
})

#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)

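/*
 * Usage sketch (hypothetical caller): a single access_ok() check lets the
 * cheaper unchecked "__" variants defined below be used for several
 * accesses to the same user buffer:
 *
 *        u32 lo, hi;
 *
 *        if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *                return -EFAULT;
 *        if (__get_user(lo, uptr) || __get_user(hi, uptr + 1))
 *                return -EFAULT;
 */
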
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE

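/*
 * Rough sketch of how a fault in the routines below gets resolved (an
 * approximation, not the literal fixup_exception() implementation): the
 * page-fault handler looks the faulting instruction up in the sorted
 * exception table and, if an entry exists, resumes at the fixup address:
 *
 *        const struct exception_table_entry *e;
 *
 *        e = search_exception_tables(regs->ip);
 *        if (e) {
 *                regs->ip = e->fixup;    (resume in the out-of-line stub)
 *                return 1;
 *        }
 *        return 0;                       (no fixup: genuine kernel fault)
 */
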
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)               \
        asm volatile("call __get_user_" #size         \
                     : "=a" (ret),"=d" (x)            \
                     : "0" (ptr))                     \

/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)                                                \
({                                                                      \
        unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 2:                                                         \
                __get_user_x(2, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 4:                                                         \
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 8:                                                         \
                __get_user_x(8, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        default:                                                        \
                __get_user_bad();                                       \
                break;                                                  \
        }                                                               \
        (x) = (__force typeof(*(ptr)))__val_gu;                         \
        __ret_gu;                                                       \
})

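/*
 * Usage sketch (hypothetical caller): get_user() picks the right
 * __get_user_N helper from the pointee size and returns 0 or -EFAULT:
 *
 *        u32 val;
 *
 *        if (get_user(val, (u32 __user *)arg))
 *                return -EFAULT;
 */
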
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     :"=a" (ret)                                        \
                     :"c" (ptr),"a" (x)                                 \
                     :"ebx")

#define put_user(x, ptr)                                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

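/*
 * Usage sketch (hypothetical caller): put_user() is the store-direction
 * counterpart; the checked form validates the pointer itself, while
 * __put_user() relies on an earlier access_ok() check:
 *
 *        if (put_user(result, (u32 __user *)uresult))
 *                return -EFAULT;
 */
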
#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})


#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))


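/*
 * For orientation, roughly what __put_user_size(x, ptr, 4, err) emits
 * (register choices are illustrative, not what the compiler necessarily
 * picks; -14 is -EFAULT):
 *
 *        1:      movl %ecx,(%rdx)        the store that may fault
 *        2:                              normal continuation
 *        .section .fixup,"ax"
 *        3:      mov $-14,%eax           err = -EFAULT
 *                jmp 2b
 *        .previous
 *        _ASM_EXTABLE(1b, 3b)            records the 1b -> 3b fixup pair
 */
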
#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

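/*
 * Usage sketch (hypothetical caller and struct): unlike get_user() and
 * put_user(), the bulk copy routines return the number of bytes that
 * could NOT be copied, so zero means success:
 *
 *        struct my_args args;
 *
 *        if (copy_from_user(&args, uargs, sizeof(args)))
 *                return -EFAULT;
 */
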
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                return ret;
        case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                return ret;
        case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                return ret;
        case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

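/*
 * Illustrative note: because the switch above is on a compile-time
 * constant, a call like the (hypothetical) one below reduces to a single
 * inlined mov plus an exception-table entry instead of a call to
 * copy_user_generic():
 *
 *        u64 word;
 *
 *        if (__copy_from_user(&word, usrc, sizeof(word)))
 *                return -EFAULT;
 */
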
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                return ret;
        case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                return ret;
        case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                return ret;
        case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("":::"memory");
                __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }

        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

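/*
 * Usage sketch (hypothetical caller): strncpy_from_user() returns the
 * length of the copied string (excluding the NUL) or a negative error,
 * and clear_user() returns the number of bytes left uncleared:
 *
 *        char name[32];
 *        long len;
 *
 *        len = strncpy_from_user(name, uname, sizeof(name));
 *        if (len < 0)
 *                return len;
 *        if (clear_user(ubuf, size))
 *                return -EFAULT;
 */
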
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

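/*
 * Usage sketch (hypothetical caller): the _nocache variants go through
 * __copy_user_nocache(), which uses non-temporal stores, so a large copy
 * whose destination will not be read back soon does not evict useful
 * cache lines:
 *
 *        if (__copy_from_user_nocache(dma_buf, ubuf, len))
 *                return -EFAULT;
 */
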
#endif /* __X86_64_UACCESS_H */