#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
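
/*
 * Illustrative sketch (not part of the original header): the classic
 * save/override/restore pattern around KERNEL_DS.  __example_read() is
 * a hypothetical stand-in for any routine that takes a __user pointer;
 * with addr_limit widened, it may safely be handed a kernel buffer.
 */
extern long __example_read(char __user *buf, unsigned long len);

static inline long __example_kernel_ds_read(char *kbuf, unsigned long len)
{
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);      /* checks now pass for kernel addresses */
        ret = __example_read((__force char __user *)kbuf, len);
        set_fs(old_fs);         /* always restore the saved limit */
        return ret;
}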

#define segment_eq(a,b) ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) & (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr,size) ({ \
        unsigned long flag,sum; \
        __chk_user_ptr(addr); \
        asm("# range_ok\n\t" \
                "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"  \
                :"=&r" (flag), "=r" (sum) \
                :"1" (addr),"g" ((long)(size)),"g" (current_thread_info()->addr_limit.seg)); \
        flag; })

#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
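
/*
 * Illustrative C equivalent of __range_not_ok() (sketch only, not part
 * of the original header): the asm keeps the carry out of addr + size,
 * so a sum that wraps past 2^64 is rejected as well as one that ends
 * beyond addr_limit.
 */
static inline unsigned long __example_range_not_ok(unsigned long addr,
                                                   unsigned long size)
{
        unsigned long limit = current_thread_info()->addr_limit.seg;
        unsigned long sum = addr + size;

        /* non-zero when the add wrapped or the range passes the limit */
        return (sum < addr) || (limit < sum);
}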

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE
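
/*
 * Sketch of the lookup this table supports (hypothetical helper, not
 * the kernel's search_extable()): entries are sorted by insn, so the
 * page fault handler can binary-search for the faulting address; a
 * return of 0 means no fixup exists and the fault is genuine.
 */
static inline unsigned long
__example_search_extable(const struct exception_table_entry *first,
                         const struct exception_table_entry *last,
                         unsigned long faulting_insn)
{
        while (first <= last) {
                const struct exception_table_entry *mid =
                        first + ((last - first) >> 1);

                if (mid->insn == faulting_insn)
                        return mid->fixup;
                if (mid->insn < faulting_insn)
                        first = mid + 1;
                else
                        last = mid - 1;
        }
        return 0;
}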

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size,ret,x,ptr) \
        asm volatile("call __get_user_" #size \
                :"=a" (ret),"=d" (x) \
                :"c" (ptr) \
                :"r8")

/* Careful: we have to cast the result to the type of the pointer for sign reasons */
#define get_user(x,ptr)                                                 \
({      unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch(sizeof (*(ptr))) {                                       \
        case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;          \
        case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;          \
        case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;          \
        case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;          \
        default: __get_user_bad(); break;                               \
        }                                                               \
        (x) = (typeof(*(ptr)))__val_gu;                                 \
        __ret_gu;                                                       \
})

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size,ret,x,ptr)                                    \
        asm volatile("call __put_user_" #size                           \
                :"=a" (ret)                                             \
                :"c" (ptr),"d" (x)                                      \
                :"r8")

#define put_user(x,ptr)                                                 \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
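
/*
 * Usage sketch (hypothetical helpers, not part of the original header):
 * get_user()/put_user() perform their own range check; the __ variants
 * skip it and therefore must follow an explicit access_ok().
 */
static inline int __example_bump_counter(int __user *up)
{
        int val;

        if (get_user(val, up))          /* checked read */
                return -EFAULT;
        return put_user(val + 1, up);   /* checked write-back */
}

static inline int __example_read_pair(int __user *up, int *a, int *b)
{
        if (!access_ok(VERIFY_READ, up, 2 * sizeof(int)))
                return -EFAULT;
        /* one range check covers both unchecked reads below */
        if (__get_user(*a, up) || __get_user(*b, up + 1))
                return -EFAULT;
        return 0;
}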

#define __put_user_nocheck(x,ptr,size)                  \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x),(ptr),(size),__pu_err);     \
        __pu_err;                                       \
})


#define __put_user_check(x,ptr,size)                    \
({                                                      \
        int __pu_err;                                   \
        typeof(*(ptr)) __user *__pu_addr = (ptr);       \
        switch (size) {                                 \
        case 1: __put_user_x(1,__pu_err,x,__pu_addr); break;    \
        case 2: __put_user_x(2,__pu_err,x,__pu_addr); break;    \
        case 4: __put_user_x(4,__pu_err,x,__pu_addr); break;    \
        case 8: __put_user_x(8,__pu_err,x,__pu_addr); break;    \
        default: __put_user_bad();                      \
        }                                               \
        __pu_err;                                       \
})

#define __put_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __put_user_asm(x,ptr,retval,"b","b","iq",-EFAULT); break;\
          case 2: __put_user_asm(x,ptr,retval,"w","w","ir",-EFAULT); break;\
          case 4: __put_user_asm(x,ptr,retval,"l","k","ir",-EFAULT); break;\
          case 8: __put_user_asm(x,ptr,retval,"q","","ir",-EFAULT); break;\
          default: __put_user_bad();                                    \
        }                                                               \
} while (0)

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %"rtype"1,%2\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err)                                     \
                : ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))

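/*
 * For orientation (illustrative expansion only, not emitted verbatim):
 * with itype "l" the macro above generates roughly the following, where
 * the actual register choices are gcc's:
 *
 *      1:      movl %reg,(%ptr)        # the store that may fault
 *      2:                              # normal code continues here
 *      .section .fixup,"ax"
 *      3:      mov $-14,%err           # err = -EFAULT
 *              jmp 2b
 *      .previous
 *      .section __ex_table,"a"
 *              .align 8
 *              .quad 1b,3b             # fault at 1b resumes at 3b
 *      .previous
 *
 * so the recovery code lives out of line, as the exception-table
 * comment earlier promises.
 */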

#define __get_user_nocheck(x,ptr,size)                          \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
        (x) = (typeof(*(ptr)))__gu_val;                         \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)                              \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
          case 1: __get_user_asm(x,ptr,retval,"b","b","=q",-EFAULT); break;\
          case 2: __get_user_asm(x,ptr,retval,"w","w","=r",-EFAULT); break;\
          case 4: __get_user_asm(x,ptr,retval,"l","k","=r",-EFAULT); break;\
          case 8: __get_user_asm(x,ptr,retval,"q","","=r",-EFAULT); break;\
          default: (x) = __get_user_bad();                              \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile(                                   \
                "1:     mov"itype" %2,%"rtype"1\n"              \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     mov %3,%0\n"                            \
                "       xor"itype" %"rtype"1,%"rtype"1\n"       \
                "       jmp 2b\n"                               \
                ".previous\n"                                   \
                ".section __ex_table,\"a\"\n"                   \
                "       .align 8\n"                             \
                "       .quad 1b,3b\n"                          \
                ".previous"                                     \
                : "=r"(err), ltype (x)                          \
                : "m"(__m(addr)), "i"(errno), "0"(err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
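
/*
 * Usage sketch (hypothetical request structure and handler, not part of
 * the original header): the copy_*_user() routines return the number of
 * bytes that could NOT be copied, so zero means success.
 */
struct __example_req {
        unsigned long arg;
        unsigned long result;
};

static inline int __example_handle_req(struct __example_req __user *ureq)
{
        struct __example_req req;

        if (copy_from_user(&req, ureq, sizeof(req)))
                return -EFAULT;
        req.result = req.arg * 2;       /* stand-in for the real work */
        if (copy_to_user(ureq, &req, sizeof(req)))
                return -EFAULT;
        return 0;
}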

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst,(__force void *)src,size);
        switch (size) {
        case 1:__get_user_asm(*(u8*)dst,(u8 __user *)src,ret,"b","b","=q",1);
                return ret;
        case 2:__get_user_asm(*(u16*)dst,(u16 __user *)src,ret,"w","w","=r",2);
                return ret;
        case 4:__get_user_asm(*(u32*)dst,(u32 __user *)src,ret,"l","k","=r",4);
                return ret;
        case 8:__get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",8);
                return ret;
        case 10:
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u16*)(8+(char*)dst),(u16 __user *)(8+(char __user *)src),ret,"w","w","=r",2);
                return ret;
        case 16:
                __get_user_asm(*(u64*)dst,(u64 __user *)src,ret,"q","","=r",16);
                if (unlikely(ret)) return ret;
                __get_user_asm(*(u64*)(8+(char*)dst),(u64 __user *)(8+(char __user *)src),ret,"q","","=r",8);
                return ret;
        default:
                return copy_user_generic(dst,(__force void *)src,size);
        }
}
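
/*
 * Sketch of why the constant-size cases matter (hypothetical caller,
 * not part of the original header): sizeof(*dst) is a compile-time
 * constant, so the __always_inline switch above reduces this call to
 * the single movl of "case 4" plus its fixup entry, with no call out
 * to copy_user_generic().
 */
static inline int __example_read_u32(u32 *dst, const u32 __user *src)
{
        return __copy_from_user(dst, src, sizeof(*dst));
}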

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,src,size);
        switch (size) {
        case 1:__put_user_asm(*(u8*)src,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        case 2:__put_user_asm(*(u16*)src,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        case 4:__put_user_asm(*(u32*)src,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        case 8:__put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        case 10:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",10);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(((u16*)src)[4],(u16 __user *)dst+4,ret,"w","w","ir",2);
                return ret;
        case 16:
                __put_user_asm(*(u64*)src,(u64 __user *)dst,ret,"q","","ir",16);
                if (unlikely(ret)) return ret;
                asm("":::"memory");
                __put_user_asm(((u64*)src)[1],(u64 __user *)dst+1,ret,"q","","ir",8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst,src,size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp,(u8 __user *)src,ret,"b","b","=q",1);
                if (likely(!ret))
                        __put_user_asm(tmp,(u8 __user *)dst,ret,"b","b","iq",1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp,(u16 __user *)src,ret,"w","w","=r",2);
                if (likely(!ret))
                        __put_user_asm(tmp,(u16 __user *)dst,ret,"w","w","ir",2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp,(u32 __user *)src,ret,"l","k","=r",4);
                if (likely(!ret))
                        __put_user_asm(tmp,(u32 __user *)dst,ret,"l","k","ir",4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp,(u64 __user *)src,ret,"q","","=r",8);
                if (likely(!ret))
                        __put_user_asm(tmp,(u64 __user *)dst,ret,"q","","ir",8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,(__force void *)src,size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
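
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * strncpy_from_user() returns the length of the copied string (or
 * -EFAULT on a fault), and does not guarantee NUL termination when the
 * buffer fills, so careful callers terminate explicitly.
 */
static inline long __example_get_string(char *buf, long buflen,
                                        const char __user *ustr)
{
        long len = strncpy_from_user(buf, ustr, buflen - 1);

        if (len < 0)
                return len;     /* fault while reading user memory */
        buf[len] = '\0';        /* len <= buflen - 1, so this is in bounds */
        return len;
}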

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size);
#define __copy_to_user_inatomic copy_user_generic

#endif /* __X86_64_UACCESS_H */