/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *     Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>

/*
 * NOTE: Macros/functions in this file depend on the thread_info.h
 * implementation.  Assumes:
 *   TI_FLAGS == 8
 *   TIF_USERSPACE == 31
 *   USER_ADDR_LIMIT == 0x80000000
 */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

typedef struct {
	unsigned int is_user_space;
} mm_segment_t;

/*
 * The fs value determines whether argument validity checking should be
 * performed or not: if get_fs() == USER_DS, checking is performed, while
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define segment_eq(a,b)	((a).is_user_space == (b).is_user_space)

#define USER_ADDR_LIMIT	0x80000000

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)

#if !defined(CONFIG_MMU)
static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address resides somewhere outside
 * of the available RAM.
 *
 * TODO: This check could probably also stand to be restricted somewhat more,
 * though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	extern unsigned long memory_start, memory_end;

	return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
static inline mm_segment_t get_fs(void)
{
	return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}

static inline void set_fs(mm_segment_t s)
{
	unsigned long ti, flag;
	__asm__ __volatile__("stc	r7_bank, %0\n\t"
			     "mov.l	@(8,%0), %1\n\t"
			     "shal	%1\n\t"
			     "cmp/pl	%2\n\t"
			     "rotcr	%1\n\t"
			     "mov.l	%1, @(8,%0)"
			     : "=&r" (ti), "=&r" (flag)
			     : "r" (s.is_user_space)
			     : "t");
	/* The assembly above updates bit 31 of ti->flags in place; it is
	 * equivalent to:
	 *	if (s.is_user_space)
	 *		set_thread_flag(TIF_USERSPACE);
	 *	else
	 *		clear_thread_flag(TIF_USERSPACE);
	 */
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
 * (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000 (USER_ADDR_LIMIT)
 *
 * (1) (2) (3) | RESULT
 *  0   0   0  |  ok
 *  0   0   1  |  ok
 *  0   1   0  |  bad
 *  0   1   1  |  bad
 *  1   0   0  |  ok
 *  1   0   1  |  bad
 *  1   1   0  |  bad
 *  1   1   1  |  bad
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long flag, tmp;

	__asm__("stc	r7_bank, %0\n\t"
		"mov.l	@(8,%0), %0\n\t"	/* flag = ti->flags */
		"clrt\n\t"
		"addc	%2, %1\n\t"		/* tmp = addr + size, T = carry */
		"and	%1, %0\n\t"
		"rotcl	%0\n\t"
		"rotcl	%0\n\t"
		"and	#3, %0"			/* bit 1: (2), bit 0: (1) && (3) */
		: "=&z" (flag), "=r" (tmp)
		: "r" (addr), "1" (size)
		: "t");

	return flag == 0;
}
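/*
 * Illustrative C equivalent of the check above (a sketch, not part of
 * the original header; the helper name is hypothetical).  It is only
 * meant to restate the truth table in C.
 */
#if 0
static inline int __access_ok_equiv(unsigned long addr, unsigned long size)
{
	unsigned long sum = addr + size;
	int carry = sum < addr;				/* check (2) */
	int user  = test_thread_flag(TIF_USERSPACE);	/* check (1) */
	int over  = sum >= USER_ADDR_LIMIT;		/* check (3) */

	return !carry && !(user && over);
}
#endif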
#endif /* CONFIG_MMU */

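/*
 * Illustrative sketch (not part of the original header): the classic
 * get_fs()/set_fs() pattern for pushing a kernel buffer through an
 * interface that expects a user pointer.  The function is hypothetical;
 * vfs_read() would need <linux/fs.h>.
 */
#if 0
static ssize_t example_kernel_read(struct file *filp, char *kbuf, size_t len)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* temporarily bypass user-address checks */
	ret = vfs_read(filp, (char __user *)kbuf, len, &filp->f_pos);
	set_fs(old_fs);		/* always restore the previous segment */
	return ret;
}
#endif
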
static inline int access_ok(int type, const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return __access_ok(addr, size);
}

/* this function will go away soon - use access_ok() instead */
static inline int __deprecated verify_area(int type, const void __user *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))

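/*
 * Illustrative sketch (not part of the original header): typical use of
 * get_user()/put_user() from a hypothetical syscall.  Both evaluate to 0
 * on success and -EFAULT on a bad user pointer.
 */
#if 0
asmlinkage int sys_example_double(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val * 2, uptr);
}
#endif
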
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

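/*
 * Illustrative sketch (not part of the original header): a single
 * explicit access_ok() check amortized over several __get_user() calls.
 * The function name is hypothetical.
 */
#if 0
static int example_get_pair(int __user *uptr, int *a, int *b)
{
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(*a, &uptr[0]) || __get_user(*b, &uptr[1]))
		return -EFAULT;
	return 0;
}
#endif
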
/* Tell gcc that a "m" (__m(addr)) operand may touch a large object. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "b"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "w"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "l"); break;	\
	default: __get_user_unknown(); break;			\
	}							\
} while (0)

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	switch (size) {						\
	case 1: __get_user_1(__gu_val, (ptr), __gu_err); break;	\
	case 2: __get_user_2(__gu_val, (ptr), __gu_err); break;	\
	case 4: __get_user_4(__gu_val, (ptr), __gu_err); break;	\
	default: __get_user_unknown(); break;			\
	}							\
								\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_1(x,addr,err) ({			\
__asm__("stc	r7_bank, %1\n\t"			\
	"mov.l	@(8,%1), %1\n\t"			\
	"and	%2, %1\n\t"	"cmp/pz	%1\n\t"		\
	"bt/s	1f\n\t"	" mov	#0, %0\n"		\
	"0:	mov	%3, %0\n\t"			\
	"bra	2f\n\t"	" mov	#0, %1\n"		\
	"1:	mov.b	@%2, %1\n\t"			\
	"extu.b	%1, %1\n"				\
	"2:\n"						\
	".section	__ex_table,\"a\"\n\t"		\
	".long	1b, 0b\n\t"	".previous"		\
	: "=&r" (err), "=&r" (x)			\
	: "r" (addr), "i" (-EFAULT)			\
	: "t"); })

#define __get_user_2(x,addr,err) ({			\
__asm__("stc	r7_bank, %1\n\t"			\
	"mov.l	@(8,%1), %1\n\t"			\
	"and	%2, %1\n\t"	"cmp/pz	%1\n\t"		\
	"bt/s	1f\n\t"	" mov	#0, %0\n"		\
	"0:	mov	%3, %0\n\t"			\
	"bra	2f\n\t"	" mov	#0, %1\n"		\
	"1:	mov.w	@%2, %1\n\t"			\
	"extu.w	%1, %1\n"				\
	"2:\n"						\
	".section	__ex_table,\"a\"\n\t"		\
	".long	1b, 0b\n\t"	".previous"		\
	: "=&r" (err), "=&r" (x)			\
	: "r" (addr), "i" (-EFAULT)			\
	: "t"); })

#define __get_user_4(x,addr,err) ({			\
__asm__("stc	r7_bank, %1\n\t"			\
	"mov.l	@(8,%1), %1\n\t"			\
	"and	%2, %1\n\t"	"cmp/pz	%1\n\t"		\
	"bt/s	1f\n\t"	" mov	#0, %0\n"		\
	"0:	mov	%3, %0\n\t"			\
	"bra	2f\n\t"	" mov	#0, %1\n"		\
	"1:	mov.l	@%2, %1\n"			\
	"2:\n"						\
	".section	__ex_table,\"a\"\n\t"		\
	".long	1b, 0b\n\t"	".previous"		\
	: "=&r" (err), "=&r" (x)			\
	: "r" (addr), "i" (-EFAULT)			\
	: "t"); })

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n"	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:	mov	#0, %1\n\t"	/* faulted: zero the value, */ \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t"	/* return -EFAULT via 2: */ \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t"	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t"	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "b"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "w"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "l"); break;	\
	case 8: __put_user_u64(x, ptr, retval); break;		\
	default: __put_user_unknown(); break;			\
	}							\
} while (0)

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
								\
	if (__access_ok((unsigned long)__pu_addr, (size)))	\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n"	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:	nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t"	/* return -EFAULT via 2: */ \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t"	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t"	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
	:"memory"); })

#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1, %2\n\t"	/* low word first, */ \
	"mov.l	%S1, %T2\n\t"	/* then the high word */ \
	"mov	#0, %0\n"	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:	mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t"	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t"	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1, %2\n\t"	/* big-endian: high word first, */ \
	"mov.l	%R1, %T2\n\t"	/* then the low word */ \
	"mov	#0, %0\n"	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:	mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t"	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t"	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  Returns the number of bytes NOT copied. */
extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; \
})

#define __copy_to_user(to,from,n)	\
	__copy_user((void *)(to),	\
		    (void *)(from), (n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; \
})

#define __copy_from_user(to,from,n)	\
	__copy_user((void *)(to),	\
		    (void *)(from), (n))

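/*
 * Illustrative sketch (not part of the original header): a typical
 * ioctl-style round trip through a user buffer.  'struct example_arg'
 * and the function are hypothetical.
 */
#if 0
struct example_arg { int in, out; };

static int example_ioctl(void __user *uarg)
{
	struct example_arg a;

	if (copy_from_user(&a, uarg, sizeof(a)))
		return -EFAULT;	/* nonzero result = bytes NOT copied */
	a.out = a.in * 2;
	if (copy_to_user(uarg, &a, sizeof(a)))
		return -EFAULT;
	return 0;
}
#endif
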
/*
 * Clear the area and return the number of bytes NOT cleared
 * (on failure; usually it's 0).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void *__cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:	mov.b	@%2+, %1\n\t"	/* load a byte, stop on NUL */
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:	mov.b	%1, @%3\n\t"	/* store (also fills the NUL) */
		"dt	%7\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n"
		"2:	sub	%7, %0\n"	/* res = bytes copied, sans NUL */
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:	mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} \
	__sfu_res; \
})

/*
 * Return the size of a string (including the ending 0!)
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"	/* hit the __n limit? */
		"bt	2f\n"
		"1:	mov.b	@(%0,%3), %1\n\t"
		"tst	%1, %1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %0\n"	/* counts the ending NUL too */
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	#0, %0\n"	/* fault: return 0 */
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT)
		: "t");

	return res;
}

static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!access_ok(VERIFY_READ, s, n))
		return 0;
	else
		return __strnlen_user(s, n);
}

static __inline__ long strlen_user(const char __user *s)
{
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;
	else
		return __strnlen_user(s, ~0UL >> 1);
}

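/*
 * Illustrative sketch (not part of the original header): bounded copy of
 * a NUL-terminated string from user space.  Names are hypothetical.
 */
#if 0
static long example_get_name(const char __user *uname, char *kname, long maxlen)
{
	long len = strncpy_from_user(kname, uname, maxlen - 1);

	if (len < 0)
		return len;	/* -EFAULT on a bad user pointer */
	kname[len] = '\0';	/* not always terminated on truncation */
	return len;
}
#endif
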
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines above use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

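/*
 * Illustrative sketch (not part of the original header) of how the fault
 * path consults the table: search_exception_tables() is the generic
 * lookup helper, and on a hit the faulting PC is redirected to the fixup.
 * The body below is a simplification, not the real sh handler.
 */
#if 0
static int example_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->pc);
	if (!e)
		return 0;	/* genuine fault, not covered by the table */
	regs->pc = e->fixup;	/* resume at the out-of-line fixup code */
	return 1;
}
#endif
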
#endif /* __ASM_SH_UACCESS_H */