/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
 *             sys_sparc32
 *
 * Copyright (C) 2000          VA Linux Co
 * Copyright (C) 2000          Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999          Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998     Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997          David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000          Hewlett-Packard Co.
 * Copyright (C) 2000          David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000,2001,2002 Andi Kleen, SuSE Labs (x86-64 port)
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment. In 2.5 most of this should be moved to a generic directory.
 *
 * This file assumes that there is a hole at the end of user address space.
 *
 * Some of the functions are LE specific currently. These are
 * hopefully all marked. This should be fixed.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/rwsem.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/highuid.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/vgtod.h>

#define AA(__x)         ((unsigned long)(__x))
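
/*
 * Many of the wrappers below take a 64-bit value that 32-bit userspace
 * passes as two 32-bit halves and reassemble it as
 * ((u64)high << 32) | low before calling the native syscall; the AA()
 * macro above zero-extends a 32-bit value to unsigned long for the
 * same purpose.
 */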

asmlinkage long sys32_truncate64(char __user *filename,
                                 unsigned long offset_low,
                                 unsigned long offset_high)
{
        return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
}

asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
                                  unsigned long offset_high)
{
        return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}

/*
 * Another set for IA32/LFS -- x86_64 struct stat is different due to
 * support for 64bit inode numbers.
 */
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
{
        typeof(ubuf->st_uid) uid = 0;
        typeof(ubuf->st_gid) gid = 0;
        SET_UID(uid, stat->uid);
        SET_GID(gid, stat->gid);
        if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
            __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
            __put_user(stat->ino, &ubuf->__st_ino) ||
            __put_user(stat->ino, &ubuf->st_ino) ||
            __put_user(stat->mode, &ubuf->st_mode) ||
            __put_user(stat->nlink, &ubuf->st_nlink) ||
            __put_user(uid, &ubuf->st_uid) ||
            __put_user(gid, &ubuf->st_gid) ||
            __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
            __put_user(stat->size, &ubuf->st_size) ||
            __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
            __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
            __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
            __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
            __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
            __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
            __put_user(stat->blksize, &ubuf->st_blksize) ||
            __put_user(stat->blocks, &ubuf->st_blocks))
                return -EFAULT;
        return 0;
}

asmlinkage long sys32_stat64(char __user *filename,
                             struct stat64 __user *statbuf)
{
        struct kstat stat;
        int ret = vfs_stat(filename, &stat);

        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_lstat64(char __user *filename,
                              struct stat64 __user *statbuf)
{
        struct kstat stat;
        int ret = vfs_lstat(filename, &stat);

        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
{
        struct kstat stat;
        int ret = vfs_fstat(fd, &stat);

        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
                              struct stat64 __user *statbuf, int flag)
{
        struct kstat stat;
        int error = -EINVAL;

        if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
                goto out;

        if (flag & AT_SYMLINK_NOFOLLOW)
                error = vfs_lstat_fd(dfd, filename, &stat);
        else
                error = vfs_stat_fd(dfd, filename, &stat);

        if (!error)
                error = cp_stat64(statbuf, &stat);
out:
        return error;
}

/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing..
 */

struct mmap_arg_struct {
        unsigned int addr;
        unsigned int len;
        unsigned int prot;
        unsigned int flags;
        unsigned int fd;
        unsigned int offset;
};

asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
{
        struct mmap_arg_struct a;
        struct file *file = NULL;
        unsigned long retval;
        struct mm_struct *mm;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if (a.offset & ~PAGE_MASK)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                file = fget(a.fd);
                if (!file)
                        return -EBADF;
        }

        mm = current->mm;
        down_write(&mm->mmap_sem);
        retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
                               a.offset >> PAGE_SHIFT);
        if (file)
                fput(file);
        up_write(&mm->mmap_sem);

        return retval;
}

asmlinkage long sys32_mprotect(unsigned long start, size_t len,
                               unsigned long prot)
{
        return sys_mprotect(start, len, prot);
}

asmlinkage long sys32_pipe(int __user *fd)
{
        int retval;
        int fds[2];

        retval = do_pipe_flags(fds, 0);
        if (retval)
                goto out;
        if (copy_to_user(fd, fds, sizeof(fds)))
                retval = -EFAULT;
out:
        return retval;
}
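
/*
 * The signal wrappers below translate between the 32-bit compat_sigset_t
 * (an array of 32-bit words) and the native 64-bit sigset_t by pairing
 * adjacent 32-bit words into one 64-bit word, and splitting them back
 * apart on the way out.
 */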

asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
                                   struct sigaction32 __user *oact,
                                   unsigned int sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        compat_sigset_t set32;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

        if (act) {
                compat_uptr_t handler, restorer;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __copy_from_user(&set32, &act->sa_mask,
                                     sizeof(compat_sigset_t)))
                        return -EFAULT;
                new_ka.sa.sa_handler = compat_ptr(handler);
                new_ka.sa.sa_restorer = compat_ptr(restorer);

                /*
                 * FIXME: here we rely on _COMPAT_NSIG_WORDS being >=
                 * _NSIG_WORDS << 1
                 */
                switch (_NSIG_WORDS) {
                case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
                                | (((long)set32.sig[7]) << 32);
                case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
                                | (((long)set32.sig[5]) << 32);
                case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
                                | (((long)set32.sig[3]) << 32);
                case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
                                | (((long)set32.sig[1]) << 32);
                }
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                /*
                 * FIXME: here we rely on _COMPAT_NSIG_WORDS being >=
                 * _NSIG_WORDS << 1
                 */
                switch (_NSIG_WORDS) {
                case 4:
                        set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
                        set32.sig[6] = old_ka.sa.sa_mask.sig[3];
                case 3:
                        set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
                        set32.sig[4] = old_ka.sa.sa_mask.sig[2];
                case 2:
                        set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
                        set32.sig[2] = old_ka.sa.sa_mask.sig[1];
                case 1:
                        set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
                        set32.sig[0] = old_ka.sa.sa_mask.sig[0];
                }
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __copy_to_user(&oact->sa_mask, &set32,
                                   sizeof(compat_sigset_t)))
                        return -EFAULT;
        }

        return ret;
}

asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
                                struct old_sigaction32 __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                compat_old_sigset_t mask;
                compat_uptr_t handler, restorer;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;

                new_ka.sa.sa_handler = compat_ptr(handler);
                new_ka.sa.sa_restorer = compat_ptr(restorer);

                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}
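
/*
 * Several of the wrappers below convert the user's 32-bit arguments into a
 * native structure on the kernel stack, temporarily switch the address
 * limit with set_fs(KERNEL_DS) so the native syscall accepts that kernel
 * pointer, and then convert the result back for 32-bit userspace.
 */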

asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
                                     compat_sigset_t __user *oset,
                                     unsigned int sigsetsize)
{
        sigset_t s;
        compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (set) {
                if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
                        return -EFAULT;
                switch (_NSIG_WORDS) {
                case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
                case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
                case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
                case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
                }
        }
        set_fs(KERNEL_DS);
        ret = sys_rt_sigprocmask(how,
                                 set ? (sigset_t __user *)&s : NULL,
                                 oset ? (sigset_t __user *)&s : NULL,
                                 sigsetsize);
        set_fs(old_fs);
        if (ret)
                return ret;
        if (oset) {
                switch (_NSIG_WORDS) {
                case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
                case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
                case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
                case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
                }
                if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
                        return -EFAULT;
        }
        return 0;
}

asmlinkage long sys32_alarm(unsigned int seconds)
{
        return alarm_setitimer(seconds);
}
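
/*
 * Like the old mmap call above, the old select entry point packs its five
 * arguments into a block in user memory; unpack them and hand off to the
 * generic compat select implementation.
 */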

struct sel_arg_struct {
        unsigned int n;
        unsigned int inp;
        unsigned int outp;
        unsigned int exp;
        unsigned int tvp;
};

asmlinkage long sys32_old_select(struct sel_arg_struct __user *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
                                 compat_ptr(a.exp), compat_ptr(a.tvp));
}

asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
                              int options)
{
        return compat_sys_wait4(pid, stat_addr, options, NULL);
}

/* 32-bit timeval and related flotsam. */

asmlinkage long sys32_sysfs(int option, u32 arg1, u32 arg2)
{
        return sys_sysfs(option, arg1, arg2);
}

asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
                                    struct compat_timespec __user *interval)
{
        struct timespec t;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
        set_fs(old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}

asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
                                    compat_size_t sigsetsize)
{
        sigset_t s;
        compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
        set_fs(old_fs);
        if (!ret) {
                switch (_NSIG_WORDS) {
                case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
                case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
                case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
                case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
                }
                if (copy_to_user(set, &s32, sizeof(compat_sigset_t)))
                        return -EFAULT;
        }
        return ret;
}

asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
                                      compat_siginfo_t __user *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
        set_fs(KERNEL_DS);
        ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
        set_fs(old_fs);
        return ret;
}
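
/*
 * The 32-bit sysctl argument block stores the user pointers as 32-bit
 * values; they are widened with compat_ptr() before do_sysctl() is called
 * with the address limit raised, see the comment inside sys32_sysctl().
 */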

#ifdef CONFIG_SYSCTL_SYSCALL
struct sysctl_ia32 {
        unsigned int    name;
        int             nlen;
        unsigned int    oldval;
        unsigned int    oldlenp;
        unsigned int    newval;
        unsigned int    newlen;
        unsigned int    __unused[4];
};

asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *args32)
{
        struct sysctl_ia32 a32;
        mm_segment_t old_fs = get_fs();
        void __user *oldvalp, *newvalp;
        size_t oldlen;
        int __user *namep;
        long ret;

        if (copy_from_user(&a32, args32, sizeof(a32)))
                return -EFAULT;

        /*
         * We need to pre-validate these because we have to disable
         * address checking before calling do_sysctl() because of
         * OLDLEN but we can't run the risk of the user specifying bad
         * addresses here. Well, since we're dealing with 32 bit
         * addresses, we KNOW that access_ok() will always succeed, so
         * this is an expensive NOP, but so what...
         */
        namep = compat_ptr(a32.name);
        oldvalp = compat_ptr(a32.oldval);
        newvalp = compat_ptr(a32.newval);

        if ((oldvalp && get_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
            || !access_ok(VERIFY_WRITE, namep, 0)
            || !access_ok(VERIFY_WRITE, oldvalp, 0)
            || !access_ok(VERIFY_WRITE, newvalp, 0))
                return -EFAULT;

        set_fs(KERNEL_DS);
        lock_kernel();
        ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *)&oldlen,
                        newvalp, (size_t) a32.newlen);
        unlock_kernel();
        set_fs(old_fs);

        if (oldvalp && put_user(oldlen, (int __user *)compat_ptr(a32.oldlenp)))
                return -EFAULT;

        return ret;
}
#endif /* CONFIG_SYSCTL_SYSCALL */

/* warning: next two assume little endian */
asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
                            u32 poslo, u32 poshi)
{
        return sys_pread64(fd, ubuf, count,
                           ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage long sys32_pwrite(unsigned int fd, char __user *ubuf, u32 count,
                             u32 poslo, u32 poshi)
{
        return sys_pwrite64(fd, ubuf, count,
                            ((loff_t)AA(poshi) << 32) | AA(poslo));
}
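
/*
 * 32-bit personality handling: a task already running as PER_LINUX32 that
 * asks for PER_LINUX keeps PER_LINUX32, and PER_LINUX32 is reported back
 * to it as PER_LINUX.
 */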

asmlinkage long sys32_personality(unsigned long personality)
{
        int ret;

        if (personality(current->personality) == PER_LINUX32 &&
                personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;
        return ret;
}

asmlinkage long sys32_sendfile(int out_fd, int in_fd,
                               compat_off_t __user *offset, s32 count)
{
        mm_segment_t old_fs = get_fs();
        int ret;
        off_t of;

        if (offset && get_user(of, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
                           count);
        set_fs(old_fs);

        if (offset && put_user(of, offset))
                return -EFAULT;
        return ret;
}

asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
                            unsigned long prot, unsigned long flags,
                            unsigned long fd, unsigned long pgoff)
{
        struct mm_struct *mm = current->mm;
        unsigned long error;
        struct file *file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        return -EBADF;
        }

        down_write(&mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&mm->mmap_sem);

        if (file)
                fput(file);
        return error;
}
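
/*
 * The uname variants below report the machine as "i686" rather than
 * "x86_64" when the task's personality is PER_LINUX32, so 32-bit
 * userspace sees a 32-bit looking system.
 */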

asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
{
        char *arch = "x86_64";
        int err;

        if (!name)
                return -EFAULT;
        if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
                return -EFAULT;

        down_read(&uts_sem);

        err = __copy_to_user(&name->sysname, &utsname()->sysname,
                             __OLD_UTS_LEN);
        err |= __put_user(0, name->sysname+__OLD_UTS_LEN);
        err |= __copy_to_user(&name->nodename, &utsname()->nodename,
                              __OLD_UTS_LEN);
        err |= __put_user(0, name->nodename+__OLD_UTS_LEN);
        err |= __copy_to_user(&name->release, &utsname()->release,
                              __OLD_UTS_LEN);
        err |= __put_user(0, name->release+__OLD_UTS_LEN);
        err |= __copy_to_user(&name->version, &utsname()->version,
                              __OLD_UTS_LEN);
        err |= __put_user(0, name->version+__OLD_UTS_LEN);

        if (personality(current->personality) == PER_LINUX32)
                arch = "i686";

        err |= __copy_to_user(&name->machine, arch, strlen(arch) + 1);

        up_read(&uts_sem);

        err = err ? -EFAULT : 0;

        return err;
}

long sys32_uname(struct old_utsname __user *name)
{
        int err;

        if (!name)
                return -EFAULT;
        down_read(&uts_sem);
        err = copy_to_user(name, utsname(), sizeof(*name));
        up_read(&uts_sem);
        if (personality(current->personality) == PER_LINUX32)
                err |= copy_to_user(&name->machine, "i686", 5);
        return err ? -EFAULT : 0;
}

long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
{
        struct ustat u;
        mm_segment_t seg;
        int ret;

        seg = get_fs();
        set_fs(KERNEL_DS);
        ret = sys_ustat(dev, (struct ustat __user *)&u);
        set_fs(seg);
        if (ret < 0)
                return ret;

        if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
            __put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
            __put_user((__u32) u.f_tinode, &u32p->f_tinode) ||
            __copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
            __copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
                return -EFAULT;
        return 0;
}

asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
                             compat_uptr_t __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = compat_do_execve(filename, argv, envp, regs);
        putname(filename);
        return error;
}
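
/*
 * For clone, the 32-bit entry path hands the extra arguments over in
 * registers: the parent TID pointer arrives in dx and the child TID
 * pointer in di; a zero new stack pointer means "use the caller's".
 */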

asmlinkage long sys32_clone(unsigned int clone_flags, unsigned int newsp,
                            struct pt_regs *regs)
{
        void __user *parent_tid = (void __user *)regs->dx;
        void __user *child_tid = (void __user *)regs->di;

        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * Some system calls that need sign extended arguments. This could be
 * done by a generic wrapper.
 */
long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
{
        return sys_lseek(fd, offset, whence);
}

long sys32_kill(int pid, int sig)
{
        return sys_kill(pid, sig);
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
                        __u32 len_low, __u32 len_high, int advice)
{
        return sys_fadvise64_64(fd,
                                (((u64)offset_high) << 32) | offset_low,
                                (((u64)len_high) << 32) | len_low,
                                advice);
}

long sys32_vm86_warning(void)
{
        struct task_struct *me = current;
        static char lastcomm[sizeof(me->comm)];

        if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
                compat_printk(KERN_INFO
                              "%s: vm86 mode not supported on 64 bit kernel\n",
                              me->comm);
                strncpy(lastcomm, me->comm, sizeof(lastcomm));
        }
        return 0;
}
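
/*
 * The remaining wrappers only reassemble a 64-bit offset or length that
 * 32-bit userspace passed as two 32-bit halves and forward the call to
 * the native implementation.
 */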

long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
                          char __user *buf, size_t len)
{
        return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
}

asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
                                   size_t count)
{
        return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
}

asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
                                      unsigned n_low, unsigned n_hi, int flags)
{
        return sys_sync_file_range(fd,
                                   ((u64)off_hi << 32) | off_low,
                                   ((u64)n_hi << 32) | n_low, flags);
}

asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
                                size_t len, int advice)
{
        return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
                                len, advice);
}

asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
                                unsigned offset_hi, unsigned len_lo,
                                unsigned len_hi)
{
        return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                             ((u64)len_hi << 32) | len_lo);
}