/*
 * linux/arch/x86_64/kernel/sys_x86_64.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                /* Hand both new descriptors back to userspace. */
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
}

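/*
 * x86-64 mmap entry point. Note that 'off' is a byte offset: it must
 * be page-aligned, and is shifted down to a page offset before being
 * passed to do_mmap_pgoff() under the mmap_sem write lock.
 */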
asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long off)
{
        long error;
        struct file *file;

        /* The byte offset must be page-aligned. */
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = -EBADF;
        file = NULL;
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }
        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

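/*
 * Compute the [begin, end) window the unmapped-area search may use:
 * a low 1GB window when a native 64-bit task asks for MAP_32BIT,
 * the normal task layout otherwise.
 */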
static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
{
        if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small
                   code model, so it has to stay within the first
                   31 bits. Limit it to that. This means we need to
                   move the unmapped base down for this case. This
                   can give conflicts with the heap, but we assume
                   that glibc malloc knows how to fall back to mmap.
                   Give it 1GB of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
        } else {
                *begin = TASK_UNMAPPED_BASE;
                *end = TASK_SIZE;
        }
}

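/*
 * First-fit search for a free virtual address range.
 * free_area_cache remembers where the last search ended so
 * successive mmaps do not rescan the same VMAs, and
 * cached_hole_size records the largest hole skipped so far so a
 * small enough request can restart from the bottom and reuse it.
 */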
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        /* If the caller supplied a hint, try it as-is first. */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        /* For 32-bit mappings: a hole at least this large was seen
           below the cache point, so restart from the bottom to
           reuse it. */
        if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
            && len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = begin;
        }
        addr = mm->free_area_cache;
        if (addr < begin)
                addr = begin;
        start_addr = addr;

full_search:
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (end - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != begin) {
                                start_addr = addr = begin;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                /* Keep track of the largest hole we have skipped. */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
        }
}

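/*
 * uname(2): copy the utsname data out under the read lock. A task
 * running with the PER_LINUX32 personality gets "i686" as its
 * machine string, so 32-bit userland sees a 32-bit architecture.
 */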
asmlinkage long sys_uname(struct new_utsname __user *name)
{
        int err;

        down_read(&uts_sem);
        err = copy_to_user(name, utsname(), sizeof(*name));
        up_read(&uts_sem);
        if (personality(current->personality) == PER_LINUX32)
                err |= copy_to_user(&name->machine, "i686", 5);
        return err ? -EFAULT : 0;
}