/* $Id: sys_sparc.c,v 1.70 2001/04/14 01:12:02 davem Exp $
 * linux/arch/sparc/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/ipc.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

/* #define DEBUG_UNIMP_SYSCALL */

/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary.  Every Sparc executable calls this very early on.
 */
asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}

#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))

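/* COLOUR_ALIGN() rounds an address up to an SHMLBA boundary so that shared
 * mappings all land on the same virtual-cache colour; on sun4c's virtually
 * addressed cache, differently coloured aliases of the same page would not
 * stay coherent.
 */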
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct * vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE)
		return -ENOMEM;
	if (ARCH_SUN4C_SUN4 && len > 0x20000000)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 && 0x20000000 - len < addr) {
			addr = PAGE_OFFSET;
			vmm = find_vma(current->mm, PAGE_OFFSET);
		}
		if (TASK_SIZE - PAGE_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr);
	}
}

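/* sparc_brk(): on sun4c/sun4 the data segment must stay inside its 512MB
 * region (the address hole between 0x20000000 and 0xe0000000 cannot be
 * mapped), so a new break whose top three address bits differ from the
 * current one is refused by simply returning the old break, the usual
 * "failed brk" convention.
 */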
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
	if (ARCH_SUN4C_SUN4) {
		if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
			return current->mm->brk;
	}
	return sys_brk(brk);
}

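/* The SPARC pipe(2) convention returns both descriptors in registers:
 * fd[0] comes back in %o0 as the normal syscall return value, while fd[1]
 * is written into the saved %o1 slot (UREG_I1) of the trap frame below.
 */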
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sparc_pipe(struct pt_regs *regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
{
	int version, err;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	/* NOTE: the original routes `call' through range-guarded switch
	 * statements with per-case error handling and goto-based exits;
	 * that scaffolding is condensed into a single switch here.
	 */
	switch (call) {
	case SEMOP:
		err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
		break;
	case SEMTIMEDOP:
		err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
		break;
	case SEMGET:
		err = sys_semget (first, second, third);
		break;
	case SEMCTL: {
		union semun fourth;
		err = -EFAULT;
		if (get_user(fourth.__pad,
			     (void __user * __user *)ptr))
			break;
		err = sys_semctl (first, second, third, fourth);
		break;
	}
	case MSGSND:
		err = sys_msgsnd (first, (struct msgbuf __user *) ptr,
				  second, third);
		break;
	case MSGRCV:
		if (version == 0) {
			struct ipc_kludge tmp;
			err = -EFAULT;
			if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr, sizeof (tmp)))
				break;
			err = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
		} else
			err = sys_msgrcv (first,
					  (struct msgbuf __user *) ptr,
					  second, fifth, third);
		break;
	case MSGGET:
		err = sys_msgget ((key_t) first, second);
		break;
	case MSGCTL:
		err = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
		break;
	case SHMAT:
		if (version == 1) {	/* iBCS2 emulator entry point */
			err = -EINVAL;
		} else {
			ulong raddr;
			err = do_shmat (first, (char __user *) ptr, second, &raddr);
			if (!err && put_user (raddr, (ulong __user *) third))
				err = -EFAULT;
		}
		break;
	case SHMDT:
		err = sys_shmdt ((char __user *)ptr);
		break;
	case SHMGET:
		err = sys_shmget (first, second, third);
		break;
	case SHMCTL:
		err = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
		break;
	default:
		err = -ENOSYS;
		break;
	}
	return err;
}

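/* sparc_mmap_check() provides the architecture-specific sanity check for
 * mmap-style requests: on sun4c/sun4 it rejects mappings larger than 512MB,
 * or MAP_FIXED requests that would overlap the 0x20000000-0xe0000000 hole,
 * and it enforces the same TASK_SIZE - PAGE_SIZE ceiling used elsewhere in
 * this file.
 */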
int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (ARCH_SUN4C_SUN4 &&
	    (len > 0x20000000 ||
	     ((flags & MAP_FIXED) &&
	      addr < 0xe0000000 && addr + len > 0x20000000)))
		return -EINVAL;

	/* See asm-sparc/uaccess.h */
	if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
		return -EINVAL;

	return 0;
}

/* Linux version of mmap */
static unsigned long do_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	struct file * file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	len = PAGE_ALIGN(len);
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
	   we have. */
	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}

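/* The legacy mmap() entry point below takes a byte offset rather than the
 * fixed 4096-byte units used by mmap2(), so it only converts the offset to
 * units of PAGE_SIZE before handing off to the common helper.
 */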
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

long sparc_remap_file_pages(unsigned long start, unsigned long size,
			    unsigned long prot, unsigned long pgoff,
			    unsigned long flags)
{
	/* This works on an existing mmap so we don't need to validate
	 * the range as that was done at the original mmap call.
	 */
	return sys_remap_file_pages(start, size, prot,
				    (pgoff >> (PAGE_SHIFT - 12)), flags);
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

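/* sparc_mremap() wraps the generic do_mremap() with the same sun4c hole and
 * TASK_SIZE checks as mmap.  When an in-place resize would cross those
 * limits (and MREMAP_MAYMOVE allows it), a fresh area is picked with
 * get_unmapped_area() so the cache-colour rules above are respected, and
 * the request is turned into a MREMAP_FIXED move.
 */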
asmlinkage unsigned long sparc_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	if (ARCH_SUN4C_SUN4) {
		if (old_len > 0x20000000 || new_len > 0x20000000)
			goto out;
		if (addr < 0xe0000000 && addr + old_len > 0x20000000)
			goto out;
	}
	if (old_len > TASK_SIZE - PAGE_SIZE ||
	    new_len > TASK_SIZE - PAGE_SIZE)
		goto out;
	down_write(&current->mm->mmap_sem);
	if (flags & MREMAP_FIXED) {
		if (ARCH_SUN4C_SUN4 &&
		    new_addr < 0xe0000000 &&
		    new_addr + new_len > 0x20000000)
			goto out_sem;
		if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
			goto out_sem;
	} else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
		    addr + new_len > 0x20000000) ||
		   addr + new_len > TASK_SIZE - PAGE_SIZE) {
		unsigned long map_flags = 0;
		struct file *file = NULL;

		ret = -ENOMEM;
		if (!(flags & MREMAP_MAYMOVE))
			goto out_sem;

		vma = find_vma(current->mm, addr);
		if (vma) {
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;
			file = vma->vm_file;
		}

		new_addr = get_unmapped_area(file, addr, new_len,
					     vma ? vma->vm_pgoff : 0,
					     map_flags);
		ret = new_addr;
		if (new_addr & ~PAGE_MASK)
			goto out_sem;
		flags |= MREMAP_FIXED;
	}
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

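/* Unimplemented system calls land below: the syscall number the user loaded
 * into %g1 is reported via u_regs[1], and after a handful of complaints the
 * printk is silenced.
 */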
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
{
	static int count = 0;

	if (count++ > 5)
		return -ENOSYS;
	printk ("%s[%d]: Unimplemented SPARC system call %d\n",
		current->comm, current->pid, (int)regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif
	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
	siginfo_t info;

	lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->pc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);

#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
	unlock_kernel();
}

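/* sparc_sigaction() implements the old sigaction(2).  A negative signal
 * number is used by userland as a marker that it wants the "new" signal
 * semantics, which is why thread.new_signal is set and the sign stripped
 * before the usual processing.
 */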
asmlinkage int
sparc_sigaction (int sig, const struct old_sigaction __user *act,
		 struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (sig < 0) {
		current->thread.new_signal = 1;
		sig = -sig;
	}

	if (act) {
		unsigned long mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
			return -EFAULT;
		__get_user(new_ka.sa.sa_flags, &act->sa_flags);
		__get_user(mask, &act->sa_mask);
		siginitset(&new_ka.sa.sa_mask, mask);
		new_ka.ka_restorer = NULL;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* In the clone() case we could copy half consistent
		 * state to the user, however this could sleep and
		 * deadlock us if we held the signal lock on SMP.  So for
		 * now I take the easy way out and do no locking.
		 */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
			return -EFAULT;
		__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
	}

	return ret;
}

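/* For rt_sigaction() SPARC passes the sigreturn trampoline explicitly as an
 * extra argument instead of using SA_RESTORER; it is carried in ka_restorer
 * alongside the usual struct sigaction contents.
 */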
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 void __user *restorer,
		 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	/* All tasks which use RT signals (effectively) use
	 * new style signals.
	 */
	current->thread.new_signal = 1;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

asmlinkage int sys_getdomainname(char __user *name, int len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

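/* Note on the inline trap used by kernel_execve() below: "t 0x10" is the
 * Linux/SPARC system call trap.  On return the carry flag signals an error
 * with the positive errno left in %o0; the bcc/sub pair converts that into
 * the usual negative return value.
 */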
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x10\n\t"
		      "bcc 1f\n\t"
		      " mov %%o0, %0\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "1:\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}