/* linux/arch/sparc/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/ipc.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

/* #define DEBUG_UNIMP_SYSCALL */

/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary.  Every Sparc executable calls this very early on.
 */
asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
}

#define COLOUR_ALIGN(addr)      (((addr)+SHMLBA-1)&~(SHMLBA-1))
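
/* COLOUR_ALIGN() rounds an address up to the next SHMLBA boundary so that
 * shared mappings land on the same colour of the virtually-indexed cache.
 * Worked example (illustrative SHMLBA value only): with SHMLBA == 0x40000,
 * COLOUR_ALIGN(0x50001000) is (0x50001000 + 0x3ffff) & ~0x3ffff,
 * i.e. 0x50040000.
 */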

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct * vmm;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        /* See asm-sparc/uaccess.h */
        if (len > TASK_SIZE - PAGE_SIZE)
                return -ENOMEM;
        if (ARCH_SUN4C_SUN4 && len > 0x20000000)
                return -ENOMEM;
        if (!addr)
                addr = TASK_UNMAPPED_BASE;

        if (flags & MAP_SHARED)
                addr = COLOUR_ALIGN(addr);
        else
                addr = PAGE_ALIGN(addr);

        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 && 0x20000000 - len < addr) {
                        addr = PAGE_OFFSET;
                        vmm = find_vma(current->mm, PAGE_OFFSET);
                }
                if (TASK_SIZE - PAGE_SIZE - len < addr)
                        return -ENOMEM;
                if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
                        addr = COLOUR_ALIGN(addr);
        }
}

asmlinkage unsigned long sparc_brk(unsigned long brk)
{
        if (ARCH_SUN4C_SUN4) {
                if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
                        return current->mm->brk;
        }
        return sys_brk(brk);
}
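
/* On sun4/sun4c, sparc_brk() above refuses any new break value that would
 * change the top three address bits (the 0xe0000000 mask): in that case the
 * current brk is returned unchanged instead of being handed to sys_brk().
 */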

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sparc_pipe(struct pt_regs *regs)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}
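
/* Note the return convention used above: the read end of the pipe comes back
 * as the ordinary syscall return value and the write end is placed in the
 * caller's second result register (%o1, saved here as u_regs[UREG_I1]),
 * rather than both descriptors being stored through a user pointer as the
 * generic sys_pipe(int __user *fildes) does.
 */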

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
{
        int version, err;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        switch (call) {
        case SEMOP:
                err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
                break;
        case SEMTIMEDOP:
                err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
                break;
        case SEMGET:
                err = sys_semget (first, second, third);
                break;
        case SEMCTL: {
                union semun fourth;

                err = -EINVAL;
                if (!ptr)
                        break;
                err = -EFAULT;
                if (get_user(fourth.__pad,
                             (void __user * __user *)ptr))
                        break;
                err = sys_semctl (first, second, third, fourth);
                break;
        }
        case MSGSND:
                err = sys_msgsnd (first, (struct msgbuf __user *) ptr,
                                  second, third);
                break;
        case MSGRCV:
                if (version == 0) {
                        /* Old library convention: the arguments arrive
                         * packed in a struct ipc_kludge pointed to by ptr.
                         */
                        struct ipc_kludge tmp;

                        err = -EINVAL;
                        if (!ptr)
                                break;
                        err = -EFAULT;
                        if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr, sizeof (tmp)))
                                break;
                        err = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
                } else {
                        err = sys_msgrcv (first,
                                          (struct msgbuf __user *) ptr,
                                          second, fifth, third);
                }
                break;
        case MSGGET:
                err = sys_msgget ((key_t) first, second);
                break;
        case MSGCTL:
                err = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
                break;
        case SHMAT:
                if (version == 1) {     /* iBCS2 emulator entry point */
                        err = -EINVAL;
                } else {
                        ulong raddr;

                        err = do_shmat (first, (char __user *) ptr, second, &raddr);
                        if (err)
                                break;
                        err = -EFAULT;
                        if (put_user (raddr, (ulong __user *) third))
                                break;
                        err = 0;
                }
                break;
        case SHMDT:
                err = sys_shmdt ((char __user *)ptr);
                break;
        case SHMGET:
                err = sys_shmget (first, second, third);
                break;
        case SHMCTL:
                err = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
                break;
        default:
                err = -ENOSYS;
                break;
        }

        return err;
}
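
/* The single sys_ipc entry point multiplexes all of the SysV IPC calls:
 * the low 16 bits of 'call' select the operation (SEMOP ... SHMCTL from
 * <linux/ipc.h>) while the upper 16 bits carry a version number kept only
 * for backward compatibility with old library calling conventions, e.g.
 * the struct ipc_kludge form of msgrcv and the iBCS2 shmat entry point.
 */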

int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
        if (ARCH_SUN4C_SUN4 &&
            (len > 0x20000000 ||
             (addr < 0xe0000000 && addr + len > 0x20000000)))
                return -EINVAL;

        /* See asm-sparc/uaccess.h */
        if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
                return -EINVAL;

        return 0;
}

/* Linux version of mmap */
static unsigned long do_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
{
        struct file * file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        len = PAGE_ALIGN(len);
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return retval;
}

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
{
        /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
           we have. */
        return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
}
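
/* Example of the fixed 4096-byte mmap2 unit: with 4K pages (PAGE_SHIFT == 12)
 * the shift above is zero and pgoff passes through unchanged; if PAGE_SIZE
 * were 8K (PAGE_SHIFT == 13), a userspace pgoff of 6 (byte offset 24576)
 * would become 3, the same byte offset expressed in PAGE_SIZE units.
 */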

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
{
        return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}

long sparc_remap_file_pages(unsigned long start, unsigned long size,
                            unsigned long prot, unsigned long pgoff,
                            unsigned long flags)
{
        /* This works on an existing mmap so we don't need to validate
         * the range as that was done at the original mmap call.
         */
        return sys_remap_file_pages(start, size, prot,
                                    (pgoff >> (PAGE_SHIFT - 12)), flags);
}
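
/* As with sys_mmap2() above, userspace passes remap_file_pages() its pgoff
 * in fixed 4096-byte units, so it is rescaled to PAGE_SIZE units here before
 * calling the generic sys_remap_file_pages().
 */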

extern unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sparc_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        if (ARCH_SUN4C_SUN4) {
                if (old_len > 0x20000000 || new_len > 0x20000000)
                        goto out;
                if (addr < 0xe0000000 && addr + old_len > 0x20000000)
                        goto out;
        }
        if (old_len > TASK_SIZE - PAGE_SIZE ||
            new_len > TASK_SIZE - PAGE_SIZE)
                goto out;
        down_write(&current->mm->mmap_sem);
        if (flags & MREMAP_FIXED) {
                if (ARCH_SUN4C_SUN4 &&
                    new_addr < 0xe0000000 &&
                    new_addr + new_len > 0x20000000)
                        goto out_sem;
                if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
                        goto out_sem;
        } else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
                    addr + new_len > 0x20000000) ||
                   addr + new_len > TASK_SIZE - PAGE_SIZE) {
                unsigned long map_flags = 0;
                struct file *file = NULL;

                ret = -ENOMEM;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out_sem;

                vma = find_vma(current->mm, addr);
                if (vma) {
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;
                        file = vma->vm_file;
                }

                new_addr = get_unmapped_area(file, addr, new_len,
                                             vma ? vma->vm_pgoff : 0,
                                             map_flags);
                ret = new_addr;
                if (new_addr & ~PAGE_MASK)
                        goto out_sem;
                flags |= MREMAP_FIXED;
        }
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}
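
/* The sun4/sun4c checks above mirror sparc_mmap_check() and
 * arch_get_unmapped_area(): on those machines an ordinary user mapping
 * (one placed below 0xe0000000) may not extend beyond 0x20000000, so a
 * remap that would grow or move a region past that limit is either
 * refused or sent through get_unmapped_area() to find a new address.
 */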

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long
c_sys_nis_syscall (struct pt_regs *regs)
{
        static int count = 0;

        if (count++ > 5)
                return -ENOSYS;
        printk ("%s[%d]: Unimplemented SPARC system call %d\n",
                current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs (regs);
#endif
        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void
sparc_breakpoint (struct pt_regs *regs)
{
        siginfo_t info;

        lock_kernel();
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->pc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);

#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
#endif
        unlock_kernel();
}

asmlinkage int
sparc_sigaction (int sig, const struct old_sigaction __user *act,
                 struct old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        WARN_ON_ONCE(sig >= 0);
        sig = -sig;

        if (act) {
                unsigned long mask;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
                        return -EFAULT;
                __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                __get_user(mask, &act->sa_mask);
                siginitset(&new_ka.sa.sa_mask, mask);
                new_ka.ka_restorer = NULL;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                /* In the clone() case we could copy half consistent
                 * state to the user, however this could sleep and
                 * deadlock us if we held the signal lock on SMP.  So for
                 * now I take the easy way out and do no locking.
                 */
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
                        return -EFAULT;
                __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
        }

        return ret;
}

asmlinkage long
sys_rt_sigaction(int sig,
                 const struct sigaction __user *act,
                 struct sigaction __user *oact,
                 void __user *restorer,
                 size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

asmlinkage int sys_getdomainname(char __user *name, int len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(utsname()->domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, utsname()->domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        long __res;
        register long __g1 __asm__ ("g1") = __NR_execve;
        register long __o0 __asm__ ("o0") = (long)(filename);
        register long __o1 __asm__ ("o1") = (long)(argv);
        register long __o2 __asm__ ("o2") = (long)(envp);
        asm volatile ("t 0x10\n\t"              /* trap into the syscall handler */
                      "bcc 1f\n\t"              /* carry clear: success */
                      "mov %%o0, %0\n\t"        /* delay slot: copy result */
                      "sub %%g0, %%o0, %0\n\t"  /* carry set: negate errno */
                      "1:\n\t"
                      : "=r" (__res), "=&r" (__o0)
                      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
                      : "cc");
        return __res;
}
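
/* The inline trap above follows the normal SPARC/Linux syscall convention:
 * the syscall number is loaded into %g1, the arguments into %o0-%o2, and
 * "t 0x10" enters the kernel.  On return the carry bit signals failure, in
 * which case %o0 holds the positive errno and is negated here to produce
 * the usual -errno result.
 */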