1 /* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/sys_sparc.c
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
9 #include <linux/config.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
14 #include <linux/file.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/mman.h>
21 #include <linux/utsname.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/ipc.h>
27 #include <linux/personality.h>
29 #include <asm/uaccess.h>
31 #include <asm/utrap.h>
32 #include <asm/perfctr.h>
34 /* #define DEBUG_UNIMP_SYSCALL */
36 /* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
/* NOTE(review): the function body is missing from this extract; it
 * presumably just returns PAGE_SIZE -- confirm against the full file. */
39 asmlinkage unsigned long sys_getpagesize(void)
/* Round "addr" up to the next SHMLBA boundary, then add the sub-SHMLBA
 * colour offset implied by "pgoff" so that mappings of the same file
 * offset share a cache colour (VIPT alias avoidance). */
44 #define COLOUR_ALIGN(addr,pgoff) \
45 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
46 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
/* arch_get_unmapped_area() - choose an unmapped virtual range of "len"
 * bytes for a new mapping, honouring SHMLBA cache-colour alignment for
 * file-backed and shared mappings.
 * NOTE(review): this extract is missing lines (closing braces, returns,
 * and the do_color_align bookkeeping); comments cover only what is visible.
 */
48 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
50 struct mm_struct *mm = current->mm;
51 struct vm_area_struct * vma;
52 unsigned long task_size = TASK_SIZE;
53 unsigned long start_addr;
56 if (flags & MAP_FIXED) {
57 /* We do not accept a shared mapping if it would violate
58 * cache aliasing constraints.
/* A fixed shared mapping whose (addr - file offset) is not
 * SHMLBA-aligned would create an illegal cache alias. */
60 if ((flags & MAP_SHARED) &&
61 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
/* 32-bit tasks are restricted to the low 0xf0000000 bytes. */
66 if (test_thread_flag(TIF_32BIT))
67 task_size = 0xf0000000UL;
68 if (len > task_size || len > -PAGE_OFFSET)
/* File-backed or shared mappings need colour alignment. */
72 if (filp || (flags & MAP_SHARED))
77 addr = COLOUR_ALIGN(addr, pgoff);
79 addr = PAGE_ALIGN(addr);
/* First try the caller's hint address as-is. */
81 vma = find_vma(mm, addr);
82 if (task_size - len >= addr &&
83 (!vma || addr + len <= vma->vm_start))
/* Cached free-area hint cannot hold this request: reset the
 * search to TASK_UNMAPPED_BASE. */
87 if (len <= mm->cached_hole_size) {
88 mm->cached_hole_size = 0;
89 mm->free_area_cache = TASK_UNMAPPED_BASE;
91 start_addr = addr = mm->free_area_cache;
97 addr = COLOUR_ALIGN(addr, pgoff);
99 addr = PAGE_ALIGN(addr);
/* Linear walk of the VMA list looking for a gap of at least "len". */
101 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
102 /* At this point: (!vma || addr < vma->vm_end). */
/* Step over the sparc64 VA hole around PAGE_OFFSET. */
103 if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
105 vma = find_vma(mm, PAGE_OFFSET);
/* Ran off the end of the usable space: retry once from
 * TASK_UNMAPPED_BASE before giving up. */
107 if (task_size < addr) {
108 if (start_addr != TASK_UNMAPPED_BASE) {
109 start_addr = addr = TASK_UNMAPPED_BASE;
110 mm->cached_hole_size = 0;
115 if (!vma || addr + len <= vma->vm_start) {
117 * Remember the place where we stopped the search:
119 mm->free_area_cache = addr + len;
/* Track the largest hole seen so far for cached_hole_size. */
122 if (addr + mm->cached_hole_size < vma->vm_start)
123 mm->cached_hole_size = vma->vm_start - addr;
127 addr = COLOUR_ALIGN(addr, pgoff);
131 /* Try to align mapping such that we align it as much as possible. */
/* get_fb_unmapped_area() - like get_unmapped_area() but over-allocates
 * and rounds the result up so that large (framebuffer) mappings get the
 * biggest natural alignment available (4M, then 512K, then 64K), falling
 * back to a plain page-aligned search if no aligned slot is found.
 * NOTE(review): this extract is missing lines (braces, returns, the
 * do/while head); comments cover only what is visible. */
132 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
134 unsigned long align_goal, addr = -ENOMEM;
136 if (flags & MAP_FIXED) {
137 /* Ok, don't mess with it. */
/* FIX: was "addr", which at this point still holds the -ENOMEM
 * sentinel; the caller's requested address is "orig_addr" (this is
 * what mainline passes here). */
138 return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
/* Colour constraints are handled by the alignment games below. */
140 flags &= ~MAP_SHARED;
/* Pick the largest alignment goal the mapping size justifies. */
142 align_goal = PAGE_SIZE;
143 if (len >= (4UL * 1024 * 1024))
144 align_goal = (4UL * 1024 * 1024);
145 else if (len >= (512UL * 1024))
146 align_goal = (512UL * 1024);
147 else if (len >= (64UL * 1024))
148 align_goal = (64UL * 1024);
/* Over-allocate by (align_goal - PAGE_SIZE) so the result can be
 * rounded up to the goal while staying inside the found hole. */
151 addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
152 if (!(addr & ~PAGE_MASK)) {
153 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
/* Search failed at this alignment: step down to the next goal. */
157 if (align_goal == (4UL * 1024 * 1024))
158 align_goal = (512UL * 1024);
159 else if (align_goal == (512UL * 1024))
160 align_goal = (64UL * 1024);
162 align_goal = PAGE_SIZE;
163 } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
165 /* Mapping is smaller than 64K or larger areas could not
/* Last resort: plain page-aligned search with the original length. */
168 if (addr & ~PAGE_MASK)
169 addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
/* sparc_brk() - sparc64 wrapper for brk(); rejects requests that would
 * move the break across architecture-specific address-space limits and
 * returns the unchanged current break in that case.
 * NOTE(review): lines are missing from this extract (the 32-bit range
 * test condition and the final sys_brk call are not visible). */
174 asmlinkage unsigned long sparc_brk(unsigned long brk)
176 /* People could try to be nasty and use ta 0x6d in 32bit programs */
177 if (test_thread_flag(TIF_32BIT) &&
179 return current->mm->brk;
/* Refuse a new break on the far side of the VA hole. */
181 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
182 return current->mm->brk;
187 * sys_pipe() is the normal C calling standard for creating
188 * a pipe. It's not the way unix traditionally does this, though.
/* sparc_pipe() - pipe(2) with the SPARC convention of returning the two
 * descriptors in registers: the visible line stores fd[1] into %i1; the
 * (missing) surrounding code presumably returns fd[0] -- confirm. */
190 asmlinkage long sparc_pipe(struct pt_regs *regs)
198 regs->u_regs[UREG_I1] = fd[1];
205 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
207 * This is really horribly ugly.
/* Dispatches on "call" in three groups: semaphores (<= SEMCTL), message
 * queues (<= MSGCTL), shared memory (<= SHMCTL).  IPC_64 is OR'd into
 * the *ctl version arguments since 64-bit sparc has no old-ABI baggage.
 * NOTE(review): the switch statements, case labels and error paths are
 * missing from this extract. */
210 asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
211 unsigned long third, void __user *ptr, long fifth)
215 /* No need for backward compatibility. We can start fresh... */
216 if (call <= SEMCTL) {
/* SEMOP is semtimedop with no timeout. */
219 err = sys_semtimedop(first, ptr,
220 (unsigned)second, NULL);
223 err = sys_semtimedop(first, ptr, (unsigned)second,
224 (const struct timespec __user *) fifth);
227 err = sys_semget(first, (int)second, (int)third);
/* SEMCTL: the union semun fourth argument is fetched indirectly
 * through "ptr". */
235 if (get_user(fourth.__pad,
236 (void __user * __user *) ptr))
238 err = sys_semctl(first, (int)second | IPC_64,
247 if (call <= MSGCTL) {
250 err = sys_msgsnd(first, ptr, (size_t)second,
254 err = sys_msgrcv(first, ptr, (size_t)second, fifth,
258 err = sys_msgget((key_t)first, (int)second);
261 err = sys_msgctl(first, (int)second | IPC_64, ptr);
268 if (call <= SHMCTL) {
/* SHMAT: attach, then copy the resulting address out through
 * the user pointer passed in "third". */
272 err = do_shmat(first, ptr, (int)second, &raddr);
275 (ulong __user *) third))
281 err = sys_shmdt(ptr);
284 err = sys_shmget(first, (size_t)second, (int)third);
287 err = sys_shmctl(first, (int)second | IPC_64, ptr);
/* sparc64_newuname() - uname(2) that reports the machine as "sparc"
 * (instead of "sparc64") for tasks running with PER_LINUX32. */
300 asmlinkage long sparc64_newuname(struct new_utsname __user *name)
302 int ret = sys_newuname(name);
304 if (current->personality == PER_LINUX32 && !ret) {
/* Overwrite the machine field in the already-copied buffer. */
305 ret = (copy_to_user(name->machine, "sparc\0\0", 8)
/* sparc64_personality() - personality(2) wrapper that keeps a PER_LINUX32
 * task in PER_LINUX32 when it asks for plain PER_LINUX, and (per the
 * visible test on the return value) maps the reported personality back.
 * NOTE(review): the line following "if (ret == PER_LINUX32)" is missing
 * from this extract. */
311 asmlinkage long sparc64_personality(unsigned long personality)
315 if (current->personality == PER_LINUX32 &&
316 personality == PER_LINUX)
317 personality = PER_LINUX32;
318 ret = sys_personality(personality);
319 if (ret == PER_LINUX32)
325 /* Linux version of mmap */
/* sys_mmap() - sparc64 mmap entry point.  Validates the requested range
 * against the 32-bit limit (0xf0000000) or the 64-bit VA hole around
 * PAGE_OFFSET, then calls do_mmap() under mmap_sem.
 * NOTE(review): lines are missing from this extract (the "off" parameter,
 * fget/fput of "fd", and the out labels are not visible). */
326 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
327 unsigned long prot, unsigned long flags, unsigned long fd,
330 struct file * file = NULL;
331 unsigned long retval = -EBADF;
333 if (!(flags & MAP_ANONYMOUS)) {
/* These two flags are handled elsewhere; strip them before the
 * range checks and do_mmap(). */
338 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
339 len = PAGE_ALIGN(len);
342 if (test_thread_flag(TIF_32BIT)) {
343 if (len > 0xf0000000UL ||
344 ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
347 if (len > -PAGE_OFFSET ||
348 ((flags & MAP_FIXED) &&
349 addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
/* FIX: "&current" had been mangled to the stray sequence "¤t"
 * (HTML-entity corruption of "&curren"); restored on both lines. */
353 down_write(&current->mm->mmap_sem);
354 retval = do_mmap(file, addr, len, prot, flags, off);
355 up_write(&current->mm->mmap_sem);
/* sys64_munmap() - munmap(2) that rejects ranges overlapping the sparc64
 * VA hole around PAGE_OFFSET before calling do_munmap() under mmap_sem.
 * NOTE(review): the "ret" declaration, error return and final return are
 * missing from this extract. */
364 asmlinkage long sys64_munmap(unsigned long addr, size_t len)
368 if (len > -PAGE_OFFSET ||
369 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
/* FIX: "&current" had been mangled to the stray sequence "¤t"
 * (HTML-entity corruption of "&curren"); restored on both lines. */
371 down_write(&current->mm->mmap_sem);
372 ret = do_munmap(current->mm, addr, len);
373 up_write(&current->mm->mmap_sem);
/* Forward declaration of the generic mm helper used by sys64_mremap()
 * below; defined in mm/mremap.c. */
377 extern unsigned long do_mremap(unsigned long addr,
378 unsigned long old_len, unsigned long new_len,
379 unsigned long flags, unsigned long new_addr);
/* sys64_mremap() - mremap(2) for 64-bit tasks.  Rejects 32-bit tasks and
 * any source/destination range that would cross the sparc64 VA hole
 * around PAGE_OFFSET; when growing in place would enter the hole and the
 * caller allows moving, it pre-selects a new address itself and forces
 * MREMAP_FIXED before calling do_mremap().
 * NOTE(review): lines are missing from this extract (several gotos,
 * braces and the MAP_ANONYMOUS bookkeeping are not visible). */
381 asmlinkage unsigned long sys64_mremap(unsigned long addr,
382 unsigned long old_len, unsigned long new_len,
383 unsigned long flags, unsigned long new_addr)
385 struct vm_area_struct *vma;
386 unsigned long ret = -EINVAL;
/* 32-bit tasks must use the compat path, not this syscall. */
387 if (test_thread_flag(TIF_32BIT))
389 if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
391 if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
/* FIX: "&current" had been mangled to the stray sequence "¤t"
 * (HTML-entity corruption of "&curren"); restored here and on the
 * matching up_write below. */
393 down_write(&current->mm->mmap_sem);
394 if (flags & MREMAP_FIXED) {
395 if (new_addr < PAGE_OFFSET &&
396 new_addr + new_len > -PAGE_OFFSET)
398 } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
399 unsigned long map_flags = 0;
400 struct file *file = NULL;
/* Growing in place would enter the VA hole: we must move, which
 * needs the caller's permission. */
403 if (!(flags & MREMAP_MAYMOVE))
406 vma = find_vma(current->mm, addr);
408 if (vma->vm_flags & VM_SHARED)
409 map_flags |= MAP_SHARED;
413 /* MREMAP_FIXED checked above. */
414 new_addr = get_unmapped_area(file, addr, new_len,
415 vma ? vma->vm_pgoff : 0,
418 if (new_addr & ~PAGE_MASK)
/* A valid target was found: hand it to do_mremap() as fixed. */
420 flags |= MREMAP_FIXED;
422 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
424 up_write(&current->mm->mmap_sem);
429 /* we come to here via sys_nis_syscall so it can setup the regs argument */
/* Catch-all for unimplemented syscall numbers: logs the offending number
 * (in %g1) at a limited rate so a looping program cannot flood the log.
 * NOTE(review): the rate-limit bookkeeping lines are missing here. */
430 asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
434 /* Don't make the system unusable, if someone goes stuck */
438 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
439 #ifdef DEBUG_UNIMP_SYSCALL
446 /* #define DEBUG_SPARC_BREAKPOINT */
/* sparc_breakpoint() - handler for the breakpoint trap: truncates the
 * PC/nPC to 32 bits for compat tasks and delivers SIGTRAP/TRAP_BRKPT
 * with the faulting PC as si_addr. */
448 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
452 if (test_thread_flag(TIF_32BIT)) {
453 regs->tpc &= 0xffffffff;
454 regs->tnpc &= 0xffffffff;
456 #ifdef DEBUG_SPARC_BREAKPOINT
457 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
459 info.si_signo = SIGTRAP;
461 info.si_code = TRAP_BRKPT;
462 info.si_addr = (void __user *)regs->tpc;
464 force_sig_info(SIGTRAP, &info, current);
465 #ifdef DEBUG_SPARC_BREAKPOINT
466 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
470 extern void check_pending(int signum);
/* sys_getdomainname() - copy the NIS domain name to user space, capped
 * at __NEW_UTS_LEN.  NOTE(review): locking around system_utsname and the
 * len<nlen clamping lines are missing from this extract. */
472 asmlinkage long sys_getdomainname(char __user *name, int len)
479 nlen = strlen(system_utsname.domainname) + 1;
483 if (len > __NEW_UTS_LEN)
485 if (copy_to_user(name, system_utsname.domainname, len))
/* solaris_syscall() - stub reached when a Solaris binary traps without
 * the solaris emulation module loaded: advances the PC past the trap,
 * logs a hint (rate-limited in the missing lines) and sends SIGSEGV. */
493 asmlinkage long solaris_syscall(struct pt_regs *regs)
/* Skip the trap instruction so the task does not re-trap forever. */
497 regs->tpc = regs->tnpc;
499 if (test_thread_flag(TIF_32BIT)) {
500 regs->tpc &= 0xffffffff;
501 regs->tnpc &= 0xffffffff;
504 printk ("For Solaris binary emulation you need solaris module loaded\n");
507 send_sig(SIGSEGV, current, 1);
512 #ifndef CONFIG_SUNOS_EMUL
/* sunos_syscall() - same idea as solaris_syscall() above, but for SunOS
 * binaries when SunOS emulation is not compiled in. */
513 asmlinkage long sunos_syscall(struct pt_regs *regs)
/* Skip the trap instruction so the task does not re-trap forever. */
517 regs->tpc = regs->tnpc;
519 if (test_thread_flag(TIF_32BIT)) {
520 regs->tpc &= 0xffffffff;
521 regs->tnpc &= 0xffffffff;
524 printk ("SunOS binary emulation not compiled in\n");
525 force_sig(SIGSEGV, current);
/* sys_utrap_install() - install/query SPARC V9 user trap handlers.
 * "type" selects the trap slot; new_p/new_d are the new precise/deferred
 * handlers (UTH_NOCHANGE = query only); old_p/old_d, when non-NULL,
 * receive the previous handlers.  The per-thread utraps table is lazily
 * allocated; slot [0] is a reference count so a table shared after clone
 * is copied before being modified (copy-on-write).
 * NOTE(review): many lines (returns, EFAULT paths, braces, the deferred-
 * handler store) are missing from this extract. */
531 asmlinkage long sys_utrap_install(utrap_entry_t type,
532 utrap_handler_t new_p,
533 utrap_handler_t new_d,
534 utrap_handler_t __user *old_p,
535 utrap_handler_t __user *old_d)
537 if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
/* Query-only path: report current handlers without modifying. */
539 if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
541 if (!current_thread_info()->utraps) {
542 if (put_user(NULL, old_p))
545 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
550 if (put_user(NULL, old_d))
/* Lazily allocate the utraps table on first install. */
555 if (!current_thread_info()->utraps) {
556 current_thread_info()->utraps =
557 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
558 if (!current_thread_info()->utraps)
/* Slot [0] holds the reference count. */
560 current_thread_info()->utraps[0] = 1;
561 memset(current_thread_info()->utraps+1, 0,
562 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Table is shared (refcount > 1) and we are about to change it:
 * make a private copy first. */
564 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
565 current_thread_info()->utraps[0] > 1) {
566 long *p = current_thread_info()->utraps;
568 current_thread_info()->utraps =
569 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
/* Allocation failed: restore the shared table. */
571 if (!current_thread_info()->utraps) {
572 current_thread_info()->utraps = p;
576 current_thread_info()->utraps[0] = 1;
577 memcpy(current_thread_info()->utraps+1, p+1,
578 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Report old handlers, then install the new precise one. */
582 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
586 if (put_user(NULL, old_d))
589 current_thread_info()->utraps[type] = (long)new_p;
/* sparc_memory_ordering() - set the memory model (TSO/PSO/RMO) bits in
 * the saved %tstate; "model" is shifted into the TSTATE_MM field.
 * NOTE(review): the model-range validation line is missing here. */
594 long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
598 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
/* sys_rt_sigaction() - sparc64 rt_sigaction with an extra "restorer"
 * argument stashed in new_ka.ka_restorer for the signal-return
 * trampoline.  NOTE(review): the sigsetsize parameter declaration,
 * "if (act)"/"if (!ret && oact)" guards and returns are missing from
 * this extract. */
602 asmlinkage long sys_rt_sigaction(int sig,
603 const struct sigaction __user *act,
604 struct sigaction __user *oact,
605 void __user *restorer,
608 struct k_sigaction new_ka, old_ka;
611 /* XXX: Don't preclude handling different sized sigset_t's. */
612 if (sigsetsize != sizeof(sigset_t))
616 new_ka.ka_restorer = restorer;
617 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
621 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
624 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
631 /* Invoked by rtrap code to update performance counters in
/* Accumulate the hardware %pic counter halves (low 32 bits -> cntd0,
 * high 32 bits -> cntd1) into the per-thread kernel counters and mirror
 * the totals to the user-supplied buffers.  NOTE(review): the read_pic()
 * and reset_pic() lines are missing from this extract. */
634 asmlinkage void update_perfctrs(void)
636 unsigned long pic, tmp;
639 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
640 __put_user(tmp, current_thread_info()->user_cntd0);
641 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
642 __put_user(tmp, current_thread_info()->user_cntd1);
646 asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
652 current_thread_info()->pcr_reg = arg2;
653 current_thread_info()->user_cntd0 = (u64 __user *) arg0;
654 current_thread_info()->user_cntd1 = (u64 __user *) arg1;
655 current_thread_info()->kernel_cntd0 =
656 current_thread_info()->kernel_cntd1 = 0;
659 set_thread_flag(TIF_PERFCTR);
664 if (test_thread_flag(TIF_PERFCTR)) {
665 current_thread_info()->user_cntd0 =
666 current_thread_info()->user_cntd1 = NULL;
667 current_thread_info()->pcr_reg = 0;
669 clear_thread_flag(TIF_PERFCTR);
675 unsigned long pic, tmp;
677 if (!test_thread_flag(TIF_PERFCTR)) {
682 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
683 err |= __put_user(tmp, current_thread_info()->user_cntd0);
684 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
685 err |= __put_user(tmp, current_thread_info()->user_cntd1);
691 if (!test_thread_flag(TIF_PERFCTR)) {
695 current_thread_info()->kernel_cntd0 =
696 current_thread_info()->kernel_cntd1 = 0;
700 case PERFCTR_SETPCR: {
701 u64 __user *user_pcr = (u64 __user *)arg0;
703 if (!test_thread_flag(TIF_PERFCTR)) {
707 err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
708 write_pcr(current_thread_info()->pcr_reg);
709 current_thread_info()->kernel_cntd0 =
710 current_thread_info()->kernel_cntd1 = 0;
715 case PERFCTR_GETPCR: {
716 u64 __user *user_pcr = (u64 __user *)arg0;
718 if (!test_thread_flag(TIF_PERFCTR)) {
722 err |= __put_user(current_thread_info()->pcr_reg, user_pcr);