/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

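/* These work out to 0x000007ff00000000 and 0xfffff80100000000: the
 * hardware VA hole spans [2^43, 2^64 - 2^43), and the exclude window
 * widens it by a 4GB guard band (1UL << 32) on each side.
 */
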
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (unlikely(len >= va_exclude_start))
                return 1;

        if (unlikely((addr + len) < addr))
                return 1;

        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
                     ((addr + len) >= va_exclude_start &&
                      (addr + len) < va_exclude_end)))
                return 1;

        return 0;
}

/* Does start,end straddle the VA-space hole?  */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (likely(start < va_exclude_start && end < va_exclude_start))
                return 0;

        if (likely(start >= va_exclude_end && end >= va_exclude_end))
                return 0;

        return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}
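
/* Worked example (with an illustrative SHMLBA of 0x4000; the real
 * value comes from asm/shmparam.h): for addr = 0x10123000 and a file
 * color offset of 0x2000, COLOUR_ALIGN() rounds addr up to 0x10124000
 * and returns 0x10126000, the lowest address >= addr whose offset
 * within an SHMLBA block matches the file offset's cache color.
 */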

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

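        /* free_area_cache remembers where the last search left off and
         * cached_hole_size the largest hole seen below it; a request no
         * bigger than that hole might still fit below the cache point,
         * so we restart from TASK_UNMAPPED_BASE in that case.
         */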
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base-len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

/* Try to give the mapping the largest alignment we can. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long align_goal, addr = -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
                return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;

        align_goal = PAGE_SIZE;
        if (len >= (4UL * 1024 * 1024))
                align_goal = (4UL * 1024 * 1024);
        else if (len >= (512UL * 1024))
                align_goal = (512UL * 1024);
        else if (len >= (64UL * 1024))
                align_goal = (64UL * 1024);

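        /* Over-ask by (align_goal - PAGE_SIZE) and round the result up:
         * any region that large must contain an align_goal-aligned
         * block of len bytes.  get_unmapped_area() only proposes an
         * address, nothing is mapped yet, so the slack costs nothing.
         * If that fails, retry with progressively smaller goals.
         */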
        do {
                addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
                }

                if (align_goal == (4UL * 1024 * 1024))
                        align_goal = (512UL * 1024);
                else if (align_goal == (512UL * 1024))
                        align_goal = (64UL * 1024);
                else
                        align_goal = PAGE_SIZE;
        } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

        /* Mapping is smaller than 64K or larger areas could not
         * be obtained.
         */
        if (addr & ~PAGE_MASK)
                addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

        return addr;
}

/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                if (test_thread_flag(TIF_32BIT))
                        random_factor &= ((1 * 1024 * 1024) - 1);
                else
                        random_factor = ((random_factor << PAGE_SHIFT) &
                                         0xffffffffUL);
        }
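
        /* So 32-bit tasks get up to 1MB of mmap-base randomization,
         * while 64-bit tasks get a page-aligned factor of just under
         * 4GB (assuming the usual 8KB pages, i.e. PAGE_SHIFT == 13,
         * on sparc64).
         */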

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (!test_thread_flag(TIF_32BIT) ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
                unsigned long gap;

                gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
                if (gap < 128 * 1024 * 1024)
                        gap = 128 * 1024 * 1024;
                if (gap > (task_size / 6 * 5))
                        gap = (task_size / 6 * 5);

                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

asmlinkage unsigned long sparc_brk(unsigned long brk)
{
        /* People could try to be nasty and use ta 0x6d in 32bit programs */
        if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
                return current->mm->brk;

        if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
                return current->mm->brk;

        return sys_brk(brk);
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage long sparc_pipe(struct pt_regs *regs)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
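
/* libc stubs funnel the individual SysV calls through here; e.g. a
 * userspace semget(key, nsems, semflg) would arrive (sketching the
 * argument packing here as an assumption) as
 * sys_ipc(SEMGET, key, nsems, semflg, NULL, 0).
 */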

asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
                        unsigned long third, void __user *ptr, long fifth)
{
        long err;

        /* No need for backward compatibility. We can start fresh... */
        if (call <= SEMCTL) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
                                             (unsigned)second, NULL);
                        goto out;
                case SEMTIMEDOP:
                        err = sys_semtimedop(first, ptr, (unsigned)second,
                                (const struct timespec __user *)
                                             (unsigned long) fifth);
                        goto out;
                case SEMGET:
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
                        err = sys_semctl(first, second,
                                         (int)third | IPC_64,
                                         (union semun) ptr);
                        goto out;
                }
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
                        err = sys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
                        err = sys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
                        err = sys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
                        err = sys_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        }
        if (call <= SHMCTL) {
                switch (call) {
                case SHMAT: {
                        ulong raddr;
                        err = do_shmat(first, ptr, (int)second, &raddr);
                        if (!err) {
                                if (put_user(raddr,
                                             (ulong __user *) third))
                                        err = -EFAULT;
                        }
                        goto out;
                }
                case SHMDT:
                        err = sys_shmdt(ptr);
                        goto out;
                case SHMGET:
                        err = sys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
                        err = sys_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                }
        } else {
                err = -ENOSYS;
        }
out:
        return err;
}

asmlinkage long sparc64_newuname(struct new_utsname __user *name)
{
        int ret = sys_newuname(name);

        if (current->personality == PER_LINUX32 && !ret) {
                ret = (copy_to_user(name->machine, "sparc\0\0", 8)
                       ? -EFAULT : 0);
        }
        return ret;
}

asmlinkage long sparc64_personality(unsigned long personality)
{
        int ret;

        if (current->personality == PER_LINUX32 &&
            personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;

        return ret;
}

int sparc64_mmap_check(unsigned long addr, unsigned long len,
                unsigned long flags)
{
        if (test_thread_flag(TIF_32BIT)) {
                if (len >= STACK_TOP32)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
                        return -EINVAL;
        } else {
                if (len >= VA_EXCLUDE_START)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
                        return -EINVAL;
        }

        return 0;
}

/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
{
        struct file * file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        len = PAGE_ALIGN(len);

        down_write(&current->mm->mmap_sem);
        retval = do_mmap(file, addr, len, prot, flags, off);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return retval;
}

asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
        long ret;

        if (invalid_64bit_range(addr, len))
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        ret = do_munmap(current->mm, addr, len);
        up_write(&current->mm->mmap_sem);
        return ret;
}

extern unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sys64_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (test_thread_flag(TIF_32BIT))
                goto out;
        if (unlikely(new_len >= VA_EXCLUDE_START))
                goto out;
        if (unlikely(invalid_64bit_range(addr, old_len)))
                goto out;

        down_write(&current->mm->mmap_sem);
        if (flags & MREMAP_FIXED) {
                if (invalid_64bit_range(new_addr, new_len))
                        goto out_sem;
        } else if (invalid_64bit_range(addr, new_len)) {
                unsigned long map_flags = 0;
                struct file *file = NULL;

                ret = -ENOMEM;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out_sem;

                vma = find_vma(current->mm, addr);
                if (vma) {
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;
                        file = vma->vm_file;
                }

                /* MREMAP_FIXED checked above. */
                new_addr = get_unmapped_area(file, addr, new_len,
                                    vma ? vma->vm_pgoff : 0,
                                    map_flags);
                ret = new_addr;
                if (new_addr & ~PAGE_MASK)
                        goto out_sem;
                flags |= MREMAP_FIXED;
        }
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

/* We get here via sys_nis_syscall so that it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
        static int count;

        /* Don't make the system unusable if something gets stuck here. */
        if (count++ > 5)
                return -ENOSYS;

        printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs(regs);
#endif

        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
        siginfo_t info;

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
#ifdef DEBUG_SPARC_BREAKPOINT
        printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
        printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

asmlinkage long sys_getdomainname(char __user *name, int len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(utsname()->domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, utsname()->domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

asmlinkage long sys_utrap_install(utrap_entry_t type,
                                  utrap_handler_t new_p,
                                  utrap_handler_t new_d,
                                  utrap_handler_t __user *old_p,
                                  utrap_handler_t __user *old_d)
{
        if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
                return -EINVAL;
        if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
                if (old_p) {
                        if (!current_thread_info()->utraps) {
                                if (put_user(NULL, old_p))
                                        return -EFAULT;
                        } else {
                                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                                        return -EFAULT;
                        }
                }
                if (old_d) {
                        if (put_user(NULL, old_d))
                                return -EFAULT;
                }
                return 0;
        }
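        /* utraps[0] holds a reference count on the table (slot 0 is
         * never a valid utrap type), which other threads may share.
         * Installing a different handler while the count is above 1
         * first makes a private copy, copy-on-write style, so the
         * sharers keep their handlers.
         */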
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
        } else {
                if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                    current_thread_info()->utraps[0] > 1) {
                        unsigned long *p = current_thread_info()->utraps;

                        current_thread_info()->utraps =
                                kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
                                        GFP_KERNEL);
                        if (!current_thread_info()->utraps) {
                                current_thread_info()->utraps = p;
                                return -ENOMEM;
                        }
                        p[0]--;
                        current_thread_info()->utraps[0] = 1;
                        memcpy(current_thread_info()->utraps+1, p+1,
                               UT_TRAP_INSTRUCTION_31*sizeof(long));
                }
        }
        if (old_p) {
                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                        return -EFAULT;
        }
        if (old_d) {
                if (put_user(NULL, old_d))
                        return -EFAULT;
        }
        current_thread_info()->utraps[type] = (long)new_p;

        return 0;
}

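/* SPARC V9 encodes the memory model in PSTATE.MM: 0 = TSO, 1 = PSO,
 * 2 = RMO, hence the model >= 3 check.  TSTATE carries a saved copy
 * of PSTATE, with the MM field landing at bits 15:14, which is what
 * the << 14 shift below builds.
 */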
asmlinkage long sparc_memory_ordering(unsigned long model,
                                      struct pt_regs *regs)
{
        if (model >= 3)
                return -EINVAL;
        regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
        return 0;
}

asmlinkage long sys_rt_sigaction(int sig,
                                 const struct sigaction __user *act,
                                 struct sigaction __user *oact,
                                 void __user *restorer,
                                 size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
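/* The %pic register packs both hardware counters into one 64-bit
 * value: counter 0 in the low 32 bits and counter 1 in the high 32,
 * which is why the code below splits it with a cast and a shift.
 */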
asmlinkage void update_perfctrs(void)
{
        unsigned long pic, tmp;

        read_pic(pic);
        tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
        __put_user(tmp, current_thread_info()->user_cntd0);
        tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
        __put_user(tmp, current_thread_info()->user_cntd1);
        reset_pic();
}

asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
        int err = 0;

        switch (opcode) {
        case PERFCTR_ON:
                current_thread_info()->pcr_reg = arg2;
                current_thread_info()->user_cntd0 = (u64 __user *) arg0;
                current_thread_info()->user_cntd1 = (u64 __user *) arg1;
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                write_pcr(arg2);
                reset_pic();
                set_thread_flag(TIF_PERFCTR);
                break;

        case PERFCTR_OFF:
                err = -EINVAL;
                if (test_thread_flag(TIF_PERFCTR)) {
                        current_thread_info()->user_cntd0 =
                                current_thread_info()->user_cntd1 = NULL;
                        current_thread_info()->pcr_reg = 0;
                        write_pcr(0);
                        clear_thread_flag(TIF_PERFCTR);
                        err = 0;
                }
                break;

        case PERFCTR_READ: {
                unsigned long pic, tmp;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                read_pic(pic);
                tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
                err |= __put_user(tmp, current_thread_info()->user_cntd0);
                tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
                err |= __put_user(tmp, current_thread_info()->user_cntd1);
                reset_pic();
                break;
        }

        case PERFCTR_CLRPIC:
                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;

        case PERFCTR_SETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
                write_pcr(current_thread_info()->pcr_reg);
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;
        }

        case PERFCTR_GETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
                break;
        }

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
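/* ta 0x6d is the 64-bit Linux syscall trap.  On return %o0 holds the
 * result and the condition codes' carry bit flags an error, so the
 * sub/movcc pair below yields -errno on failure (carry set) and the
 * raw return value on success (carry clear).
 */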
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        long __res;
        register long __g1 __asm__ ("g1") = __NR_execve;
        register long __o0 __asm__ ("o0") = (long)(filename);
        register long __o1 __asm__ ("o1") = (long)(argv);
        register long __o2 __asm__ ("o2") = (long)(envp);
        asm volatile ("t 0x6d\n\t"
                      "sub %%g0, %%o0, %0\n\t"
                      "movcc %%xcc, %%o0, %0\n\t"
                      : "=r" (__res), "=&r" (__o0)
                      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
                      : "cc");
        return __res;
}