/*  arch/sparc64/kernel/process.c
 *
 *  Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
 *  Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>

#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/sstate.h>
#include <asm/reboot.h>
#include <asm/syscalls.h>

/* #define VERBOSE_SHOWREGS */

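/* Idle yield on sun4v: park this virtual CPU in the hypervisor until
 * something needs to run.  On non-hypervisor chips we just return and
 * keep polling.  Interrupts are disabled around the final
 * need_resched() check so a wakeup cannot race with the yield call.
 */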
static void sparc64_yield(int cpu)
{
        if (tlb_type != hypervisor)
                return;

        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb__after_clear_bit();

        while (!need_resched() && !cpu_is_offline(cpu)) {
                unsigned long pstate;

                /* Disable interrupts. */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "andn %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));

                if (!need_resched() && !cpu_is_offline(cpu))
                        sun4v_cpu_yield();

                /* Re-enable interrupts. */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "or %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));
        }

        set_thread_flag(TIF_POLLING_NRFLAG);
}

/* The idle loop on sparc64. */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

        set_thread_flag(TIF_POLLING_NRFLAG);

        while(1) {
                tick_nohz_stop_sched_tick();

                while (!need_resched() && !cpu_is_offline(cpu))
                        sparc64_yield(cpu);

                tick_nohz_restart_sched_tick();

                preempt_enable_no_resched();

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(cpu))
                        cpu_play_dead();
#endif

                schedule();
                preempt_disable();
        }
}

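/* Halt, power-off and reboot entry points.  Each records the new
 * software state via sstate_*() and then asks the firmware (OBP) to do
 * the actual work; the prom calls are not expected to return, so
 * falling through to panic() means the firmware request failed.
 */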
void machine_halt(void)
{
        sstate_halt();
        prom_halt();
        panic("Halt failed!");
}

void machine_alt_power_off(void)
{
        sstate_poweroff();
        prom_halt_power_off();
        panic("Power-off failed!");
}

void machine_restart(char * cmd)
{
        char *p;

        sstate_reboot();
        p = strchr (reboot_command, '\n');
        if (p) *p = 0;
        if (cmd)
                prom_reboot(cmd);
        if (*reboot_command)
                prom_reboot(reboot_command);
        prom_reboot("");
        panic("Reboot failed!");
}

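/* Dump the current register window of a 32-bit compat task.  The
 * window lives on the user stack at %o6 (u_regs[14]), so flush the
 * hardware windows and copy it in from userspace before printing.
 */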
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
        struct reg_window32 __user *rw;
        struct reg_window32 r_w;
        mm_segment_t old_fs;

        __asm__ __volatile__ ("flushw");
        rw = compat_ptr((unsigned)regs->u_regs[14]);
        old_fs = get_fs();
        set_fs (USER_DS);
        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                set_fs (old_fs);
                return;
        }

        set_fs (old_fs);
        printk("l0: %08x l1: %08x l2: %08x l3: %08x "
               "l4: %08x l5: %08x l6: %08x l7: %08x\n",
               r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
               r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
        printk("i0: %08x i1: %08x i2: %08x i3: %08x "
               "i4: %08x i5: %08x i6: %08x i7: %08x\n",
               r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
               r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)  do { } while (0)
#endif

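/* Dump the current register window for a 64-bit user or kernel frame.
 * Kernel frames can be read directly; 64-bit user frames are copied in
 * from the stack pointer plus STACK_BIAS; 32-bit user frames are
 * handled by show_regwindow32() above.
 */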
static void show_regwindow(struct pt_regs *regs)
{
        struct reg_window __user *rw;
        struct reg_window *rwk;
        struct reg_window r_w;
        mm_segment_t old_fs;

        if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
                __asm__ __volatile__ ("flushw");
                rw = (struct reg_window __user *)
                        (regs->u_regs[14] + STACK_BIAS);
                rwk = (struct reg_window *)
                        (regs->u_regs[14] + STACK_BIAS);
                if (!(regs->tstate & TSTATE_PRIV)) {
                        old_fs = get_fs();
                        set_fs (USER_DS);
                        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                                set_fs (old_fs);
                                return;
                        }
                        rwk = &r_w;
                        set_fs (old_fs);
                }
        } else {
                show_regwindow32(regs);
                return;
        }
        printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
               rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
        printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
               rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
        printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
               rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
        printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
               rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
        if (regs->tstate & TSTATE_PRIV)
                print_symbol("I7: <%s>\n", rwk->ins[7]);
}

#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(regdump_lock);
#endif

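/* Print a full register dump.  On SMP, interrupts are disabled and
 * regdump_lock is taken so dumps from several CPUs do not interleave
 * and an xcall IPI cannot livelock us while we hold the lock.
 */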
void __show_regs(struct pt_regs * regs)
{
#ifdef CONFIG_SMP
        unsigned long flags;

        /* Protect against xcall ipis which might lead to livelock on the lock */
        __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
                             "wrpr      %0, %1, %%pstate"
                             : "=r" (flags)
                             : "i" (PSTATE_IE));
        spin_lock(&regdump_lock);
#endif
        printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
               regs->tpc, regs->tnpc, regs->y, print_tainted());
        print_symbol("TPC: <%s>\n", regs->tpc);
        printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
        printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
               regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
               regs->u_regs[7]);
        printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
               regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
               regs->u_regs[11]);
        printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
        print_symbol("RPC: <%s>\n", regs->u_regs[15]);
        show_regwindow(regs);
#ifdef CONFIG_SMP
        spin_unlock(&regdump_lock);
        __asm__ __volatile__("wrpr      %0, 0, %%pstate"
                             : : "r" (flags));
#endif
}

#ifdef VERBOSE_SHOWREGS
static void idump_from_user (unsigned int *pc)
{
        int i;
        int code;

        if((((unsigned long) pc) & 3))
                return;

        pc -= 3;
        for(i = -3; i < 6; i++) {
                get_user(code, pc);
                printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
                pc++;
        }
        printk("\n");
}
#endif

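/* Top-level register dump.  When VERBOSE_SHOWREGS is defined we also
 * try to dump the parent trap frame if we trapped from within etrap
 * and the saved stack pointer lies inside current's thread area.
 */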
void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
        extern long etrap, etraptl1;
#endif
        __show_regs(regs);
#if 0
#ifdef CONFIG_SMP
        {
                extern void smp_report_regs(void);

                smp_report_regs();
        }
#endif
#endif

#ifdef VERBOSE_SHOWREGS
        if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
            regs->u_regs[14] >= (long)current - PAGE_SIZE &&
            regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
                printk ("*********parent**********\n");
                __show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
                idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
                printk ("*********endpar**********\n");
        }
#endif
}

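/* Best-effort guess at the PC a sleeping task will resume at: follow
 * the saved kernel stack pointer up one frame and read the return
 * address slot.  Returns 0xdeadbeef if the frames look bogus.
 */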
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        unsigned long ret = 0xdeadbeefUL;

        if (ti && ti->ksp) {
                unsigned long *sp;
                sp = (unsigned long *)(ti->ksp + STACK_BIAS);
                if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
                    sp[14]) {
                        unsigned long *fp;
                        fp = (unsigned long *)(sp[14] + STACK_BIAS);
                        if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
                                ret = fp[15];
                }
        }
        return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
        struct thread_info *t = current_thread_info();

        if (t->utraps) {
                if (t->utraps[0] < 2)
                        kfree (t->utraps);
                else
                        t->utraps[0]--;
        }

        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }
}

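/* Reset per-thread state for exec().  This is where a pending 32-bit
 * vs. 64-bit ABI switch (TIF_ABI_PENDING, set when an ELF image of the
 * other word size is loaded) takes effect, and where performance
 * counter and FPU state get cleared.
 */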
void flush_thread(void)
{
        struct thread_info *t = current_thread_info();
        struct mm_struct *mm;

        if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
                clear_ti_thread_flag(t, TIF_ABI_PENDING);
                if (test_ti_thread_flag(t, TIF_32BIT))
                        clear_ti_thread_flag(t, TIF_32BIT);
                else
                        set_ti_thread_flag(t, TIF_32BIT);
        }

        mm = t->task->mm;
        if (mm)
                tsb_context_switch(mm);

        set_thread_wsaved(0);

        /* Turn off performance counters if on. */
        if (test_and_clear_thread_flag(TIF_PERFCTR)) {
                t->user_cntd0 = t->user_cntd1 = NULL;
                t->pcr_reg = 0;
                write_pcr(0);
        }

        /* Clear FPU register state. */
        t->fpsaved[0] = 0;

        if (get_thread_current_ds() != ASI_AIUS)
                set_fs(USER_DS);
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
        unsigned long fp, distance, rval;

        if (!(test_thread_flag(TIF_32BIT))) {
                csp += STACK_BIAS;
                psp += STACK_BIAS;
                __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
                fp += STACK_BIAS;
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

        /* Now 8-byte align the stack as this is mandatory in the
         * Sparc ABI due to how register windows work.  This hides
         * the restriction from thread libraries etc.  -DaveM
         */
        csp &= ~7UL;

        distance = fp - psp;
        rval = (csp - distance);
        if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
                rval = 0;
        else if (test_thread_flag(TIF_32BIT)) {
                if (put_user(((u32)csp),
                             &(((struct reg_window32 __user *)rval)->ins[6])))
                        rval = 0;
        } else {
                if (put_user(((u64)csp - STACK_BIAS),
                             &(((struct reg_window __user *)rval)->ins[6])))
                        rval = 0;
                else
                        rval = rval - STACK_BIAS;
        }

        return rval;
}

/* Drop window 'first_win' from the save buffer by sliding the
 * remaining saved windows and their stack pointers down one slot.
 */
static inline void shift_window_buffer(int first_win, int last_win,
                                       struct thread_info *t)
{
        int i;

        for (i = first_win; i < last_win; i++) {
                t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
                memcpy(&t->reg_window[i], &t->reg_window[i+1],
                       sizeof(struct reg_window));
        }
}

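/* Push any register windows still buffered in thread_info out to the
 * user stack.  Windows that copy out cleanly are removed from the
 * buffer; ones that fault are kept for a later attempt, so this never
 * kills the task.
 */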
void synchronize_user_stack(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;

        flush_user_windows();
        if ((window = get_thread_wsaved()) != 0) {
                int winsize = sizeof(struct reg_window);
                int bias = 0;

                if (test_thread_flag(TIF_32BIT))
                        winsize = sizeof(struct reg_window32);
                else
                        bias = STACK_BIAS;

                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (!copy_to_user((char __user *)sp, rwin, winsize)) {
                                shift_window_buffer(window, get_thread_wsaved() - 1, t);
                                set_thread_wsaved(get_thread_wsaved() - 1);
                        }
                } while (window--);
        }
}

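/* Send SIGBUS/BUS_ADRALN for a user stack pointer that is not 8-byte
 * aligned.
 */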
static void stack_unaligned(unsigned long sp)
{
        siginfo_t info;

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void __user *) sp;
        info.si_trapno = 0;
        force_sig_info(SIGBUS, &info, current);
}

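/* Like synchronize_user_stack(), but mandatory: write every buffered
 * register window back to the user stack, faulting pages in as needed.
 * If a window cannot be written the task is killed with SIGILL.
 */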
void fault_in_user_windows(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;
        int winsize = sizeof(struct reg_window);
        int bias = 0;

        if (test_thread_flag(TIF_32BIT))
                winsize = sizeof(struct reg_window32);
        else
                bias = STACK_BIAS;

        flush_user_windows();
        window = get_thread_wsaved();

        if (likely(window != 0)) {
                window -= 1;
                do {
                        unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
                        struct reg_window *rwin = &t->reg_window[window];

                        if (unlikely(sp & 0x7UL))
                                stack_unaligned(sp);

                        if (unlikely(copy_to_user((char __user *)sp,
                                                  rwin, winsize)))
                                goto barf;
                } while (window--);
        }
        set_thread_wsaved(0);
        return;

barf:
        set_thread_wsaved(window + 1);
        do_exit(SIGILL);
}

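/* Common back-end for the fork/vfork/clone system calls.  The parent
 * and child TID pointers arrive in %i2 and %i4 and must be narrowed
 * with compat_ptr() for 32-bit tasks before being passed to do_fork().
 */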
asmlinkage long sparc_do_fork(unsigned long clone_flags,
                              unsigned long stack_start,
                              struct pt_regs *regs,
                              unsigned long stack_size)
{
        int __user *parent_tid_ptr, *child_tid_ptr;

#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
                child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
        } else
#endif
        {
                parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
                child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
        }

        return do_fork(clone_flags, stack_start,
                       regs, stack_size,
                       parent_tid_ptr, child_tid_ptr);
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's  pid, %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *t = task_thread_info(p);
        char *child_trap_frame;

        /* Calculate offset to stack_frame & pt_regs */
        child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));

        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
                (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
        t->new_child = 1;
        t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
        t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
        t->fpsaved[0] = 0;

        if (regs->tstate & TSTATE_PRIV) {
                /* Special case, if we are spawning a kernel thread from
                 * a userspace task (via KMOD, NFS, or similar) we must
                 * disable performance counters in the child because the
                 * address space and protection realm are changing.
                 */
                if (t->flags & _TIF_PERFCTR) {
                        t->user_cntd0 = t->user_cntd1 = NULL;
                        t->pcr_reg = 0;
                        t->flags &= ~_TIF_PERFCTR;
                }
                t->kregs->u_regs[UREG_FP] = t->ksp;
                t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
                flush_register_windows();
                memcpy((void *)(t->ksp + STACK_BIAS),
                       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
                       sizeof(struct sparc_stackf));
                t->kregs->u_regs[UREG_G6] = (unsigned long) t;
                t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
        } else {
                if (t->flags & _TIF_32BIT) {
                        sp &= 0x00000000ffffffffUL;
                        regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
                }
                t->kregs->u_regs[UREG_FP] = sp;
                t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
                if (sp != regs->u_regs[UREG_FP]) {
                        unsigned long csp;

                        csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
                        if (!csp)
                                return -EFAULT;
                        t->kregs->u_regs[UREG_FP] = csp;
                }
                if (t->utraps)
                        t->utraps[0]++;
        }

        /* Set the return value for the child. */
        t->kregs->u_regs[UREG_I0] = current->pid;
        t->kregs->u_regs[UREG_I1] = 1;

        /* Set the second return value for the parent. */
        regs->u_regs[UREG_I1] = 0;

        if (clone_flags & CLONE_SETTLS)
                t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

        return 0;
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (i.e. the swapper or direct
 * descendants who haven't done an "execve()") should use this: it will
 * work within a system call from a "real" process, but the process
 * memory space will not be freed until both the parent and the child
 * have exited.
 */
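/* A typical call looks like the sketch below (the worker function and
 * argument names are purely illustrative):
 *
 *        pid = kernel_thread(my_worker, my_arg, CLONE_FS | CLONE_FILES);
 *
 * The parent gets the new thread's pid back; the child never returns
 * here, it jumps straight into my_worker() and exits when that
 * function returns.
 */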
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        long retval;

        /* If the parent runs before fn(arg) is called by the child,
         * the input registers of this function can be clobbered.
         * So we stash 'fn' and 'arg' into global registers which
         * will not be modified by the parent.
         */
        __asm__ __volatile__("mov %4, %%g2\n\t"    /* Save FN into global */
                             "mov %5, %%g3\n\t"    /* Save ARG into global */
                             "mov %1, %%g1\n\t"    /* Clone syscall nr. */
                             "mov %2, %%o0\n\t"    /* Clone flags. */
                             "mov 0, %%o1\n\t"     /* usp arg == 0 */
                             "t 0x6d\n\t"          /* Linux/Sparc clone(). */
                             "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
                             " mov %%o0, %0\n\t"
                             "jmpl %%g2, %%o7\n\t"   /* Call the function. */
                             " mov %%g3, %%o0\n\t"   /* Set arg in delay. */
                             "mov %3, %%g1\n\t"
                             "t 0x6d\n\t"          /* Linux/Sparc exit(). */
                             /* Notreached by child. */
                             "1:" :
                             "=r" (retval) :
                             "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
                             "i" (__NR_exit),  "r" (fn), "r" (arg) :
                             "g1", "g2", "g3", "o0", "o1", "memory", "cc");
        return retval;
}

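/* Compat (32-bit) layout of the ELF FPU register set, used below when
 * core-dumping a TIF_32BIT task so the core file matches what 32-bit
 * userland expects.
 */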
typedef struct {
        union {
                unsigned int    pr_regs[32];
                unsigned long   pr_dregs[16];
        } pr_fr;
        unsigned int __unused;
        unsigned int    pr_fsr;
        unsigned char   pr_qcnt;
        unsigned char   pr_q_entrysize;
        unsigned char   pr_en;
        unsigned int    pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
        unsigned long *kfpregs = current_thread_info()->fpregs;
        unsigned long fprs = current_thread_info()->fpsaved[0];

        if (test_thread_flag(TIF_32BIT)) {
                elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

                if (fprs & FPRS_DL)
                        memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs32->pr_fr.pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                fpregs32->pr_qcnt = 0;
                fpregs32->pr_q_entrysize = 8;
                memset(&fpregs32->pr_q[0], 0,
                       (sizeof(unsigned int) * 64));
                if (fprs & FPRS_FEF) {
                        fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
                        fpregs32->pr_en = 1;
                } else {
                        fpregs32->pr_fsr = 0;
                        fpregs32->pr_en = 0;
                }
        } else {
                if(fprs & FPRS_DL)
                        memcpy(&fpregs->pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                if(fprs & FPRS_DU)
                        memcpy(&fpregs->pr_regs[16], kfpregs+16,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[16], 0,
                               sizeof(unsigned int) * 32);
                if(fprs & FPRS_FEF) {
                        fpregs->pr_fsr = current_thread_info()->xfsr[0];
                        fpregs->pr_gsr = current_thread_info()->gsr[0];
                } else {
                        fpregs->pr_fsr = fpregs->pr_gsr = 0;
                }
                fpregs->pr_fprs = fprs;
        }
        return 1;
}

/*
 * sparc_execve() executes a new program after the asm stub has set
 * things up for us.  This should basically do what I want it to.
 */
asmlinkage int sparc_execve(struct pt_regs *regs)
{
        int error, base = 0;
        char *filename;

        /* User register window flush is done by entry.S */

        /* Check for indirect call. */
        if (regs->u_regs[UREG_G1] == 0)
                base = 1;

        filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I1],
                          (char __user * __user *)
                          regs->u_regs[base + UREG_I2], regs);
        putname(filename);
        if (!error) {
                fprs_write(0);
                current_thread_info()->xfsr[0] = 0;
                current_thread_info()->fpsaved[0] = 0;
                regs->tstate &= ~TSTATE_PEF;
        }
out:
        return error;
}

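/* Find out where in the kernel a sleeping task is waiting: walk up to
 * 16 saved register windows on its kernel stack and return the first
 * return address that is not inside the scheduler itself.
 */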
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc, fp, bias = 0;
        unsigned long thread_info_base;
        struct reg_window *rw;
        unsigned long ret = 0;
        int count = 0;

        if (!task || task == current ||
            task->state == TASK_RUNNING)
                goto out;

        thread_info_base = (unsigned long) task_stack_page(task);
        bias = STACK_BIAS;
        fp = task_thread_info(task)->ksp + bias;

        do {
                /* Bogus frame pointer? */
                if (fp < (thread_info_base + sizeof(struct thread_info)) ||
                    fp >= (thread_info_base + THREAD_SIZE))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                if (!in_sched_functions(pc)) {
                        ret = pc;
                        goto out;
                }
                fp = rw->ins[6] + bias;
        } while (++count < 16);

out:
        return ret;
}