linux-2.6: arch/powerpc/kernel/process.c
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
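
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * anything that reads tsk->thread.fpr directly, such as the ptrace
 * or core-dump paths, must flush the live register state first:
 *
 *	flush_fp_to_thread(child);
 *	val = child->thread.fpr[i];
 */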

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
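
/*
 * Illustrative in-kernel usage (a sketch): a kernel user of the FPU
 * must keep preemption disabled for as long as the FP registers are
 * in use, since nothing saves them across an intervening switch:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP registers ...
 *	preempt_enable();
 */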

int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	if (!tsk->thread.regs)
		return 0;
	flush_fp_to_thread(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

	return 1;
}

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}

int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
{
	/* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
	 * separately, see below */
	const int nregs = ELF_NVRREG - 2;
	elf_vrreg_t *reg;
	u32 *dest;

	if (tsk == current)
		flush_altivec_to_thread(tsk);

	reg = (elf_vrreg_t *)vrregs;

	/* copy the 32 vr registers */
	memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
	reg += nregs;

	/* copy the vscr */
	memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
	reg++;

	/* vrsave is stored in the high 32-bit slot of the final 128 bits */
	memset(reg, 0, sizeof(*reg));
	dest = (u32 *)reg;
	*dest = tsk->thread.vrsave;

	return 1;
}
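
/*
 * Resulting elf_vrregset_t layout (sketch of what the code above
 * produces):
 *
 *	reg[0..31]	VR0..VR31, 128 bits each
 *	reg[32]		VSCR, occupying a full 128-bit slot
 *	reg[33]		VRSAVE in the top 32 bits, remainder zeroed
 */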
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}

int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
	flush_spe_to_thread(current);
	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 u32 words */
	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
	return 1;
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

#ifdef CONFIG_PPC_MERGE		/* XXX for now */
	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);
#endif

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif
	return 0;
}
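
/*
 * Illustrative flow (a sketch): a debugger arms a data watchpoint by
 * setting tsk->thread.dabr (e.g. from the ptrace code) and calling
 * set_dabr(); __switch_to() below then reprograms the register
 * whenever the incoming task's value differs from this CPU's
 * current_dabr.
 */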

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}
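
/*
 * Illustrative lazy-switch timeline on UP (a sketch; the trap-side
 * reload lives in the assembly FP-unavailable handler, not here):
 *
 *	A uses FP	-> FP unavailable trap loads A's registers,
 *			   last_task_used_math = A
 *	switch A -> B	-> B runs with MSR_FP clear; A's values stay
 *			   live in the FP registers
 *	B uses FP	-> trap saves A's state to A's thread_struct,
 *			   then loads B's
 */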

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
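
/*
 * Example of the resulting output (illustrative values only):
 *
 *	Instruction dump:
 *	7c0802a6 90010014 9421ffe0 <0fe00000> 38210020 80010014
 *
 * The word at regs->nip is bracketed; unreadable words print as
 * XXXXXXXX.
 */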

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
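
/*
 * For example, printbits(MSR_EE | MSR_PR, msr_bits) prints "<EE,PR>";
 * set bits without an entry in the table are simply not shown.
 */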

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up the NIP late so we have the best chance of getting
	 * the above info out without failing.
	 */
	printk("NIP ["REG"] ", regs->nip);
	print_symbol("%s\n", regs->nip);
	printk("LR ["REG"] ", regs->link);
	print_symbol("%s\n", regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);
	}
}

void release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}
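
/*
 * Resulting child kernel stack layout (a sketch), from the top of the
 * stack page downwards:
 *
 *	childregs		copy of the parent's pt_regs
 *	STACK_FRAME_OVERHEAD	frame used for the syscall return
 *	kregs			only kregs->nip is consumed, by _switch
 *	STACK_FRAME_OVERHEAD	frame popped by _switch
 *	p->thread.ksp		<- saved stack pointer
 */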

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
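
/*
 * Illustrative 64-bit ELF function-descriptor layout, as consumed
 * above (a sketch):
 *
 *	offset  0: entry - address of the function's first instruction
 *	offset  8: toc   - TOC (r2) value the function expects
 *	offset 16: env   - environment pointer, unused here
 *
 * For 64-bit binaries, e_entry points at such a descriptor rather
 * than at code, which is why both words are loaded before use.
 */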

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE kernel this does not hurt us.  The bits that
	 * __pack_fe01 uses do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything. */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
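
/*
 * Illustrative userspace usage (a sketch): the two routines above are
 * reached via prctl(2), e.g.
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);
 */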

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */

int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
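
/*
 * Sketch of the stack-frame chain walked above: each frame begins
 * with a back-chain word pointing at the caller's frame, and the
 * word at STACK_FRAME_LR_SAVE holds a saved return address:
 *
 *	sp -> [ back chain ] -> [ back chain ] -> ...
 *	                        [ LR save    ]
 *
 * The first frame is skipped (count > 0) because its LR save slot
 * may not have been filled in yet.
 */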

static int kstack_depth_to_print = 64;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] ", sp, ip);
			print_symbol("%s", ip);
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			printk("--- Exception: %lx", regs->trap);
			print_symbol(" at %s\n", regs->nip);
			lr = regs->link;
			print_symbol("    LR = %s\n", lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif

#if THREAD_SHIFT < PAGE_SHIFT

static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */