/*
 *  arch/ppc/kernel/process.c
 *
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
extern unsigned long _get_SP(void);

struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_spe = NULL;
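/*
 * On a uniprocessor kernel these point at the task whose register state
 * is still live in the FPU/AltiVec/SPE hardware, enabling lazy save and
 * restore: the unit is left disabled in the switched-in thread's MSR,
 * and the first faulting access traps and reloads the registers.  On
 * SMP the state is saved eagerly in __switch_to() instead (see the
 * comments there).
 */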
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
/* this is 8kB-aligned so we can get to the thread_info struct
   at the base of it from the stack pointer with 1 integer instruction. */
union thread_union init_thread_union
        __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
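/*
 * A sketch of the "1 integer instruction" trick referred to above (the
 * real code lives in current_thread_info(); the register names here are
 * illustrative):
 *
 *      mr      r9,r1           # take the kernel stack pointer and
 *      rlwinm  r9,r9,0,0,18    # clear the low 13 bits, leaving the
 *                              # base of the 8kB stack, where the
 *                              # thread_info struct sits.
 */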
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };

#undef SHOW_TASK_SWITCHES
#undef CHECK_STACK
#if defined(CHECK_STACK)
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
        return ((unsigned long)tsk) + sizeof(union task_union);
}

unsigned long
task_top(struct task_struct *tsk)
{
        return ((unsigned long)tsk) + sizeof(struct thread_info);
}

/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
        unsigned long stack_top = kernel_stack_top(tsk);
        unsigned long tsk_top = task_top(tsk);
        int ret = 0;

#if 0
        /* check thread magic */
        if ( tsk->thread.magic != THREAD_MAGIC )
        {
                ret |= 1;
                printk("thread.magic bad: %08x\n", tsk->thread.magic);
        }
#endif

        if ( !tsk )
                printk("check_stack(): tsk bad tsk %p\n",tsk);

        /* check if stored ksp is bad */
        if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
        {
                printk("stack out of bounds: %s/%d\n"
                       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
                       tsk->comm, tsk->pid,
                       tsk_top, tsk->thread.ksp, stack_top);
                ret |= 2;
        }

        /* check if stack ptr RIGHT NOW is bad */
        if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
        {
                printk("current stack ptr out of bounds: %s/%d\n"
                       " tsk_top %08lx sp %08lx stack_top %08lx\n",
                       current->comm, current->pid,
                       tsk_top, _get_SP(), stack_top);
                ret |= 4;
        }

#if 0
        /* check amount of free stack */
        for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
        {
                if ( !i )
                        printk("check_stack(): i = %p\n", i);
                if ( *i != 0 )
                {
                        /* only notify if it's less than 900 bytes */
                        if ( (i - (unsigned long *)task_top(tsk)) < 900 )
                                printk("%d bytes free on stack\n",
                                       i - task_top(tsk));
                        break;
                }
        }
#endif

        if (ret)
                panic("bad kernel stack");
        return ret;
}
#endif /* defined(CHECK_STACK) */
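/*
 * CHECK_STACK is a debugging aid and is normally compiled out (it is
 * #undef'd above); when enabled, __switch_to() runs check_stack() on
 * both tasks so a corrupted kernel stack pointer is caught at context
 * switch time rather than as a random crash later.
 */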
#ifdef CONFIG_ALTIVEC
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
        memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
        return 1;
}

void
enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);
#endif /* CONFIG_ALTIVEC */
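/*
 * Typical use of enable_kernel_altivec() (an illustrative sketch, not
 * code from this file): a kernel-side AltiVec user must hold the unit
 * across a non-preemptible region, e.g.
 *
 *      preempt_disable();
 *      enable_kernel_altivec();
 *      ... issue vector instructions ...
 *      preempt_enable();
 *
 * since being preempted in the middle could hand the unit to another
 * task, which is what the WARN_ON(preemptible()) above guards against.
 */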
#ifdef CONFIG_SPE
int
dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
{
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
        memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
        return 1;
}

void
enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);       /* just enable SPE for kernel - force */
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);
#endif /* CONFIG_SPE */
void
enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
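/*
 * Note on the giveup_*() primitives used throughout this file:
 * giveup_fpu(tsk) saves the live FP registers into tsk->thread.fpr[]
 * and clears MSR_FP in the thread's saved MSR, so the next FP access
 * from that thread traps and reloads the state; giveup_fpu(NULL) just
 * enables the FPU for the kernel without saving any task's state.  The
 * AltiVec and SPE variants follow the same convention.
 */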
int
dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
        if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
                giveup_fpu(tsk);
        memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
        return 1;
}
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long s;
        struct task_struct *last;

        local_irq_save(s);
#ifdef CHECK_STACK
        check_stack(prev);
        check_stack(new);
#endif

#ifdef CONFIG_SMP
        /* avoid complexity of lazy save/restore of fpu
         * by just saving it every time we switch out if
         * this task used the fpu during the last quantum.
         *
         * If it tries to use the fpu again, it'll trap and
         * reload its fp regs.  So we don't have to do a restore
         * every switch, just a save.
         *  -- Cort
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * If the previous thread used altivec in the last quantum
         * (thus changing altivec regs) then save them.
         * We used to check the VRSAVE register but not all apps
         * set it, so we don't rely on it now (and in fact we need
         * to save & restore VSCR even if VRSAVE == 0).  -- paulus
         *
         * On SMP we always save/restore altivec regs just to avoid the
         * complexity of changing processors.
         *  -- Cort
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        /*
         * If the previous thread used spe in the last quantum
         * (thus changing spe regs) then save them.
         *
         * On SMP we always save/restore spe regs just to avoid the
         * complexity of changing processors.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */
#endif /* CONFIG_SMP */

#ifdef CONFIG_ALTIVEC
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_altivec -- Cort
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        /* Avoid the trap.  On smp this never happens since
         * we don't set last_task_used_spe
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */
        new_thread = &new->thread;
        old_thread = &current->thread;
        last = _switch(old_thread, new_thread);
        local_irq_restore(s);
        return last;
}
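/*
 * _switch() is the low-level context switch in entry.S; it returns the
 * task we switched away from, which is handed back to the caller as
 * `last'.
 */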
void show_regs(struct pt_regs * regs)
{
        int i, trap;

        printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
               regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
               print_tainted());
        printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
               regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
               regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
               regs->msr&MSR_IR ? 1 : 0,
               regs->msr&MSR_DR ? 1 : 0);
        trap = TRAP(regs);
        if (trap == 0x300 || trap == 0x600)
                printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
        printk("TASK = %p[%d] '%s' THREAD: %p\n",
               current, current->pid, current->comm, current->thread_info);
        printk("Last syscall: %ld ", current->thread.last_syscall);

#ifdef CONFIG_SMP
        printk(" CPU: %d", smp_processor_id());
#endif /* CONFIG_SMP */

        for (i = 0;  i < 32;  i++) {
                long r;
                if ((i % 8) == 0)
                        printk("\n" KERN_INFO "GPR%02d: ", i);
                if (__get_user(r, &regs->gpr[i]))
                        break;
                printk("%08lX ", r);
                if (i == 12 && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Lookup NIP late so we have the best chance of getting the
         * above info out without failing
         */
        printk("NIP [%08lx] ", regs->nip);
        print_symbol("%s\n", regs->nip);
        printk("LR [%08lx] ", regs->link);
        print_symbol("%s\n", regs->link);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
}
void exit_thread(void)
{
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
}
void flush_thread(void)
{
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
}
void
release_thread(struct task_struct *t)
{
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        struct pt_regs *regs = tsk->thread.regs;

        if (regs == NULL)
                return;
        if (regs->msr & MSR_FP)
                giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
#endif /* CONFIG_SPE */
}
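/*
 * The giveup_*() calls above flush any live register state back into
 * current->thread, so that when the generic fork code copies the task
 * structure the child gets up-to-date FP/AltiVec/SPE values.
 */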
/*
 * Copy a thread..
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
            unsigned long unused,
            struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
        unsigned long childframe;

        CHECK_FULL_REGS(regs);
        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        *childregs = *regs;
        if ((childregs->msr & MSR_PR) == 0) {
                /* for kernel thread, set `current' and stackptr in new task */
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                childregs->gpr[2] = (unsigned long) p;
                p->thread.regs = NULL;  /* no user register state */
        } else {
                childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                if (clone_flags & CLONE_SETTLS)
                        childregs->gpr[2] = childregs->gpr[6];
        }
        childregs->gpr[3] = 0;  /* Result from fork() */
        sp -= STACK_FRAME_OVERHEAD;
        childframe = sp;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at ret_from_fork.  The new task will
         * do some house keeping and then return from the fork or clone
         * system call, using the stack frame created above.
         */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        kregs->nip = (unsigned long)ret_from_fork;

        p->thread.last_syscall = -1;

        return 0;
}
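/*
 * Layout of the child's kernel stack as built above (a sketch;
 * addresses grow downwards from the top of the stack):
 *
 *      p->thread_info + THREAD_SIZE
 *              struct pt_regs          (childregs: user state,
 *                                       copied from the parent)
 *              STACK_FRAME_OVERHEAD    (childframe)
 *              struct pt_regs          (kregs: only nip is used,
 *                                       set to ret_from_fork)
 *              STACK_FRAME_OVERHEAD
 *      p->thread.ksp
 */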
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
        set_fs(USER_DS);
        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->mq = 0;
        regs->nip = nip;
        regs->gpr[1] = sp;
        regs->msr = MSR_USER;
        if (last_task_used_math == current)
                last_task_used_math = NULL;
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /* This is a bit hairy.  If we are an SPE enabled processor
         * (have embedded fp) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting FP exception
         * mode (async, precise, disabled) for 'Classic' FP. */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                tsk->thread.fpexc_mode = val &
                        (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#else
                return -EINVAL;
#endif
        } else {
                /* on a CONFIG_SPE this does not hurt us.  The bits that
                 * __pack_fe01 use do not overlap with bits used for
                 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
                 * on CONFIG_SPE implementations are reserved so writing to
                 * them does not change anything */
                if (val > PR_FP_EXC_PRECISE)
                        return -EINVAL;
                tsk->thread.fpexc_mode = __pack_fe01(val);
                if (regs != NULL && (regs->msr & MSR_FP) != 0)
                        regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                                | tsk->thread.fpexc_mode;
        }
        return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                val = tsk->thread.fpexc_mode;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}
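/*
 * For 'Classic' FP the value packed by __pack_fe01() lands in the
 * MSR[FE0,FE1] bits, which the architecture defines as (a summary, not
 * taken from this file):
 *
 *      FE0 FE1  floating-point exception mode
 *       0   0   exceptions disabled
 *       0   1   imprecise nonrecoverable
 *       1   0   imprecise recoverable
 *       1   1   precise
 *
 * User space reaches set_fpexc_mode()/get_fpexc_mode() through
 * prctl(PR_SET_FPEXC, ...) and prctl(PR_GET_FPEXC, ...).
 */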
int sys_clone(unsigned long clone_flags, unsigned long usp,
              int __user *parent_tidp, void __user *child_threadptr,
              int __user *child_tidp, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        if (usp == 0)
                usp = regs->gpr[1];     /* stack pointer for child */
        return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}
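/*
 * A usp of 0 means "use the parent's current stack pointer", so a
 * clone without a new stack starts the child on the same gpr[1] the
 * parent had at syscall entry, just as sys_fork()/sys_vfork() below
 * always do.
 */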
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
             struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
              struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
                       regs, 0, NULL, NULL);
}
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
               unsigned long a3, unsigned long a4, unsigned long a5,
               struct pt_regs *regs)
{
        int error;
        char * filename;

        filename = getname((char __user *) a0);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        if (regs->msr & MSR_FP)
                giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
        if (regs->msr & MSR_VEC)
                giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
#endif /* CONFIG_SPE */
        error = do_execve(filename, (char __user *__user *) a1,
                          (char __user *__user *) a2, regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
out:
        return error;
}
void dump_stack(void)
{
        show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, stack_top, prev_sp, ret;
        int count = 0;
        unsigned long next_exc = 0;
        struct pt_regs *regs;
        extern char ret_from_except, ret_from_except_full, ret_from_syscall;

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        prev_sp = (unsigned long) (tsk->thread_info + 1);
        stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
        while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
                if (count == 0) {
                        printk("Call trace:");
#ifdef CONFIG_KALLSYMS
                        printk("\n");
#endif
                } else {
                        if (next_exc) {
                                ret = next_exc;
                                next_exc = 0;
                        } else
                                ret = *(unsigned long *)(sp + 4);
                        printk(" [%08lx] ", ret);
#ifdef CONFIG_KALLSYMS
                        print_symbol("%s", ret);
                        printk("\n");
#endif
                        if (ret == (unsigned long) &ret_from_except
                            || ret == (unsigned long) &ret_from_except_full
                            || ret == (unsigned long) &ret_from_syscall) {
                                /* sp + 16 points to an exception frame */
                                regs = (struct pt_regs *) (sp + 16);
                                if (sp + 16 + sizeof(*regs) <= stack_top)
                                        next_exc = regs->nip;
                        }
                }
                ++count;
                sp = *(unsigned long *)sp;
        }
#ifndef CONFIG_KALLSYMS
        if (count > 0)
                printk("\n");
#endif
}
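/*
 * The walk above relies on the 32-bit PowerPC stack frame layout:
 *
 *      sp + 4: LR save word (the frame's return address)
 *      sp + 0: back chain (pointer to the previous, higher frame)
 *
 * hence the saved return address is read from sp + 4 and the next
 * frame from *sp.
 */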
/*
 * Low level print for debugging - Cort
 */
int __init ll_printk(const char *fmt, ...)
{
        va_list args;
        char buf[256];
        int i;

        va_start(args, fmt);
        i = vsprintf(buf, fmt, args);
        ll_puts(buf);
        va_end(args);
        return i;
}
int lines = 24, cols = 80;
int orig_x = 0, orig_y = 0;

void puthex(unsigned long val)
{
        unsigned char buf[10];
        int i;

        for (i = 7; i >= 0; i--)
        {
                buf[i] = "0123456789ABCDEF"[val & 0x0F];
                val >>= 4;
        }
        buf[8] = '\0';
        prom_print(buf);
}
void __init ll_puts(const char *s)
{
        int x, y;
        char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
        char c;
        extern int mem_init_done;

        if ( mem_init_done ) /* assume this means we can printk */
        {
                printk("%s", s);
                return;
        }

        /*
         * can't ll_puts on chrp without openfirmware yet.
         * vidmem just needs to be setup for it.
         */
        if ( _machine != _MACH_prep )
                return;

        x = orig_x;
        y = orig_y;

        while ( ( c = *s++ ) != '\0' ) {
                if ( c == '\n' ) {
                        x = 0;
                        if ( ++y >= lines ) {
                                scroll();
                                y--;
                        }
                } else {
                        vidmem [ ( x + cols * y ) * 2 ] = c;
                        if ( ++x >= cols ) {
                                x = 0;
                                if ( ++y >= lines ) {
                                        scroll();
                                        y--;
                                }
                        }
                }
        }

        orig_x = x;
        orig_y = y;
}
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        unsigned long stack_page = (unsigned long) p->thread_info;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        sp = p->thread.ksp;
        do {
                sp = *(unsigned long *)sp;
                if (sp < stack_page || sp >= stack_page + 8188)
                        return 0;
                if (count > 0) {
                        ip = *(unsigned long *)(sp + 4);
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}
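/*
 * The magic 8188 above is THREAD_SIZE - 4 for the 8kB kernel stacks
 * used here: a valid back chain must stay inside the stack and leave
 * room for the LR save word read at sp + 4.  in_sched_functions()
 * filters out scheduler internals so get_wchan() reports the function
 * the task is really sleeping in.
 */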