/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
        struct mips_fpu_struct *ctx, int has_fpu);

void (*board_watchpoint_handler)(struct pt_regs *regs);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
        unsigned long *sp = (unsigned long *)reg29;

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        while (!kstack_end(sp)) {
                if (__kernel_text_address(addr))

#ifdef CONFIG_KALLSYMS
static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
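/*
 * Illustrative note (not from the original source): the "raw_show_trace"
 * kernel command-line parameter registered above makes show_backtrace()
 * below skip the unwinder and dump raw stack words instead, e.g.
 *
 *     ... console=ttyS0 raw_show_trace
 */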
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
        unsigned long sp = regs->regs[29];
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
        printk("Call Trace:\n");
                pc = unwind_stack(task, &sp, pc, &ra);

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
        const struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                printk(" %0*lx", field, stackdata);
        show_backtrace(task, regs);

void show_stack(struct task_struct *task, unsigned long *sp)
        regs.regs[29] = (unsigned long)sp;

        if (task && task != current) {
                regs.regs[29] = task->thread.reg29;
                regs.cp0_epc = task->thread.reg31;
        prepare_frametrace(&regs);
        show_stacktrace(task, &regs);

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
        prepare_frametrace(&regs);
        show_backtrace(current, &regs);
EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
        for (i = -3; i < 6; i++) {
                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
static void __show_regs(const struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;

        printk("Cpu %d\n", smp_processor_id());

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                        printk(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        printk(" %*s", field, "");
                        printk(" %0*lx", field, regs->regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        printk("Acx : %0*lx\n", field, regs->acx);
        printk("Hi : %0*lx\n", field, regs->hi);
        printk("Lo : %0*lx\n", field, regs->lo);

        /*
         * Saved cp0 registers
         */
        printk("epc : %0*lx ", field, regs->cp0_epc);
        print_symbol("%s ", regs->cp0_epc);
        printk(" %s\n", print_tainted());
        printk("ra : %0*lx ", field, regs->regs[31]);
        print_symbol("%s\n", regs->regs[31]);

        printk("Status: %08x ", (uint32_t) regs->cp0_status);

        if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
                if (regs->cp0_status & ST0_KUO)
                if (regs->cp0_status & ST0_IEO)
                if (regs->cp0_status & ST0_KUP)
                if (regs->cp0_status & ST0_IEP)
                if (regs->cp0_status & ST0_KUC)
                if (regs->cp0_status & ST0_IEC)
        if (regs->cp0_status & ST0_KX)
        if (regs->cp0_status & ST0_SX)
        if (regs->cp0_status & ST0_UX)
        switch (regs->cp0_status & ST0_KSU) {
                printk("SUPERVISOR ");
        if (regs->cp0_status & ST0_ERL)
        if (regs->cp0_status & ST0_EXL)
        if (regs->cp0_status & ST0_IE)

        printk("Cause : %08x\n", cause);

        cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        if (1 <= cause && cause <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId : %08x (%s)\n", read_c0_prid(),
/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
        __show_regs((struct pt_regs *)regs);

void show_registers(const struct pt_regs *regs)
        printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
               current->comm, task_pid_nr(current), current_thread_info(), current);
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, const struct pt_regs *regs)
        static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

        spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
        mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);

                panic("Fatal exception in interrupt");

                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");

        panic("Fatal exception");
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

        "       .section        __dbe_table, \"a\"\n"

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
                e = search_module_dbetables(addr);
asmlinkage void do_be(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;

        /* XXX For now.  Fixme, this searches the wrong table ...  */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != NULL);

        case MIPS_BE_DISCARD:
                        regs->cp0_epc = fixup->nextinsn;

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
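/*
 * Worked decode example (illustrative, not from the original source):
 * "ll $t0, 4($a0)" assembles to 0xc0880004, and with the masks above
 *
 *     insn & OPCODE        == LL (0xc0000000)
 *     (insn & BASE) >> 21  == 4  (base register $a0)
 *     (insn & RT) >> 16    == 8  (destination register $t0)
 *     insn & OFFSET        == 4  (16-bit signed displacement)
 *
 * which is exactly how simulate_ll()/simulate_sc() below pick the
 * faulting opcode apart.
 */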
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
        unsigned long value, __user *vaddr;

        /*
         * analyse the ll instruction that just caused an RI exception
         * and compute the virtual address it references.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3)
        if (get_user(value, vaddr))

        if (ll_task == NULL || ll_task == current) {

        regs->regs[(opcode & RT) >> 16] = value;
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
        unsigned long __user *vaddr;

        /*
         * analyse the sc instruction that just caused an RI exception
         * and compute the virtual address it references.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3)

        if (ll_bit == 0 || ll_task != current) {

        if (put_user(regs->regs[reg], vaddr))
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
        if ((opcode & OPCODE) == LL)
                return simulate_ll(regs, opcode);
        if ((opcode & OPCODE) == SC)
                return simulate_sc(regs, opcode);

        return -1;                      /* Must be something else ... */
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
 */
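/*
 * Illustrative note (not from the original source): compilers implement
 * TLS references on MIPS with a sequence along the lines of
 *
 *     rdhwr   $3, $29         # hardware register 29 is the thread pointer
 *
 * On cores lacking RDHWR this raises a reserved instruction exception,
 * and simulate_rdhwr() below satisfies the read from ti->tp_value.
 */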
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
        struct thread_info *ti = task_thread_info(current);

        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
                        regs->regs[rt] = ti->tp_value;

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)

        return -1;                      /* Must be something else ... */
asmlinkage void do_ov(struct pt_regs *regs)
        die_if_kernel("Integer overflow", regs);

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
                /*
                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context.  We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */
                /* Ensure 'resume' does not overwrite the saved FP context again.  */

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

                /*
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                 */
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */
                own_fpu(1);     /* Using the FPU again.  */

                /* If something went wrong, signal */
                        force_sig(sig, current);

        } else if (fcr31 & FPU_CSR_INV_X)
                info.si_code = FPE_FLTINV;
        else if (fcr31 & FPU_CSR_DIV_X)
                info.si_code = FPE_FLTDIV;
        else if (fcr31 & FPU_CSR_OVF_X)
                info.si_code = FPE_FLTOVF;
        else if (fcr31 & FPU_CSR_UDF_X)
                info.si_code = FPE_FLTUND;
        else if (fcr31 & FPU_CSR_INE_X)
                info.si_code = FPE_FLTRES;
                info.si_code = __SI_FAULT;
        info.si_signo = SIGFPE;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
asmlinkage void do_bp(struct pt_regs *regs)
        unsigned int opcode, bcode;

        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))

        /*
         * There is an ancient bug in MIPS assemblers that caused the break
         * code to be placed at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic.  --macro
         */
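        /*
         * Worked example (illustrative, not from the original source):
         * a properly encoded "break 7" carries the code in bits 15..6,
         * so the extraction below yields bcode == 7 < (1 << 10); the
         * bug-compatible encoding puts it in bits 25..16 instead,
         * yielding bcode == 7 << 10 directly.  Both forms are normalised
         * to the "code << 10" shape the switch statement compares against.
         */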
        bcode = ((opcode >> 6) & ((1 << 20) - 1));
        if (bcode < (1 << 10))

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.)
         * But should we continue the brokenness???  --macro
         */
        case BRK_OVERFLOW << 10:
        case BRK_DIVZERO << 10:
                die_if_kernel("Break instruction in kernel code", regs);
                if (bcode == (BRK_DIVZERO << 10))
                        info.si_code = FPE_INTDIV;
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                die("Kernel bug detected", regs);
                die_if_kernel("Break instruction in kernel code", regs);
                force_sig(SIGTRAP, current);

        force_sig(SIGSEGV, current);
asmlinkage void do_tr(struct pt_regs *regs)
        unsigned int opcode, tcode = 0;

        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))

        /* Immediate versions don't provide a code.  */
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 10) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap codes that indicate arithmetic failures.)
         * But should we continue the brokenness???  --macro
         */
                die_if_kernel("Trap instruction in kernel code", regs);
                if (tcode == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                die("Kernel bug detected", regs);
                die_if_kernel("Trap instruction in kernel code", regs);
                force_sig(SIGTRAP, current);

        force_sig(SIGSEGV, current);
asmlinkage void do_ri(struct pt_regs *regs)
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
        unsigned int opcode = 0;

        die_if_kernel("Reserved instruction in kernel code", regs);

        if (unlikely(compute_return_epc(regs) < 0))

        if (unlikely(get_user(opcode, epc) < 0))

        if (!cpu_has_llsc && status < 0)
                status = simulate_llsc(regs, opcode);

                status = simulate_rdhwr(regs, opcode);

                status = simulate_sync(regs, opcode);

        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;        /* Undo skip-over.  */
                force_sig(status, current);
/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
#ifdef CONFIG_MIPS_MT_FPAFF
        if (mt_fpemul_threshold > 0 &&
            current->thread.emulated_fp++ > mt_fpemul_threshold) {
                /*
                 * If there's no FPU present, or if the application has already
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
                if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
                        cpus_and(tmask, current->thread.user_cpus_allowed,
                                 mt_fpu_cpumask);
                        set_cpus_allowed(current, tmask);
                        set_thread_flag(TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
asmlinkage void do_cpu(struct pt_regs *regs)
        unsigned int __user *epc;
        unsigned long old_epc;

        die_if_kernel("do_cpu invoked from kernel context!", regs);

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;

                if (unlikely(compute_return_epc(regs) < 0))

                if (unlikely(get_user(opcode, epc) < 0))

                if (!cpu_has_llsc && status < 0)
                        status = simulate_llsc(regs, opcode);

                        status = simulate_rdhwr(regs, opcode);

                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
                        force_sig(status, current);

                if (used_math())        /* Using the FPU again.  */
                else {                  /* First time FPU user.  */

                if (!raw_cpu_has_fpu) {
                        sig = fpu_emulator_cop1Handler(regs,
                                        &current->thread.fpu, 0);
                                force_sig(sig, current);
                                mt_ase_fp_affinity();

        force_sig(SIGILL, current);
asmlinkage void do_mdmx(struct pt_regs *regs)
        force_sig(SIGILL, current);

asmlinkage void do_watch(struct pt_regs *regs)
        if (board_watchpoint_handler) {
                (*board_watchpoint_handler)(regs);

        /*
         * We use the watch exception where available to detect stack
         * overflows.
         */
        panic("Caught WATCH exception - probably caused by stack overflow.");
asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;

                printk("Index : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
                printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());

        show_code((unsigned int __user *) regs->cp0_epc);

        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timeout)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
asmlinkage void do_mt(struct pt_regs *regs)
        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                        >> VPECONTROL_EXCPT_SHIFT;
                printk(KERN_DEBUG "Thread Underflow\n");
                printk(KERN_DEBUG "Thread Overflow\n");
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                printk(KERN_DEBUG "Gating Storage Exception\n");
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
                       subcode);

        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL, current);
asmlinkage void do_dsp(struct pt_regs *regs)
                panic("Unexpected DSP exception\n");

        force_sig(SIGILL, current);

asmlinkage void do_reserved(struct pt_regs *regs)
        /*
         * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hardware/software error.
         */
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
        switch (current_cpu_type()) {
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register.  */
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");

                /* Clear the DE bit (bit 16) in the c0_status register.  */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
asmlinkage void cache_parity_error(void)
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang.  */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());

        panic("Can't handle the cache error!");
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC).  EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;

        write_c0_depc(depc);

        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping -----\n\n");
        write_c0_debug(debug | 0x100);
/*
 * NMI exception handler.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
        printk("NMI taken!!!!\n");
#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256MB on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
                *(u32 *)(ebase + 0x200) = 0x08000000 |
                                          (0x03ffffff & (handler >> 2));
                flush_icache_range(ebase + 0x200, ebase + 0x204);

        return (void *)old_handler;
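/*
 * Illustrative example (not from the original source): for a handler at
 * 0x80001000 the word patched in above encodes the MIPS "j" instruction
 *
 *     0x08000000 | ((0x80001000 >> 2) & 0x03ffffff) == 0x08000400
 *
 * The 26-bit target field is what limits handlers to the same 256MB
 * segment as ebase, as the comment above notes.
 */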
static asmlinkage void do_default_vi(void)
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;

        if (!cpu_has_veic && !cpu_has_vint)

                handler = (unsigned long) do_default_vi;
                handler = (unsigned long) addr;
                vi_handlers[n] = (unsigned long) addr;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

                panic("Shadow register set %d not supported", srs);

        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                        change_c0_srsmap(0xf << n*4, srs << n*4);
        /*
         * If no shadow set is selected then use the default handler
         * that does normal register saving and a standard interrupt exit
         */
        extern char except_vec_vi, except_vec_vi_lui;
        extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * We need to provide the SMTC vectored interrupt handler
         * not only with the address of the handler, but with the
         * Status.IM bit to be masked before going there.
         */
        extern char except_vec_vi_mori;
        const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
        const int handler_len = &except_vec_vi_end - &except_vec_vi;
        const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
        const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

        if (handler_len > VECTORSPACING) {
                /*
                 * Sigh... panicking won't help as the console
                 * is probably not configured :(
                 */
                panic("VECTORSPACING too small");
        memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
        BUG_ON(n > 7);  /* Vector index exceeds SMTC maximum.  */

        w = (u32 *)(b + mori_offset);
        *w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
        w = (u32 *)(b + lui_offset);
        *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
        w = (u32 *)(b + ori_offset);
        *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
        flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
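        /*
         * Illustrative example (not from the original source): for a
         * handler at 0x80012345 the two patches above turn the stub's
         * lui/ori pair into the equivalent of
         *
         *     lui  reg, 0x8001
         *     ori  reg, reg, 0x2345
         *
         * (the register is whichever one except_vec_vi uses), forming
         * the full 32-bit handler address before jumping to it.
         */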
        /*
         * In other cases jump directly to the interrupt handler.  It
         * is the handler's responsibility to save registers if required
         * (e.g. hi/lo) and return from the exception using "eret".
         */
        *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */

        flush_icache_range((unsigned long)b, (unsigned long)(b+8));

        return (void *)old_handler;
void *set_vi_handler(int n, vi_handler_t addr)
        return set_vi_srs_handler(n, addr, 0);
/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

static int smp_save_fp_context(struct sigcontext __user *sc)
        return raw_cpu_has_fpu
               ? _save_fp_context(sc)
               : fpu_emulator_save_context(sc);

static int smp_restore_fp_context(struct sigcontext __user *sc)
        return raw_cpu_has_fpu
               ? _restore_fp_context(sc)
               : fpu_emulator_restore_context(sc);
static inline void signal_init(void)
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;

        save_fp_context = _save_fp_context;
        restore_fp_context = _restore_fp_context;

        save_fp_context = fpu_emulator_save_context;
        restore_fp_context = fpu_emulator_restore_context;
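/*
 * Illustrative note (not from the original source): the signal setup code
 * elsewhere saves and restores FP state through these pointers, roughly
 *
 *     err |= save_fp_context(sc);
 *
 * so the native/emulator/SMP choice made in signal_init() stays invisible
 * to the callers.
 */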
#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal handling on the 64-bit kernel.
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);

static inline void signal32_init(void)
        save_fp_context32 = _save_fp_context32;
        restore_fp_context32 = _restore_fp_context32;

        save_fp_context32 = fpu_emulator_save_context32;
        restore_fp_context32 = fpu_emulator_restore_context32;
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
void __init per_cpu_trap_init(void)
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);

        /*
         * Only do per_cpu_trap_init() for first TC of each VPE.
         * Note that this hack assumes that the SMTC init code
         * assigns TCs consecutively and in ascending order.
         */
        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
            ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
#endif /* CONFIG_MIPS_MT_SMTC */

        /*
         * Disable coprocessors and select 32-bit or 64-bit addressing
         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
         * flag that some firmware may have left set and the TS bit (for
         * IP27).  Set XX for ISA IV code to work.
         */
        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
        if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
                status_set |= ST0_XX;
                status_set |= ST0_MX;
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);

#ifdef CONFIG_CPU_MIPSR2
        if (cpu_has_mips_r2) {
                unsigned int enable = 0x0000000f;

                if (cpu_has_userlocal)
                        enable |= (1 << 29);

                write_c0_hwrena(enable);

#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

        if (cpu_has_veic || cpu_has_vint) {
                write_c0_ebase(ebase);
                /* Setting vector spacing enables EI/VI mode */
                change_c0_intctl(0x3e0, VECTORSPACING);
        if (cpu_has_divec) {
                if (cpu_has_mipsmt) {
                        unsigned int vpflags = dvpe();
                        set_c0_cause(CAUSEF_IV);
                        set_c0_cause(CAUSEF_IV);

        /*
         * Before R2 both interrupt numbers were fixed to 7; on R2 only:
         *
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
        if (cpu_has_mips_r2) {
                cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
                if (cp0_perfcount_irq == cp0_compare_irq)
                        cp0_perfcount_irq = -1;
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
                cp0_perfcount_irq = -1;
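        /*
         * Illustrative layout note (not from the original source): the shifts
         * above match the MIPS32R2 IntCtl fields IPTI (bits 31..29) and
         * IPPCI (bits 28..26).  E.g. IntCtl == 0xfc000000 decodes to
         * cp0_compare_irq == 7 with IPPCI == 7 as well, so the code above
         * would mark the performance counter interrupt as shared (-1).
         */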
#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC
        } else if (!secondaryTC) {
                /*
                 * First TC in non-boot VPE must do subset of tlb_init()
                 * for MMU control registers.
                 */
                write_c0_pagemask(PM_DEFAULT_MASK);
#endif /* CONFIG_MIPS_MT_SMTC */
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
        memcpy((void *)(ebase + offset), addr, size);
        flush_icache_range(ebase + offset, ebase + offset + size);

static char panic_null_cerr[] __initdata =
        "Trying to set NULL cache error exception handler";

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
        unsigned long uncached_ebase = KSEG1ADDR(ebase);
        unsigned long uncached_ebase = TO_UNCAC(ebase);

                panic(panic_null_cerr);

        memcpy((void *)(uncached_ebase + offset), addr, size);

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;

        if (cpu_has_veic || cpu_has_vint)
                ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);

        per_cpu_trap_init();

        /*
         * Copy the generic exception handlers to their final destination.
         * This will be overridden later as suitable for a particular
         * configuration.
         */
        set_handler(0x180, &except_vec3_generic, 0x80);

        /*
         * Set up default vectors
         */
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

        /*
         * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (cpu_has_ejtag && board_ejtag_handler_setup)
                board_ejtag_handler_setup();

        /*
         * Only some CPUs have the watch exceptions.
         */
                set_except_vector(23, handle_watch);

        /*
         * Initialise interrupt handlers
         */
        if (cpu_has_veic || cpu_has_vint) {
                int nvec = cpu_has_veic ? 64 : 8;
                for (i = 0; i < nvec; i++)
                        set_vi_handler(i, NULL);
        else if (cpu_has_divec)
                set_handler(0x200, &except_vec4, 0x8);

        /*
         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
         */
        parity_protection_init();

        /*
         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware.  Therefore these two exceptions
         * may have board-specific handlers.
         */

        set_except_vector(0, handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);

        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);

        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);

        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, rdhwr_noopt ? handle_ri :
                          (cpu_has_vtag_icache ?
                           handle_ri_rdhwr_vivt : handle_ri_rdhwr));
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);

        if (current_cpu_type() == CPU_R6000 ||
            current_cpu_type() == CPU_R6000A) {
                /*
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
                 * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
                /* set_except_vector(14, handle_mc); */
                /* set_except_vector(15, handle_ndc); */

        if (board_nmi_handler_setup)
                board_nmi_handler_setup();

        if (cpu_has_fpu && !cpu_has_nofpuex)
                set_except_vector(15, handle_fpe);

        set_except_vector(22, handle_mdmx);

                set_except_vector(24, handle_mcheck);

                set_except_vector(25, handle_mt);

        set_except_vector(26, handle_dsp);

                /* Special exception: R4[04]00 also uses the divec space.  */
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
        else
                memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

#ifdef CONFIG_MIPS32_COMPAT

        flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();