 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
	struct mips_fpu_soft_struct *ctx);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
 * This constant is for searching for possible module text segments.
 * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
#define MODULE_RANGE (8*1024*1024)
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
void show_stack(struct task_struct *task, unsigned long *sp)
	const int field = 2 * sizeof(unsigned long);

	if (task && task != current)
		sp = (unsigned long *) task->thread.reg29;
		sp = (unsigned long *) &sp;

	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
		printk(" %0*lx", field, stackdata);
void show_trace(struct task_struct *task, unsigned long *stack)
	const int field = 2 * sizeof(unsigned long);

	if (task && task != current)
		stack = (unsigned long *) task->thread.reg29;
		stack = (unsigned long *) &stack;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	while (!kstack_end(stack)) {
		if (__kernel_text_address(addr)) {
			printk(" [<%0*lx>] ", field, addr);
			print_symbol("%s\n", addr);
 * The architecture-independent dump_stack generator
void dump_stack(void)
	show_trace(current, &stack);

EXPORT_SYMBOL(dump_stack);
void show_code(unsigned int *pc)
	for (i = -3 ; i < 6 ; i++) {
		if (__get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
		printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
void show_regs(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;

	printk("Cpu %d\n", smp_processor_id());

	 * Saved main processor registers
	for (i = 0; i < 32; ) {
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
			printk(" %0*lx", field, regs->regs[i]);

	printk("Hi : %0*lx\n", field, regs->hi);
	printk("Lo : %0*lx\n", field, regs->lo);

	 * Saved cp0 registers
	printk("epc : %0*lx ", field, regs->cp0_epc);
	print_symbol("%s ", regs->cp0_epc);
	printk(" %s\n", print_tainted());
	printk("ra : %0*lx ", field, regs->regs[31]);
	print_symbol("%s\n", regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
		if (regs->cp0_status & ST0_KUO)
		if (regs->cp0_status & ST0_IEO)
		if (regs->cp0_status & ST0_KUP)
		if (regs->cp0_status & ST0_IEP)
		if (regs->cp0_status & ST0_KUC)
		if (regs->cp0_status & ST0_IEC)
		if (regs->cp0_status & ST0_KX)
		if (regs->cp0_status & ST0_SX)
		if (regs->cp0_status & ST0_UX)
		switch (regs->cp0_status & ST0_KSU) {
			printk("SUPERVISOR ");
		if (regs->cp0_status & ST0_ERL)
		if (regs->cp0_status & ST0_EXL)
		if (regs->cp0_status & ST0_IE)

	printk("Cause : %08x\n", cause);

	cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
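	/*
	 * ExcCodes 1-5 (TLB modified, TLBL, TLBS, AdEL and AdES) are the
	 * exceptions for which BadVAddr holds a valid fault address.
	 */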
	if (1 <= cause && cause <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId : %08x\n", read_c0_prid());

void show_registers(struct pt_regs *regs)
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
		current->comm, current->pid, current_thread_info(), current);
	show_stack(current, (long *) regs->regs[29]);
	show_trace(current, (long *) regs->regs[29]);
	show_code((unsigned int *) regs->cp0_epc);
static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
	static int die_counter;

	spin_lock_irq(&die_lock);
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	spin_unlock_irq(&die_lock);
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
	__asm__ __volatile__(
	".section\t__dbe_table,\"a\"\n\t"

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
		e = search_module_dbetables(addr);
asmlinkage void do_be(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
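	/*
	 * Bit 2 of cp0_cause is the low bit of ExcCode here and tells a
	 * data bus error (ExcCode 7) from an instruction bus error (6).
	 */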
	int action = MIPS_BE_FATAL;

	/* XXX For now. Fixme, this searches the wrong table ... */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != 0);

	case MIPS_BE_DISCARD:
		regs->cp0_epc = fixup->nextinsn;

	 * Assume it would be too dangerous to continue ...
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
	unsigned int __user *epc;
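	/*
	 * If the exception was taken in a branch delay slot (CAUSEF_BD),
	 * EPC points at the branch and the faulting instruction is the
	 * one right after it.
	 */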
	epc = (unsigned int __user *) regs->cp0_epc +
	      ((regs->cp0_cause & CAUSEF_BD) != 0);
	if (!get_user(*opcode, epc))

	force_sig(SIGSEGV, current);
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b
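/*
 * The masks above select the fields of the instructions emulated below:
 * OPCODE, BASE, RT and OFFSET pick out the major opcode, base register,
 * target register and 16-bit offset of the I-type ll/sc encodings, while
 * SPEC3, RD and FUNC pick apart the SPECIAL3 rdhwr encoding.
 */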
 * The ll_bit is cleared by r*_switch.S

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;
static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
	unsigned long value, __user *vaddr;

	 * analyse the ll instruction that just caused an RI exception
	 * and put the referenced address into vaddr.

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3) {
	if (get_user(value, vaddr)) {

	if (ll_task == NULL || ll_task == current) {

	compute_return_epc(regs);

	regs->regs[(opcode & RT) >> 16] = value;

	force_sig(signal, current);
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
	unsigned long __user *vaddr;

	 * analyse the sc instruction that just caused an RI exception
	 * and put the referenced address into vaddr.

	/* sign extend offset */
	offset = opcode & OFFSET;

	vaddr = (unsigned long __user *)
	        ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3) {

	if (ll_bit == 0 || ll_task != current) {
		compute_return_epc(regs);

	if (put_user(regs->regs[reg], vaddr)) {

	compute_return_epc(regs);

	force_sig(signal, current);
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.

static inline int simulate_llsc(struct pt_regs *regs)
	if (unlikely(get_insn_opcode(regs, &opcode)))

	if ((opcode & OPCODE) == LL) {
		simulate_ll(regs, opcode);
	if ((opcode & OPCODE) == SC) {
		simulate_sc(regs, opcode);

	return -EFAULT; /* Strange things going on ... */
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware. The only current use of this
 * is the thread area pointer.

static inline int simulate_rdhwr(struct pt_regs *regs)
	struct thread_info *ti = task_thread_info(current);

	if (unlikely(get_insn_opcode(regs, &opcode)))

	if (unlikely(compute_return_epc(regs)))

	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;
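		/*
		 * The thread area pointer is kept in thread_info; hand it
		 * back in the destination register rt.
		 */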
		regs->regs[rt] = ti->tp_value;
asmlinkage void do_ov(struct pt_regs *regs)
	info.si_code = FPE_INTOVF;
	info.si_signo = SIGFPE;
	info.si_addr = (void __user *) regs->cp0_epc;
	force_sig_info(SIGFPE, &info, current);
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX

asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
	if (fcr31 & FPU_CSR_UNI_X) {
#ifdef CONFIG_PREEMPT
		if (!is_fpu_owner()) {
			/* We might lose fpu before disabling preempt... */
			BUG_ON(!used_math());

		 * Unimplemented operation exception. If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context. We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.

		/* Ensure 'resume' does not overwrite the saved fp context again. */

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs,
			&current->thread.fpu.soft);

		own_fpu();	/* Using the FPU again. */
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		current->thread.fpu.soft.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */

		/* If something went wrong, signal */
		force_sig(sig, current);

	force_sig(SIGFPE, current);
asmlinkage void do_bp(struct pt_regs *regs)
	unsigned int opcode, bcode;

	die_if_kernel("Break instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))

	 * There is an ancient bug in MIPS assemblers: the break code is
	 * encoded starting at bit 16 instead of bit 6 of the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic. --macro
	bcode = ((opcode >> 6) & ((1 << 20) - 1));
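	/*
	 * A code encoded at bit 6 occupies only bits 15:6; shift it up by
	 * 10 so both encodings can be compared against the BRK_* constants
	 * (already shifted by 10) below.
	 */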
	if (bcode < (1 << 10))

	 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
	 * insns, even for break codes that indicate arithmetic failures.
	 * But should we continue the brokenness??? --macro
	case BRK_OVERFLOW << 10:
	case BRK_DIVZERO << 10:
		if (bcode == (BRK_DIVZERO << 10))
			info.si_code = FPE_INTDIV;
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		force_sig(SIGTRAP, current);
asmlinkage void do_tr(struct pt_regs *regs)
	unsigned int opcode, tcode = 0;

	die_if_kernel("Trap instruction in kernel code", regs);

	if (get_insn_opcode(regs, &opcode))

	/* Immediate versions don't provide a code. */
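	/* Register variants carry a 10-bit trap code in bits 15:6. */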
	if (!(opcode & OPCODE))
		tcode = ((opcode >> 6) & ((1 << 10) - 1));

	 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap codes that indicate arithmetic failures.
	 * But should we continue the brokenness??? --macro
		if (tcode == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		force_sig(SIGTRAP, current);
asmlinkage void do_ri(struct pt_regs *regs)
	die_if_kernel("Reserved instruction in kernel code", regs);

	if (!simulate_llsc(regs))

	if (!simulate_rdhwr(regs))

	force_sig(SIGILL, current);

asmlinkage void do_cpu(struct pt_regs *regs)
	die_if_kernel("do_cpu invoked from kernel context!", regs);

	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

		if (!simulate_llsc(regs))

		if (!simulate_rdhwr(regs))

		if (used_math()) {	/* Using the FPU again. */
		} else {		/* First time FPU user. */

			int sig = fpu_emulator_cop1Handler(regs,
				&current->thread.fpu.soft);
			force_sig(sig, current);

	force_sig(SIGILL, current);
asmlinkage void do_mdmx(struct pt_regs *regs)
	force_sig(SIGILL, current);

asmlinkage void do_watch(struct pt_regs *regs)
	 * We use the watch exception where available to detect stack
	panic("Caught WATCH exception - probably caused by stack overflow.");

asmlinkage void do_mcheck(struct pt_regs *regs)
	 * Some chips may have other causes of machine check (e.g. SB1
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (regs->cp0_status & ST0_TS) ? "" : "not ");
asmlinkage void do_mt(struct pt_regs *regs)
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);

asmlinkage void do_dsp(struct pt_regs *regs)
		panic("Unexpected DSP exception\n");

	force_sig(SIGILL, current);

asmlinkage void do_reserved(struct pt_regs *regs)
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hardware/software error.
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);

asmlinkage void do_default_vi(struct pt_regs *regs)
	panic("Caught unexpected vectored interrupt.");
 * Some MIPS CPUs can enable/disable cache parity detection, but they do
 * it in different ways.
static inline void parity_protection_init(void)
	switch (current_cpu_data.cputype) {
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
asmlinkage void cache_parity_error(void)
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	printk("Error bits: %s%s%s%s%s%s%s\n",
	       reg_val & (1<<29) ? "ED " : "",
	       reg_val & (1<<28) ? "ET " : "",
	       reg_val & (1<<26) ? "EE " : "",
	       reg_val & (1<<25) ? "EB " : "",
	       reg_val & (1<<24) ? "EI " : "",
	       reg_val & (1<<23) ? "E1 " : "",
	       reg_val & (1<<22) ? "E0 " : "");
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());

	panic("Can't handle the cache error!");
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
void ejtag_exception_handler(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc;

	printk("SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk("c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		old_epc = regs->cp0_epc;
		regs->cp0_epc = depc;
		__compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;

	printk("\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
 * NMI exception handler.
void nmi_exception_handler(struct pt_regs *regs)
	printk("NMI taken!!!!\n");

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
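/*
 * exception_handlers[] is indexed by the ExcCode of the exception,
 * vi_handlers[] by the vectored interrupt number.
 */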
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256MB on the Nevada. Oh well ...
void *set_except_vector(int n, void *addr)
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler = exception_handlers[n];

	exception_handlers[n] = handler;
	if (n == 0 && cpu_has_divec) {
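		/*
		 * Exception 0 is the interrupt exception; with a dedicated
		 * interrupt vector the CPU jumps to ebase + 0x200, so drop
		 * a MIPS "j handler" instruction there (0x08000000 is the j
		 * opcode, the target is the handler address shifted right
		 * by 2).
		 */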
		*(volatile u32 *)(ebase + 0x200) = 0x08000000 |
				(0x03ffffff & (handler >> 2));
		flush_icache_range(ebase + 0x200, ebase + 0x204);

	return (void *)old_handler;
#ifdef CONFIG_CPU_MIPSR2
 * Shadow register allocation

/* MIPSR2 shadow register sets */
struct shadow_registers {
	spinlock_t sr_lock;	/* protects sr_allocated */
	int sr_supported;	/* Number of shadow register sets supported */
	int sr_allocated;	/* Bitmap of allocated shadow register sets */

void mips_srs_init(void)
#ifdef CONFIG_CPU_MIPSR2_SRS
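	/*
	 * SRSCtl.HSS (bits 29:26) holds the highest shadow set number
	 * implemented, so the number of sets available is HSS + 1.
	 */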
	shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
	printk("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
	shadow_registers.sr_supported = 1;
	shadow_registers.sr_allocated = 1;	/* Set 0 used by kernel */
	spin_lock_init(&shadow_registers.sr_lock);

int mips_srs_max(void)
	return shadow_registers.sr_supported;

int mips_srs_alloc(void)
	struct shadow_registers *sr = &shadow_registers;

	spin_lock_irqsave(&sr->sr_lock, flags);

	for (set = 0; set < sr->sr_supported; set++) {
		if ((sr->sr_allocated & (1 << set)) == 0) {
			sr->sr_allocated |= 1 << set;
			spin_unlock_irqrestore(&sr->sr_lock, flags);

	/* None available */
	spin_unlock_irqrestore(&sr->sr_lock, flags);

void mips_srs_free(int set)
	struct shadow_registers *sr = &shadow_registers;
	unsigned long flags;

	spin_lock_irqsave(&sr->sr_lock, flags);
	sr->sr_allocated &= ~(1 << set);
	spin_unlock_irqrestore(&sr->sr_lock, flags);
void *set_vi_srs_handler (int n, void *addr, int srs)
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];

	if (!cpu_has_veic && !cpu_has_vint)

		handler = (unsigned long) do_default_vi;

		handler = (unsigned long) addr;
		vi_handlers[n] = (unsigned long) addr;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= mips_srs_max())
		panic("Shadow register set %d not supported", srs);

	if (board_bind_eic_interrupt)
		board_bind_eic_interrupt (n, srs);
	else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (mips_srs_max() > 1)
			change_c0_srsmap (0xf << n*4, srs << n*4);
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and a standard interrupt exit
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		const int handler_len = &except_vec_vi_end - &except_vec_vi;
		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

		if (handler_len > VECTORSPACING) {
			 * Sigh... panicking won't help as the console
			 * is probably not configured :(
			panic ("VECTORSPACING too small");
		memcpy (b, &except_vec_vi, handler_len);
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);
		*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
		flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));

		 * In other cases jump directly to the interrupt handler
		 * It is the handler's responsibility to save registers if required
		 * (e.g. hi/lo) and return from the exception using "eret"
		*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff);	/* j handler */
		flush_icache_range((unsigned long)b, (unsigned long)(b+8));

	return (void *)old_handler;

void *set_vi_handler (int n, void *addr)
	return set_vi_srs_handler (n, addr, 0);
 * This is used by native signal handling
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

static inline void signal_init(void)
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;

#ifdef CONFIG_MIPS32_COMPAT

 * This is used by 32-bit signal stuff on the 64-bit kernel
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
		save_fp_context32 = _save_fp_context32;
		restore_fp_context32 = _restore_fp_context32;
		save_fp_context32 = fpu_emulator_save_context32;
		restore_fp_context32 = fpu_emulator_restore_context32;
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

void __init per_cpu_trap_init(void)
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;

	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
	if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
	set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */

	 * Interrupt handling.
	if (cpu_has_veic || cpu_has_vint) {
		write_c0_ebase (ebase);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl (0x3e0, VECTORSPACING);
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			set_c0_cause(CAUSEF_IV);

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
/* Install CPU exception handler */
void __init set_handler (unsigned long offset, void *addr, unsigned long size)
	memcpy((void *)(ebase + offset), addr, size);
	flush_icache_range(ebase + offset, ebase + offset + size);

/* Install uncached CPU exception handler */
void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
	unsigned long uncached_ebase = KSEG1ADDR(ebase);
	unsigned long uncached_ebase = TO_UNCAC(ebase);

	memcpy((void *)(uncached_ebase + offset), addr, size);
void __init trap_init(void)
	extern char except_vec3_generic, except_vec3_r4000;
	extern char except_vec4;

	if (cpu_has_veic || cpu_has_vint)
		ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);

#ifdef CONFIG_CPU_MIPSR2

	per_cpu_trap_init();
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	set_handler(0x180, &except_vec3_generic, 0x80);

	 * Set up default vectors
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	 * Copy the EJTAG debug exception vector handler code to its final
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup ();
	 * Only some CPUs have the watch exceptions.
		set_except_vector(23, handle_watch);

	 * Initialise interrupt handlers
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler (i, NULL);
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	parity_protection_init();
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.

	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, handle_ri);
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);

	if (current_cpu_data.cputype == CPU_R6000 ||
	    current_cpu_data.cputype == CPU_R6000A) {
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception. The handlers have not been
		 * written yet. Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

		set_except_vector(22, handle_mdmx);

		set_except_vector(24, handle_mcheck);

		set_except_vector(25, handle_mt);

		set_except_vector(26, handle_dsp);
		/* Special exception: R4[04]00 also uses the divec space. */
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
		memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

#ifdef CONFIG_MIPS32_COMPAT

	flush_icache_range(ebase, ebase + 0x400);
	flush_tlb_handlers();