1 /* arch/sparc64/kernel/traps.c
3 * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
8 * I like traps on v9, :))))
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/signal.h>
15 #include <linux/smp.h>
17 #include <linux/init.h>
18 #include <linux/kdebug.h>
21 #include <asm/delay.h>
22 #include <asm/system.h>
23 #include <asm/ptrace.h>
24 #include <asm/oplib.h>
26 #include <asm/pgtable.h>
27 #include <asm/unistd.h>
28 #include <asm/uaccess.h>
29 #include <asm/fpumacro.h>
32 #include <asm/estate.h>
33 #include <asm/chafsr.h>
34 #include <asm/sfafsr.h>
35 #include <asm/psrcompat.h>
36 #include <asm/processor.h>
37 #include <asm/timer.h>
44 /* When an irrecoverable trap occurs at tl > 0, the trap entry
45 * code logs the trap state registers at every level in the trap
46 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
/* Dump the per-trap-level state (TSTATE/TPC/TNPC/TT) saved by the trap
 * entry code just past pt_regs when an irrecoverable trap occurs at TL > 0.
 * Hypervisor (sun4v) chips log 2 levels, other chips 4.
 * NOTE(review): this extract is missing lines (original numbering skips);
 * local declarations and closing braces are absent — restore from upstream.
 */
59 static void dump_tl1_traplog(struct tl1_traplog *p)
63 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
64 "dumping track stack.\n", p->tl);
66 limit = (tlb_type == hypervisor) ? 2 : 4;
67 for (i = 0; i < limit; i++) {
69 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
70 "TNPC[%016lx] TT[%lx]\n",
72 p->trapstack[i].tstate, p->trapstack[i].tpc,
73 p->trapstack[i].tnpc, p->trapstack[i].tt);
74 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
/* Handle an unexpected ("bad") trap taken at trap level 0.
 * Gives the die-notifier chain first refusal, kills the kernel on a
 * privileged-mode trap, and otherwise delivers SIGILL/ILL_ILLTRP to the
 * current user task with si_trapno set to the trap level.
 * NOTE(review): extract drops lines (buffer declaration, braces, the
 * hw-trap level check) — compare against upstream before editing.
 */
78 void bad_trap(struct pt_regs *regs, long lvl)
83 if (notify_die(DIE_TRAP, "bad trap", regs,
84 0, lvl, SIGTRAP) == NOTIFY_STOP)
88 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
89 die_if_kernel(buffer, regs);
93 if (regs->tstate & TSTATE_PRIV) {
94 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
95 die_if_kernel(buffer, regs);
/* 32-bit tasks: truncate PC/NPC to the low 32 bits before signaling. */
97 if (test_thread_flag(TIF_32BIT)) {
98 regs->tpc &= 0xffffffff;
99 regs->tnpc &= 0xffffffff;
101 info.si_signo = SIGILL;
103 info.si_code = ILL_ILLTRP;
104 info.si_addr = (void __user *)regs->tpc;
105 info.si_trapno = lvl;
106 force_sig_info(SIGILL, &info, current);
/* Same as bad_trap() but for traps taken at TL > 0: dump the TL1 traplog
 * (stored immediately after pt_regs) and die if in kernel mode.
 * NOTE(review): interior lines missing in this extract.
 */
109 void bad_trap_tl1(struct pt_regs *regs, long lvl)
113 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
114 0, lvl, SIGTRAP) == NOTIFY_STOP)
117 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
119 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
120 die_if_kernel (buffer, regs);
123 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Print the file/line of a BUG() when CONFIG_DEBUG_BUGVERBOSE is set.
 * NOTE(review): function body is truncated in this extract.
 */
124 void do_BUG(const char *file, int line)
127 printk("kernel BUG at %s:%d!\n", file, line);
/* Spitfire instruction access exception (trap type 0x8) handler.
 * Kernel-mode faults are fatal ("Iax"); user faults deliver
 * SIGSEGV/SEGV_MAPERR at the faulting PC.  SFSR/SFAR are the chip's
 * synchronous fault status/address registers.
 * NOTE(review): extract is missing interior lines (siginfo declaration,
 * braces) — original numbering skips.
 */
131 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
135 if (notify_die(DIE_TRAP, "instruction access exception", regs,
136 0, 0x8, SIGTRAP) == NOTIFY_STOP)
139 if (regs->tstate & TSTATE_PRIV) {
140 printk("spitfire_insn_access_exception: SFSR[%016lx] "
141 "SFAR[%016lx], going.\n", sfsr, sfar);
142 die_if_kernel("Iax", regs);
144 if (test_thread_flag(TIF_32BIT)) {
145 regs->tpc &= 0xffffffff;
146 regs->tnpc &= 0xffffffff;
148 info.si_signo = SIGSEGV;
150 info.si_code = SEGV_MAPERR;
151 info.si_addr = (void __user *)regs->tpc;
153 force_sig_info(SIGSEGV, &info, current);
/* TL > 0 variant: dump the traplog, then fall through to the TL0 handler. */
156 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
158 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
159 0, 0x8, SIGTRAP) == NOTIFY_STOP)
162 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
163 spitfire_insn_access_exception(regs, sfsr, sfar);
/* sun4v (hypervisor) instruction access exception handler.  The hypervisor
 * packs fault type in the high 16 bits of type_ctx and the MMU context in
 * the low 16.  Fatal in kernel mode; SIGSEGV/SEGV_MAPERR for user tasks
 * with si_addr set to the faulting virtual address.
 * NOTE(review): interior lines are missing in this extract.
 */
166 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
168 unsigned short type = (type_ctx >> 16);
169 unsigned short ctx = (type_ctx & 0xffff);
172 if (notify_die(DIE_TRAP, "instruction access exception", regs,
173 0, 0x8, SIGTRAP) == NOTIFY_STOP)
176 if (regs->tstate & TSTATE_PRIV) {
177 printk("sun4v_insn_access_exception: ADDR[%016lx] "
178 "CTX[%04x] TYPE[%04x], going.\n",
180 die_if_kernel("Iax", regs);
183 if (test_thread_flag(TIF_32BIT)) {
184 regs->tpc &= 0xffffffff;
185 regs->tnpc &= 0xffffffff;
187 info.si_signo = SIGSEGV;
189 info.si_code = SEGV_MAPERR;
190 info.si_addr = (void __user *) addr;
192 force_sig_info(SIGSEGV, &info, current);
/* TL > 0 variant: dump the traplog, then reuse the TL0 sun4v handler. */
195 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
197 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
198 0, 0x8, SIGTRAP) == NOTIFY_STOP)
201 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
202 sun4v_insn_access_exception(regs, addr, type_ctx);
/* Spitfire data access exception (trap type 0x30) handler.  A kernel-mode
 * fault whose PC has an exception-table entry (uaccess) is fixed up by
 * redirecting TPC/TNPC to the fixup stub; any other kernel fault is fatal
 * ("Dax").  User faults get SIGSEGV/SEGV_MAPERR with si_addr = SFAR.
 * NOTE(review): interior lines missing (the entry NULL-check, #endif,
 * siginfo declaration, braces) — original numbering skips.
 */
205 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
209 if (notify_die(DIE_TRAP, "data access exception", regs,
210 0, 0x30, SIGTRAP) == NOTIFY_STOP)
213 if (regs->tstate & TSTATE_PRIV) {
214 /* Test if this comes from uaccess places. */
215 const struct exception_table_entry *entry;
217 entry = search_exception_tables(regs->tpc);
219 /* Ouch, somebody is trying VM hole tricks on us... */
220 #ifdef DEBUG_EXCEPTIONS
221 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
222 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
223 regs->tpc, entry->fixup);
/* Resume at the exception-table fixup stub instead of faulting. */
225 regs->tpc = entry->fixup;
226 regs->tnpc = regs->tpc + 4;
230 printk("spitfire_data_access_exception: SFSR[%016lx] "
231 "SFAR[%016lx], going.\n", sfsr, sfar);
232 die_if_kernel("Dax", regs);
235 info.si_signo = SIGSEGV;
237 info.si_code = SEGV_MAPERR;
238 info.si_addr = (void __user *)sfar;
240 force_sig_info(SIGSEGV, &info, current);
/* TL > 0 variant: dump the traplog, then fall through to the TL0 handler. */
243 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
245 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
246 0, 0x30, SIGTRAP) == NOTIFY_STOP)
249 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
250 spitfire_data_access_exception(regs, sfsr, sfar);
/* sun4v data access exception handler; type_ctx packing as in
 * sun4v_insn_access_exception() (type in high 16 bits, context in low 16).
 * Fatal in kernel mode ("Dax"); SIGSEGV/SEGV_MAPERR for user tasks.
 * NOTE(review): interior lines missing in this extract.
 */
253 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
255 unsigned short type = (type_ctx >> 16);
256 unsigned short ctx = (type_ctx & 0xffff);
259 if (notify_die(DIE_TRAP, "data access exception", regs,
260 0, 0x8, SIGTRAP) == NOTIFY_STOP)
263 if (regs->tstate & TSTATE_PRIV) {
264 printk("sun4v_data_access_exception: ADDR[%016lx] "
265 "CTX[%04x] TYPE[%04x], going.\n",
267 die_if_kernel("Dax", regs);
270 if (test_thread_flag(TIF_32BIT)) {
271 regs->tpc &= 0xffffffff;
272 regs->tnpc &= 0xffffffff;
274 info.si_signo = SIGSEGV;
276 info.si_code = SEGV_MAPERR;
277 info.si_addr = (void __user *) addr;
279 force_sig_info(SIGSEGV, &info, current);
/* TL > 0 variant: dump the traplog, then reuse the TL0 sun4v handler. */
282 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
284 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
285 0, 0x8, SIGTRAP) == NOTIFY_STOP)
288 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
289 sun4v_data_access_exception(regs, addr, type_ctx);
293 /* This is really pathetic... */
294 extern volatile int pci_poke_in_progress;
295 extern volatile int pci_poke_cpu;
296 extern volatile int pci_poke_faulted;
299 /* When access exceptions happen, we must do this. */
/* After an access error: invalidate every I-/D-cache tag via the
 * diagnostic put-tag accessors, then re-enable both caches (and their
 * parity checking) through the LSU control register.  Spitfire only.
 * NOTE(review): interior lines missing (return, brace, asm operands).
 */
300 static void spitfire_clean_and_reenable_l1_caches(void)
304 if (tlb_type != spitfire)
/* 32-byte stride — presumably the L1 line/tag granularity; TODO confirm. */
308 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
309 spitfire_put_icache_tag(va, 0x0);
310 spitfire_put_dcache_tag(va, 0x0);
313 /* Re-enable in LSU. */
314 __asm__ __volatile__("flush %%g6\n\t"
316 "stxa %0, [%%g0] %1\n\t"
319 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
320 LSU_CONTROL_IM | LSU_CONTROL_DM),
321 "i" (ASI_LSU_CONTROL)
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * ESTATE error-enable register.
 */
325 static void spitfire_enable_estate_errors(void)
327 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
330 : "r" (ESTATE_ERR_ALL),
331 "i" (ASI_ESTATE_ERROR_EN));
/* Spitfire UDB ECC syndrome decode table, indexed by the 8-bit syndrome
 * (see spitfire_log_udb_syndrome(): ecc_syndrome_table[udb & 0xff]).
 * The resulting code is handed to prom_getunumber() to name the failing
 * memory module.  NOTE(review): meaning of individual codes not derivable
 * from this extract — do not edit values without the chip documentation.
 */
334 static char ecc_syndrome_table[] = {
335 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
336 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
337 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
338 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
339 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
340 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
341 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
342 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
343 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
344 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
345 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
346 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
347 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
348 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
349 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
350 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
351 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
352 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
353 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
354 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
355 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
356 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
357 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
358 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
359 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
360 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
361 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
362 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
363 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
364 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
365 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
366 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
/* Fallback module name when prom_getunumber() fails. */
369 static char *syndrome_unknown = "<Unknown>";
/* Decode the low 8 syndrome bits of UDBL and UDBH through
 * ecc_syndrome_table, ask the PROM (prom_getunumber) which memory module
 * the AFAR maps to, and log both.  `bit` selects which UDB error bit is
 * relevant (UDBE_CE vs UDBE_UE at the call sites).
 * NOTE(review): the `if (udb{l,h} & bit)` guards and else-branches appear
 * to be among the lines missing from this extract.
 */
371 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
373 unsigned short scode;
374 char memmod_str[64], *p;
377 scode = ecc_syndrome_table[udbl & 0xff];
378 if (prom_getunumber(scode, afar,
379 memmod_str, sizeof(memmod_str)) == -1)
380 p = syndrome_unknown;
383 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
384 "Memory Module \"%s\"\n",
385 smp_processor_id(), scode, p);
389 scode = ecc_syndrome_table[udbh & 0xff];
390 if (prom_getunumber(scode, afar,
391 memmod_str, sizeof(memmod_str)) == -1)
392 p = syndrome_unknown;
395 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
396 "Memory Module \"%s\"\n",
397 smp_processor_id(), scode, p);
/* Log a Correctable ECC Error (CEE): print the raw AFSR/AFAR/UDB state,
 * decode the syndrome (UDBE_CE), notify the die chain unconditionally,
 * and re-enable E-state error reporting.  CEE does not disable the
 * I-/D-caches, so no cache cleanup is needed here.
 */
402 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
405 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
406 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
407 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
409 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
411 /* We always log it, even if someone is listening for this
414 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
415 0, TRAP_TYPE_CEE, SIGTRAP);
417 /* The Correctable ECC Error trap does not disable I/D caches. So
418 * we only have to restore the ESTATE Error Enable register.
420 spitfire_enable_estate_errors();
/* Log an Uncorrectable Error (UE): print state, decode the syndrome
 * (UDBE_UE), notify the die chain, die if the error hit kernel mode
 * (dumping the traplog first when tl1), then clean/re-enable the L1
 * caches and E-state reporting and deliver SIGBUS/BUS_OBJERR to the
 * user task.  NOTE(review): interior lines missing (siginfo declaration,
 * notify_die tail arguments, braces).
 */
423 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
427 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
428 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
429 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
431 /* XXX add more human friendly logging of the error status
432 * XXX as is implemented for cheetah
435 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
437 /* We always log it, even if someone is listening for this
440 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
443 if (regs->tstate & TSTATE_PRIV) {
445 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
446 die_if_kernel("UE", regs);
449 /* XXX need more intelligent processing here, such as is implemented
450 * XXX for cheetah errors, in fact if the E-cache still holds the
451 * XXX line with bad parity this will loop
454 spitfire_clean_and_reenable_l1_caches();
455 spitfire_enable_estate_errors();
457 if (test_thread_flag(TIF_32BIT)) {
458 regs->tpc &= 0xffffffff;
459 regs->tnpc &= 0xffffffff;
461 info.si_signo = SIGBUS;
463 info.si_code = BUS_OBJERR;
464 info.si_addr = (void *)0;
466 force_sig_info(SIGBUS, &info, current);
/* Top-level Spitfire access-error dispatcher.  Trap entry packs AFSR,
 * trap type, TL>1 flag and both UDB error registers into status_encoded;
 * unpack them, special-case a data-access error caused by an in-progress
 * PCI config-space poke (just clean up and skip the instruction), log an
 * UE if one is flagged, and for CEE traps ACK any leftover CE state in
 * the UDB error registers (offset 0x0 = UDBH, 0x18 = UDBL) before
 * logging the correctable error.
 * NOTE(review): interior lines missing (returns after the PCI-poke path,
 * membar lines of the asm, braces) — restore from upstream before editing.
 */
469 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
471 unsigned long afsr, tt, udbh, udbl;
474 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
475 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
476 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
477 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
478 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
481 if (tt == TRAP_TYPE_DAE &&
482 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
483 spitfire_clean_and_reenable_l1_caches();
484 spitfire_enable_estate_errors();
486 pci_poke_faulted = 1;
487 regs->tnpc = regs->tpc + 4;
492 if (afsr & SFAFSR_UE)
493 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
495 if (tt == TRAP_TYPE_CEE) {
496 /* Handle the case where we took a CEE trap, but ACK'd
497 * only the UE state in the UDB error registers.
499 if (afsr & SFAFSR_UE) {
500 if (udbh & UDBE_CE) {
501 __asm__ __volatile__(
502 "stxa %0, [%1] %2\n\t"
505 : "r" (udbh & UDBE_CE),
506 "r" (0x0), "i" (ASI_UDB_ERROR_W));
508 if (udbl & UDBE_CE) {
509 __asm__ __volatile__(
510 "stxa %0, [%1] %2\n\t"
513 : "r" (udbl & UDBE_CE),
514 "r" (0x18), "i" (ASI_UDB_ERROR_W));
518 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
522 int cheetah_pcache_forced_on;
/* Turn on the Cheetah P-cache on the current cpu: read the DCU control
 * register, OR in the prefetch/side-effect enable bits, write it back.
 * NOTE(review): `dcr` declaration and parts of the asm are missing from
 * this extract.
 */
524 void cheetah_enable_pcache(void)
528 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
531 __asm__ __volatile__("ldxa [%%g0] %1, %0"
533 : "i" (ASI_DCU_CONTROL_REG));
534 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
535 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
538 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
541 /* Cheetah error trap handling. */
542 static unsigned long ecache_flush_physbase;
543 static unsigned long ecache_flush_linesize;
544 static unsigned long ecache_flush_size;
546 /* This table is ordered in priority of errors and matches the
547 * AFAR overwrite policy as well.
550 struct afsr_error_table {
/* Human-readable messages for base-Cheetah AFSR error bits, and the
 * priority-ordered AFSR-bit -> message table (order matches the chip's
 * AFAR overwrite policy; see cheetah_get_hipri()/cheetah_get_string()).
 */
555 static const char CHAFSR_PERR_msg[] =
556 "System interface protocol error";
557 static const char CHAFSR_IERR_msg[] =
558 "Internal processor error";
559 static const char CHAFSR_ISAP_msg[] =
560 "System request parity error on incoming addresss";
561 static const char CHAFSR_UCU_msg[] =
562 "Uncorrectable E-cache ECC error for ifetch/data";
563 static const char CHAFSR_UCC_msg[] =
564 "SW Correctable E-cache ECC error for ifetch/data";
565 static const char CHAFSR_UE_msg[] =
566 "Uncorrectable system bus data ECC error for read";
567 static const char CHAFSR_EDU_msg[] =
568 "Uncorrectable E-cache ECC error for stmerge/blkld";
569 static const char CHAFSR_EMU_msg[] =
570 "Uncorrectable system bus MTAG error";
571 static const char CHAFSR_WDU_msg[] =
572 "Uncorrectable E-cache ECC error for writeback";
573 static const char CHAFSR_CPU_msg[] =
574 "Uncorrectable ECC error for copyout";
575 static const char CHAFSR_CE_msg[] =
576 "HW corrected system bus data ECC error for read";
577 static const char CHAFSR_EDC_msg[] =
578 "HW corrected E-cache ECC error for stmerge/blkld";
579 static const char CHAFSR_EMC_msg[] =
580 "HW corrected system bus MTAG ECC error";
581 static const char CHAFSR_WDC_msg[] =
582 "HW corrected E-cache ECC error for writeback";
583 static const char CHAFSR_CPC_msg[] =
584 "HW corrected ECC error for copyout";
585 static const char CHAFSR_TO_msg[] =
586 "Unmapped error from system bus";
587 static const char CHAFSR_BERR_msg[] =
588 "Bus error response from system bus";
589 static const char CHAFSR_IVC_msg[] =
590 "HW corrected system bus data ECC error for ivec read";
591 static const char CHAFSR_IVU_msg[] =
592 "Uncorrectable system bus data ECC error for ivec read";
593 static struct afsr_error_table __cheetah_error_table[] = {
594 { CHAFSR_PERR, CHAFSR_PERR_msg },
595 { CHAFSR_IERR, CHAFSR_IERR_msg },
596 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
597 { CHAFSR_UCU, CHAFSR_UCU_msg },
598 { CHAFSR_UCC, CHAFSR_UCC_msg },
599 { CHAFSR_UE, CHAFSR_UE_msg },
600 { CHAFSR_EDU, CHAFSR_EDU_msg },
601 { CHAFSR_EMU, CHAFSR_EMU_msg },
602 { CHAFSR_WDU, CHAFSR_WDU_msg },
603 { CHAFSR_CPU, CHAFSR_CPU_msg },
604 { CHAFSR_CE, CHAFSR_CE_msg },
605 { CHAFSR_EDC, CHAFSR_EDC_msg },
606 { CHAFSR_EMC, CHAFSR_EMC_msg },
607 { CHAFSR_WDC, CHAFSR_WDC_msg },
608 { CHAFSR_CPC, CHAFSR_CPC_msg },
609 { CHAFSR_TO, CHAFSR_TO_msg },
610 { CHAFSR_BERR, CHAFSR_BERR_msg },
611 /* These two do not update the AFAR. */
612 { CHAFSR_IVC, CHAFSR_IVC_msg },
613 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Cheetah+ variant: the base Cheetah entries plus the CHPAFSR_* bits
 * (tag ECC and prefetch/store-fill errors), same priority ordering.
 */
616 static const char CHPAFSR_DTO_msg[] =
617 "System bus unmapped error for prefetch/storequeue-read";
618 static const char CHPAFSR_DBERR_msg[] =
619 "System bus error for prefetch/storequeue-read";
620 static const char CHPAFSR_THCE_msg[] =
621 "Hardware corrected E-cache Tag ECC error";
622 static const char CHPAFSR_TSCE_msg[] =
623 "SW handled correctable E-cache Tag ECC error";
624 static const char CHPAFSR_TUE_msg[] =
625 "Uncorrectable E-cache Tag ECC error";
626 static const char CHPAFSR_DUE_msg[] =
627 "System bus uncorrectable data ECC error due to prefetch/store-fill";
628 static struct afsr_error_table __cheetah_plus_error_table[] = {
629 { CHAFSR_PERR, CHAFSR_PERR_msg },
630 { CHAFSR_IERR, CHAFSR_IERR_msg },
631 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
632 { CHAFSR_UCU, CHAFSR_UCU_msg },
633 { CHAFSR_UCC, CHAFSR_UCC_msg },
634 { CHAFSR_UE, CHAFSR_UE_msg },
635 { CHAFSR_EDU, CHAFSR_EDU_msg },
636 { CHAFSR_EMU, CHAFSR_EMU_msg },
637 { CHAFSR_WDU, CHAFSR_WDU_msg },
638 { CHAFSR_CPU, CHAFSR_CPU_msg },
639 { CHAFSR_CE, CHAFSR_CE_msg },
640 { CHAFSR_EDC, CHAFSR_EDC_msg },
641 { CHAFSR_EMC, CHAFSR_EMC_msg },
642 { CHAFSR_WDC, CHAFSR_WDC_msg },
643 { CHAFSR_CPC, CHAFSR_CPC_msg },
644 { CHAFSR_TO, CHAFSR_TO_msg },
645 { CHAFSR_BERR, CHAFSR_BERR_msg },
646 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
647 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
648 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
649 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
650 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
651 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
652 /* These two do not update the AFAR. */
653 { CHAFSR_IVC, CHAFSR_IVC_msg },
654 { CHAFSR_IVU, CHAFSR_IVU_msg },
/* Jalapeno/Serrano variant: JPAFSR_* JBUS-specific errors interleaved
 * with the shared CHAFSR entries, in the chip's AFAR priority order.
 * NOTE(review): the CHAFSR_IVC entry present in the other two tables is
 * absent at the tail here — likely a dropped line in this extract;
 * verify against upstream.
 */
657 static const char JPAFSR_JETO_msg[] =
658 "System interface protocol error, hw timeout caused";
659 static const char JPAFSR_SCE_msg[] =
660 "Parity error on system snoop results";
661 static const char JPAFSR_JEIC_msg[] =
662 "System interface protocol error, illegal command detected";
663 static const char JPAFSR_JEIT_msg[] =
664 "System interface protocol error, illegal ADTYPE detected";
665 static const char JPAFSR_OM_msg[] =
666 "Out of range memory error has occurred";
667 static const char JPAFSR_ETP_msg[] =
668 "Parity error on L2 cache tag SRAM";
669 static const char JPAFSR_UMS_msg[] =
670 "Error due to unsupported store";
671 static const char JPAFSR_RUE_msg[] =
672 "Uncorrectable ECC error from remote cache/memory";
673 static const char JPAFSR_RCE_msg[] =
674 "Correctable ECC error from remote cache/memory";
675 static const char JPAFSR_BP_msg[] =
676 "JBUS parity error on returned read data";
677 static const char JPAFSR_WBP_msg[] =
678 "JBUS parity error on data for writeback or block store";
679 static const char JPAFSR_FRC_msg[] =
680 "Foreign read to DRAM incurring correctable ECC error";
681 static const char JPAFSR_FRU_msg[] =
682 "Foreign read to DRAM incurring uncorrectable ECC error";
683 static struct afsr_error_table __jalapeno_error_table[] = {
684 { JPAFSR_JETO, JPAFSR_JETO_msg },
685 { JPAFSR_SCE, JPAFSR_SCE_msg },
686 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
687 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
688 { CHAFSR_PERR, CHAFSR_PERR_msg },
689 { CHAFSR_IERR, CHAFSR_IERR_msg },
690 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
691 { CHAFSR_UCU, CHAFSR_UCU_msg },
692 { CHAFSR_UCC, CHAFSR_UCC_msg },
693 { CHAFSR_UE, CHAFSR_UE_msg },
694 { CHAFSR_EDU, CHAFSR_EDU_msg },
695 { JPAFSR_OM, JPAFSR_OM_msg },
696 { CHAFSR_WDU, CHAFSR_WDU_msg },
697 { CHAFSR_CPU, CHAFSR_CPU_msg },
698 { CHAFSR_CE, CHAFSR_CE_msg },
699 { CHAFSR_EDC, CHAFSR_EDC_msg },
700 { JPAFSR_ETP, JPAFSR_ETP_msg },
701 { CHAFSR_WDC, CHAFSR_WDC_msg },
702 { CHAFSR_CPC, CHAFSR_CPC_msg },
703 { CHAFSR_TO, CHAFSR_TO_msg },
704 { CHAFSR_BERR, CHAFSR_BERR_msg },
705 { JPAFSR_UMS, JPAFSR_UMS_msg },
706 { JPAFSR_RUE, JPAFSR_RUE_msg },
707 { JPAFSR_RCE, JPAFSR_RCE_msg },
708 { JPAFSR_BP, JPAFSR_BP_msg },
709 { JPAFSR_WBP, JPAFSR_WBP_msg },
710 { JPAFSR_FRC, JPAFSR_FRC_msg },
711 { JPAFSR_FRU, JPAFSR_FRU_msg },
712 /* These two do not update the AFAR. */
713 { CHAFSR_IVU, CHAFSR_IVU_msg },
716 static struct afsr_error_table *cheetah_error_table;
717 static unsigned long cheetah_afsr_errors;
719 struct cheetah_err_info *cheetah_error_log;
/* Return this cpu's error-log slot: each cpu owns a pair of
 * cheetah_err_info entries (see the cpu * 2 indexing and the
 * NR_CPUS * 2 allocation in cheetah_ecache_flush_init()); the CHAFSR_TL1
 * bit selects the second (TL > 1) slot.  Returns NULL-equivalent when the
 * scoreboard is not yet allocated.
 * NOTE(review): the return statements are among the lines missing here.
 */
721 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
723 struct cheetah_err_info *p;
724 int cpu = smp_processor_id();
726 if (!cheetah_error_log)
729 p = cheetah_error_log + (cpu * 2);
730 if ((afsr & CHAFSR_TL1) != 0UL)
736 extern unsigned int tl0_icpe[], tl1_icpe[];
737 extern unsigned int tl0_dcpe[], tl1_dcpe[];
738 extern unsigned int tl0_fecc[], tl1_fecc[];
739 extern unsigned int tl0_cee[], tl1_cee[];
740 extern unsigned int tl0_iae[], tl1_iae[];
741 extern unsigned int tl0_dae[], tl1_dae[];
742 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
743 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
744 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
745 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
746 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
/* Boot-time setup for Cheetah error handling:
 *  1. probe all cpus for the largest E-cache and smallest line size,
 *  2. reserve a physical span of twice the largest E-cache for
 *     displacement flushing (find_ecache_flush_span),
 *  3. allocate and invalidate the per-cpu error-log scoreboard
 *     (two cheetah_err_info slots per cpu),
 *  4. select the AFSR decode table from the %ver chip ID
 *     (Jalapeno/Serrano vs Cheetah+ vs base Cheetah),
 *  5. patch the trap table so FECC/CEE/deferred (and on Cheetah+ the
 *     D/I-cache parity) vectors point at the cheetah handlers
 *     (each vector is 8 instructions = 8 * 4 bytes).
 * NOTE(review): interior lines missing (largest_size init, continue/
 * break statements, prom_halt calls, braces) — original numbering skips.
 */
748 void __init cheetah_ecache_flush_init(void)
750 unsigned long largest_size, smallest_linesize, order, ver;
753 /* Scan all cpu device tree nodes, note two values:
754 * 1) largest E-cache size
755 * 2) smallest E-cache line size
758 smallest_linesize = ~0UL;
760 for (i = 0; i < NR_CPUS; i++) {
763 val = cpu_data(i).ecache_size;
767 if (val > largest_size)
770 val = cpu_data(i).ecache_line_size;
771 if (val < smallest_linesize)
772 smallest_linesize = val;
776 if (largest_size == 0UL || smallest_linesize == ~0UL) {
777 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
782 ecache_flush_size = (2 * largest_size);
783 ecache_flush_linesize = smallest_linesize;
785 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
787 if (ecache_flush_physbase == ~0UL) {
788 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
789 "contiguous physical memory.\n",
794 /* Now allocate error trap reporting scoreboard. */
795 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
796 for (order = 0; order < MAX_ORDER; order++) {
797 if ((PAGE_SIZE << order) >= sz)
800 cheetah_error_log = (struct cheetah_err_info *)
801 __get_free_pages(GFP_KERNEL, order);
802 if (!cheetah_error_log) {
803 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
804 "error logging scoreboard (%d bytes).\n", sz);
807 memset(cheetah_error_log, 0, PAGE_SIZE << order);
809 /* Mark all AFSRs as invalid so that the trap handler will
810 * log new information there.
812 for (i = 0; i < 2 * NR_CPUS; i++)
813 cheetah_error_log[i].afsr = CHAFSR_INVALID;
815 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
816 if ((ver >> 32) == __JALAPENO_ID ||
817 (ver >> 32) == __SERRANO_ID) {
818 cheetah_error_table = &__jalapeno_error_table[0];
819 cheetah_afsr_errors = JPAFSR_ERRORS;
820 } else if ((ver >> 32) == 0x003e0015) {
821 cheetah_error_table = &__cheetah_plus_error_table[0];
822 cheetah_afsr_errors = CHPAFSR_ERRORS;
824 cheetah_error_table = &__cheetah_error_table[0];
825 cheetah_afsr_errors = CHAFSR_ERRORS;
828 /* Now patch trap tables. */
829 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
830 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
831 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
832 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
833 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
834 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
835 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
836 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
837 if (tlb_type == cheetah_plus) {
838 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
839 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
840 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
841 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
/* Displacement-flush the whole E-cache: walk the reserved physical span
 * backwards one line at a time with ASI_PHYS_USE_EC loads (discarded into
 * %g0), which evicts every E-cache line.
 */
846 static void cheetah_flush_ecache(void)
848 unsigned long flush_base = ecache_flush_physbase;
849 unsigned long flush_linesize = ecache_flush_linesize;
850 unsigned long flush_size = ecache_flush_size;
852 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
853 " bne,pt %%xcc, 1b\n\t"
854 " ldxa [%2 + %0] %3, %%g0\n\t"
856 : "0" (flush_size), "r" (flush_base),
857 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
/* Displacement-flush a single E-cache line: map the (8-byte aligned)
 * physical address into the flush span and load both it and its alias
 * half a flush-span away, evicting whichever way holds the line.
 */
860 static void cheetah_flush_ecache_line(unsigned long physaddr)
864 physaddr &= ~(8UL - 1UL);
865 physaddr = (ecache_flush_physbase +
866 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
867 alias = physaddr + (ecache_flush_size >> 1UL);
868 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
869 "ldxa [%1] %2, %%g0\n\t"
872 : "r" (physaddr), "r" (alias),
873 "i" (ASI_PHYS_USE_EC));
876 /* Unfortunately, the diagnostic access to the I-cache tags we need to
877 * use to clear the thing interferes with I-cache coherency transactions.
879 * So we must only flush the I-cache when it is disabled.
/* Invalidate the I-cache by clearing the valid bits of every tag via
 * diagnostic stores.  Must only be called with the I-cache disabled
 * (see the comment above and cheetah_flush_icache()).
 * NOTE(review): the asm's output/ASI operands are truncated in this
 * extract; `(2 << 3)` in the address presumably selects the tag field —
 * TODO confirm against the chip manual.
 */
881 static void __cheetah_flush_icache(void)
883 unsigned int icache_size, icache_line_size;
886 icache_size = local_cpu_data().icache_size;
887 icache_line_size = local_cpu_data().icache_line_size;
889 /* Clear the valid bits in all the tags. */
890 for (addr = 0; addr < icache_size; addr += icache_line_size) {
891 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
894 : "r" (addr | (2 << 3)),
/* Flush the I-cache safely: save the DCU control register, disable the
 * I-cache (clear via the DCU_IC bit), run the diagnostic flush, then
 * restore the saved DCU value.
 * NOTE(review): interior asm lines are missing in this extract.
 */
899 static void cheetah_flush_icache(void)
901 unsigned long dcu_save;
903 /* Save current DCU, disable I-cache. */
904 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
905 "or %0, %2, %%g1\n\t"
906 "stxa %%g1, [%%g0] %1\n\t"
909 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
912 __cheetah_flush_icache();
914 /* Restore DCU register */
915 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
918 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
/* Invalidate the D-cache by writing zero to every tag through the
 * ASI_DCACHE_TAG diagnostic ASI, one line at a time.
 */
921 static void cheetah_flush_dcache(void)
923 unsigned int dcache_size, dcache_line_size;
926 dcache_size = local_cpu_data().dcache_size;
927 dcache_line_size = local_cpu_data().dcache_line_size;
929 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
930 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
933 : "r" (addr), "i" (ASI_DCACHE_TAG));
937 /* In order to make the even parity correct we must do two things.
938 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
939 * Next, we clear out all 32-bytes of data for that line. Data of
940 * all-zero + tag parity value of zero == correct parity.
/* Repair bad D-cache parity on Cheetah+ (see the comment above): for
 * every line, write a utag derived from the address (addr >> 14 —
 * presumably the physical-tag field width; TODO confirm) via
 * ASI_DCACHE_UTAG, then zero all data words of the line via
 * ASI_DCACHE_DATA so all-zero data + zero tag parity is consistent.
 * NOTE(review): membar/asm tails are truncated in this extract.
 */
942 static void cheetah_plus_zap_dcache_parity(void)
944 unsigned int dcache_size, dcache_line_size;
947 dcache_size = local_cpu_data().dcache_size;
948 dcache_line_size = local_cpu_data().dcache_line_size;
950 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
951 unsigned long tag = (addr >> 14);
954 __asm__ __volatile__("membar #Sync\n\t"
955 "stxa %0, [%1] %2\n\t"
958 : "r" (tag), "r" (addr),
959 "i" (ASI_DCACHE_UTAG));
960 for (line = addr; line < addr + dcache_line_size; line += 8)
961 __asm__ __volatile__("membar #Sync\n\t"
962 "stxa %%g0, [%0] %1\n\t"
966 "i" (ASI_DCACHE_DATA));
970 /* Conversion tables used to frob Cheetah AFSR syndrome values into
971 * something palatable to the memory controller driver get_unumber
/* Cheetah E-syndrome -> bit-number table fed to chmc_getunumber() (see
 * cheetah_log_errors()).  Numeric entries name the failing data bit;
 * NONE/C0..C8/M..M4 are sentinel macros (single/multi-bit and check-bit
 * cases) defined in lines not visible in this extract.
 */
995 static unsigned char cheetah_ecc_syntab[] = {
996 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
997 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
998 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
999 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1000 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1001 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1002 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1003 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1004 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1005 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1006 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1007 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1008 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1009 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1010 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1011 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1012 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1013 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1014 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1015 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1016 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1017 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1018 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1019 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1020 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1021 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1022 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1023 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1024 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1025 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1026 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1027 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
/* M-syndrome (MTAG) decode table — its initializer is missing from this
 * extract (original numbering jumps from 1029 to 1040).
 */
1029 static unsigned char cheetah_mtag_syntab[] = {
1040 /* Return the highest priority error conditon mentioned. */
/* Return the highest-priority error bit set in `afsr`: scan the active
 * cheetah_error_table (which is priority-ordered) and stop at the first
 * matching mask.  NOTE(review): the `return tmp;` tail is missing from
 * this extract.
 */
1041 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1043 unsigned long tmp = 0;
1046 for (i = 0; cheetah_error_table[i].mask; i++) {
1047 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
/* Map a single AFSR error bit to its message via the active error table.
 * NOTE(review): the fallback return for an unmatched bit is among the
 * lines missing from this extract.
 */
1053 static const char *cheetah_get_string(unsigned long bit)
1057 for (i = 0; cheetah_error_table[i].mask; i++) {
1058 if ((bit & cheetah_error_table[i].mask) != 0UL)
1059 return cheetah_error_table[i].name;
1064 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Log a Cheetah error trap: AFSR/AFAR, trap PC state, syndromes, unumber
 * (DIMM name) when resolvable, and the I/D/E-cache snapshots captured in
 * *info by the trap entry code.  The log level is KERN_WARNING when
 * `recoverable` is non-zero, KERN_CRIT otherwise.
 */
1066 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1067 unsigned long afsr, unsigned long afar, int recoverable)
1069 unsigned long hipri;
1072 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1073 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1075 (afsr & CHAFSR_TL1) ? 1 : 0);
1076 printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1077 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1078 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
1079 printk("%s" "ERROR(%d): ",
1080 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1081 printk("TPC<%pS>\n", (void *) regs->tpc);
1082 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1083 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1084 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1085 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1086 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1087 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1088 hipri = cheetah_get_hipri(afsr);
1089 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1090 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1091 hipri, cheetah_get_string(hipri));
1093 /* Try to get unumber if relevant. */
1094 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1095 CHAFSR_CPC | CHAFSR_CPU | \
1096 CHAFSR_UE | CHAFSR_CE | \
1097 CHAFSR_EDC | CHAFSR_EDU | \
1098 CHAFSR_UCC | CHAFSR_UCU | \
1099 CHAFSR_WDU | CHAFSR_WDC)
1100 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1101 if (afsr & ESYND_ERRORS) {
/* E-syndrome errors: decode ECC syndrome, then ask the memory
 * controller driver for the module name (unumber).
 */
1105 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1106 syndrome = cheetah_ecc_syntab[syndrome];
1107 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1109 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1110 (recoverable ? KERN_WARNING : KERN_CRIT),
1111 smp_processor_id(), unum);
1112 } else if (afsr & MSYND_ERRORS) {
/* M-syndrome (memory tag) errors decode via the mtag table. */
1116 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1117 syndrome = cheetah_mtag_syntab[syndrome];
1118 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1120 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1121 (recoverable ? KERN_WARNING : KERN_CRIT),
1122 smp_processor_id(), unum);
1125 /* Now dump the cache snapshots. */
1126 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1127 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1128 (int) info->dcache_index,
1132 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1133 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1134 info->dcache_data[0],
1135 info->dcache_data[1],
1136 info->dcache_data[2],
1137 info->dcache_data[3]);
1138 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1139 "u[%016lx] l[%016lx]\n",
1140 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1141 (int) info->icache_index,
1146 info->icache_lower);
1147 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1148 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1149 info->icache_data[0],
1150 info->icache_data[1],
1151 info->icache_data[2],
1152 info->icache_data[3]);
1153 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1154 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1155 info->icache_data[4],
1156 info->icache_data[5],
1157 info->icache_data[6],
1158 info->icache_data[7]);
1159 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1160 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1161 (int) info->ecache_index, info->ecache_tag);
1162 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1163 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1164 info->ecache_data[0],
1165 info->ecache_data[1],
1166 info->ecache_data[2],
1167 info->ecache_data[3]);
/* Report every remaining error bit beyond the highest-priority one. */
1169 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1170 while (afsr != 0UL) {
1171 unsigned long bit = cheetah_get_hipri(afsr);
1173 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1174 (recoverable ? KERN_WARNING : KERN_CRIT),
1175 bit, cheetah_get_string(bit));
1181 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read the hardware AFSR/AFAR.  If any error bits are latched, optionally
 * record them in *logp (NULL allowed -- callers pass NULL when they only
 * want a yes/no answer), clear the AFSR by writing it back, and return
 * non-zero.  Interior lines elided in this excerpt.
 */
1184 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1186 unsigned long afsr, afar;
/* Read AFSR via ASI load. */
1189 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1192 if ((afsr & cheetah_afsr_errors) != 0) {
/* Errors latched: read AFAR as well. */
1194 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
/* Write the AFSR back to clear the latched error bits (W1C). */
1202 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1204 : : "r" (afsr), "i" (ASI_AFSR));
/* Handle a Cheetah Fast-ECC error trap.  The trap entry code has already
 * disabled the I/D caches and error reporting; we snapshot the logged error,
 * flush and re-enable the caches, re-enable error reporting, decide
 * recoverability, and either log-and-continue or panic.
 */
1209 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1211 struct cheetah_err_info local_snapshot, *p;
/* Push potentially-bad lines out of the E-cache first. */
1215 cheetah_flush_ecache();
1217 p = cheetah_get_error_log(afsr);
/* No log slot: error arrived before logging was set up -- fatal. */
1219 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1221 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1222 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1226 /* Grab snapshot of logged error. */
1227 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1229 /* If the current trap snapshot does not match what the
1230 * trap handler passed along into our args, big trouble.
1231 * In such a case, mark the local copy as invalid.
1233 * Else, it matches and we mark the afsr in the non-local
1234 * copy as invalid so we may log new error traps there.
1236 if (p->afsr != afsr || p->afar != afar)
1237 local_snapshot.afsr = CHAFSR_INVALID;
1239 p->afsr = CHAFSR_INVALID;
1241 cheetah_flush_icache();
1242 cheetah_flush_dcache();
1244 /* Re-enable I-cache/D-cache */
1245 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1246 "or %%g1, %1, %%g1\n\t"
1247 "stxa %%g1, [%%g0] %0\n\t"
1250 : "i" (ASI_DCU_CONTROL_REG),
1251 "i" (DCU_DC | DCU_IC)
1254 /* Re-enable error reporting */
1255 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1256 "or %%g1, %1, %%g1\n\t"
1257 "stxa %%g1, [%%g0] %0\n\t"
1260 : "i" (ASI_ESTATE_ERROR_EN),
1261 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1264 /* Decide if we can continue after handling this trap and
1265 * logging the error.
/* PERR/IERR/ISAP indicate corrupted processor state -- never recoverable. */
1268 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1271 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1272 * error was logged while we had error reporting traps disabled.
1274 if (cheetah_recheck_errors(&local_snapshot)) {
1275 unsigned long new_afsr = local_snapshot.afsr;
1277 /* If we got a new asynchronous error, die... */
1278 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1279 CHAFSR_WDU | CHAFSR_CPU |
1280 CHAFSR_IVU | CHAFSR_UE |
1281 CHAFSR_BERR | CHAFSR_TO))
1286 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1289 panic("Irrecoverable Fast-ECC error trap.\n");
1291 /* Flush E-cache to kick the error trap handlers out. */
1292 cheetah_flush_ecache();
1295 /* Try to fix a correctable error by pushing the line out from
1296 * the E-cache. Recheck error reporting registers to see if the
1297 * problem is intermittent.
/* Returns a status code (elided here); restores the caller's CEEN state
 * before returning.
 */
1299 static int cheetah_fix_ce(unsigned long physaddr)
1301 unsigned long orig_estate;
1302 unsigned long alias1, alias2;
1305 /* Make sure correctable error traps are disabled. */
1306 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1307 "andn %0, %1, %%g1\n\t"
1308 "stxa %%g1, [%%g0] %2\n\t"
1310 : "=&r" (orig_estate)
1311 : "i" (ESTATE_ERROR_CEEN),
1312 "i" (ASI_ESTATE_ERROR_EN)
1315 /* We calculate alias addresses that will force the
1316 * cache line in question out of the E-cache. Then
1317 * we bring it back in with an atomic instruction so
1318 * that we get it in some modified/exclusive state,
1319 * then we displace it again to try and get proper ECC
1320 * pushed back into the system.
/* Align to an 8-byte boundary for the ldxa/casxa accesses below. */
1322 physaddr &= ~(8UL - 1UL);
1323 alias1 = (ecache_flush_physbase +
1324 (physaddr & ((ecache_flush_size >> 1) - 1)));
1325 alias2 = alias1 + (ecache_flush_size >> 1);
1326 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1327 "ldxa [%1] %3, %%g0\n\t"
1328 "casxa [%2] %3, %%g0, %%g0\n\t"
1329 "membar #StoreLoad | #StoreStore\n\t"
1330 "ldxa [%0] %3, %%g0\n\t"
1331 "ldxa [%1] %3, %%g0\n\t"
1334 : "r" (alias1), "r" (alias2),
1335 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1337 /* Did that trigger another error? */
1338 if (cheetah_recheck_errors(NULL)) {
1339 /* Try one more time. */
1340 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1342 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1343 if (cheetah_recheck_errors(NULL))
1348 /* No new error, intermittent problem. */
1352 /* Restore error enables. */
1353 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1355 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1360 /* Return non-zero if PADDR is a valid physical memory address. */
/* Translates to a kernel linear address and bounds-checks against
 * high_memory before consulting kern_addr_valid().
 */
1361 static int cheetah_check_main_memory(unsigned long paddr)
1363 unsigned long vaddr = PAGE_OFFSET + paddr;
1365 if (vaddr > (unsigned long) high_memory)
1368 return kern_addr_valid(vaddr);
/* Handle a Cheetah Correctable-ECC error trap: snapshot the log entry,
 * scrub the faulting memory line if the error is a memory CE, flush the
 * relevant caches, re-enable the I-cache and CE reporting, then log.
 * Panics only on PERR/IERR/ISAP (corrupted processor state).
 */
1371 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1373 struct cheetah_err_info local_snapshot, *p;
1374 int recoverable, is_memory;
1376 p = cheetah_get_error_log(afsr);
/* No log slot yet: error happened too early -- report via PROM. */
1378 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1380 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1381 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1385 /* Grab snapshot of logged error. */
1386 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1388 /* If the current trap snapshot does not match what the
1389 * trap handler passed along into our args, big trouble.
1390 * In such a case, mark the local copy as invalid.
1392 * Else, it matches and we mark the afsr in the non-local
1393 * copy as invalid so we may log new error traps there.
1395 if (p->afsr != afsr || p->afar != afar)
1396 local_snapshot.afsr = CHAFSR_INVALID;
1398 p->afsr = CHAFSR_INVALID;
1400 is_memory = cheetah_check_main_memory(afar);
1402 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1403 /* XXX Might want to log the results of this operation
1404 * XXX somewhere... -DaveM
1406 cheetah_fix_ce(afar);
/* Non-memory CE: decide how much E-cache to flush. */
1410 int flush_all, flush_line;
1412 flush_all = flush_line = 0;
1413 if ((afsr & CHAFSR_EDC) != 0UL) {
/* EDC alone -> a single line suffices; with other errors, flush all. */
1414 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1418 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1419 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1425 /* Trap handler only disabled I-cache, flush it. */
1426 cheetah_flush_icache();
1428 /* Re-enable I-cache */
1429 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1430 "or %%g1, %1, %%g1\n\t"
1431 "stxa %%g1, [%%g0] %0\n\t"
1434 : "i" (ASI_DCU_CONTROL_REG),
1439 cheetah_flush_ecache();
1440 else if (flush_line)
1441 cheetah_flush_ecache_line(afar);
1444 /* Re-enable error reporting */
1445 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1446 "or %%g1, %1, %%g1\n\t"
1447 "stxa %%g1, [%%g0] %0\n\t"
1450 : "i" (ASI_ESTATE_ERROR_EN),
1451 "i" (ESTATE_ERROR_CEEN)
1454 /* Decide if we can continue after handling this trap and
1455 * logging the error.
1458 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1461 /* Re-check AFSR/AFAR */
1462 (void) cheetah_recheck_errors(&local_snapshot);
1465 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1468 panic("Irrecoverable Correctable-ECC error trap.\n");
/* Handle a Cheetah deferred (asynchronous) error trap.  Special-cases the
 * PCI config-space "poke" probe sequence; otherwise snapshots the error log,
 * flushes/re-enables caches and error reporting, and attempts recovery by
 * retiring the affected page and applying an exception-table fixup when the
 * fault is attributable to a user access.  Panics when unrecoverable.
 */
1471 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1473 struct cheetah_err_info local_snapshot, *p;
1474 int recoverable, is_memory;
1477 /* Check for the special PCI poke sequence. */
1478 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1479 cheetah_flush_icache();
1480 cheetah_flush_dcache();
1482 /* Re-enable I-cache/D-cache */
1483 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1484 "or %%g1, %1, %%g1\n\t"
1485 "stxa %%g1, [%%g0] %0\n\t"
1488 : "i" (ASI_DCU_CONTROL_REG),
1489 "i" (DCU_DC | DCU_IC)
1492 /* Re-enable error reporting */
1493 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1494 "or %%g1, %1, %%g1\n\t"
1495 "stxa %%g1, [%%g0] %0\n\t"
1498 : "i" (ASI_ESTATE_ERROR_EN),
1499 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
/* Clear the latched error state; the poke is expected to fault. */
1502 (void) cheetah_recheck_errors(NULL);
1504 pci_poke_faulted = 1;
/* Skip the faulting instruction and resume. */
1506 regs->tnpc = regs->tpc + 4;
1511 p = cheetah_get_error_log(afsr);
/* No log slot yet: error happened too early -- report via PROM. */
1513 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1515 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1516 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1520 /* Grab snapshot of logged error. */
1521 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1523 /* If the current trap snapshot does not match what the
1524 * trap handler passed along into our args, big trouble.
1525 * In such a case, mark the local copy as invalid.
1527 * Else, it matches and we mark the afsr in the non-local
1528 * copy as invalid so we may log new error traps there.
1530 if (p->afsr != afsr || p->afar != afar)
1531 local_snapshot.afsr = CHAFSR_INVALID;
1533 p->afsr = CHAFSR_INVALID;
1535 is_memory = cheetah_check_main_memory(afar);
/* Decide how much E-cache needs flushing for this error class. */
1538 int flush_all, flush_line;
1540 flush_all = flush_line = 0;
1541 if ((afsr & CHAFSR_EDU) != 0UL) {
1542 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1546 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1547 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1553 cheetah_flush_icache();
1554 cheetah_flush_dcache();
1556 /* Re-enable I/D caches */
1557 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1558 "or %%g1, %1, %%g1\n\t"
1559 "stxa %%g1, [%%g0] %0\n\t"
1562 : "i" (ASI_DCU_CONTROL_REG),
1563 "i" (DCU_IC | DCU_DC)
1567 cheetah_flush_ecache();
1568 else if (flush_line)
1569 cheetah_flush_ecache_line(afar);
1572 /* Re-enable error reporting */
1573 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1574 "or %%g1, %1, %%g1\n\t"
1575 "stxa %%g1, [%%g0] %0\n\t"
1578 : "i" (ASI_ESTATE_ERROR_EN),
1579 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1582 /* Decide if we can continue after handling this trap and
1583 * logging the error.
1586 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1589 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1590 * error was logged while we had error reporting traps disabled.
1592 if (cheetah_recheck_errors(&local_snapshot)) {
1593 unsigned long new_afsr = local_snapshot.afsr;
1595 /* If we got a new asynchronous error, die... */
1596 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1597 CHAFSR_WDU | CHAFSR_CPU |
1598 CHAFSR_IVU | CHAFSR_UE |
1599 CHAFSR_BERR | CHAFSR_TO))
1604 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1606 /* "Recoverable" here means we try to yank the page from ever
1607 * being newly used again. This depends upon a few things:
1608 * 1) Must be main memory, and AFAR must be valid.
1609 * 2) If we trapped from user, OK.
1610 * 3) Else, if we trapped from kernel we must find exception
1611 * table entry (ie. we have to have been accessing user
1614 * If AFAR is not in main memory, or we trapped from kernel
1615 * and cannot find an exception table entry, it is unacceptable
1616 * to try and continue.
1618 if (recoverable && is_memory) {
1619 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1620 /* OK, usermode access. */
1623 const struct exception_table_entry *entry;
1625 entry = search_exception_tables(regs->tpc);
1627 /* OK, kernel access to userspace. */
1631 /* BAD, privileged state is corrupted. */
/* Pin the bad page so it is never handed out again. */
1636 if (pfn_valid(afar >> PAGE_SHIFT))
1637 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1641 /* Only perform fixup if we still have a
1642 * recoverable condition.
1645 regs->tpc = entry->fixup;
1646 regs->tnpc = regs->tpc + 4;
1655 panic("Irrecoverable deferred error trap.\n");
1658 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1660 * Bit0: 0=dcache,1=icache
1661 * Bit1: 0=recoverable,1=unrecoverable
1663 * The hardware has disabled both the I-cache and D-cache in
1664 * the %dcr register.
1666 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
/* Scrub both caches before re-enabling them. */
1669 __cheetah_flush_icache();
1671 cheetah_plus_zap_dcache_parity();
1672 cheetah_flush_dcache();
1674 /* Re-enable I-cache/D-cache */
1675 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1676 "or %%g1, %1, %%g1\n\t"
1677 "stxa %%g1, [%%g0] %0\n\t"
1680 : "i" (ASI_DCU_CONTROL_REG),
1681 "i" (DCU_DC | DCU_IC)
/* Unrecoverable (bit1 set): log at EMERG and panic. */
1685 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1687 (type & 0x1) ? 'I' : 'D',
1689 printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1690 panic("Irrecoverable Cheetah+ parity error.");
/* Recoverable: just warn and continue. */
1693 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1695 (type & 0x1) ? 'I' : 'D',
1697 printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
/* Layout of one entry in the sun4v hypervisor error queue
 * (field list elided in this excerpt).
 */
1700 struct sun4v_error_entry {
/* err_type values reported by the hypervisor. */
1705 #define SUN4V_ERR_TYPE_UNDEFINED 0
1706 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1707 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1708 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1709 #define SUN4V_ERR_TYPE_WARNING_RES 4
/* err_attrs bit flags describing what the error affected. */
1712 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1713 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1714 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1715 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1716 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1717 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1718 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1719 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
/* Counters bumped from the queue-overflow traps; reported later by
 * sun4v_log_error() since printk from the overflow path is unsafe.
 */
1727 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1728 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
/* Translate a SUN4V_ERR_TYPE_* code into a printable string
 * (default case elided in this excerpt).
 */
1730 static const char *sun4v_err_type_to_str(u32 type)
1733 case SUN4V_ERR_TYPE_UNDEFINED:
1735 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1736 return "uncorrected resumable";
1737 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1738 return "precise nonresumable";
1739 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1740 return "deferred nonresumable";
1741 case SUN4V_ERR_TYPE_WARNING_RES:
1742 return "warning resumable";
/* Pretty-print a sun4v error-queue entry with prefix `pfx` on behalf of
 * `cpu`, and report (then reset) the overflow counter `ocnt` if it fired.
 */
1748 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1752 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1753 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1755 ent->err_handle, ent->err_stick,
1757 sun4v_err_type_to_str(ent->err_type));
1758 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1761 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1763 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1765 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1767 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1768 "integer-regs" : ""),
1769 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1771 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1773 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1775 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1776 "queue-full" : ""));
1777 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1779 ent->err_raddr, ent->err_size, ent->err_cpu);
/* Report queue overflows accumulated by the overflow trap handlers. */
1783 if ((cnt = atomic_read(ocnt)) != 0) {
1784 atomic_set(ocnt, 0);
1786 printk("%s: Queue overflowed %d times.\n",
1791 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1792 * Log the event and clear the first word of the entry.
1794 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1796 struct sun4v_error_entry *ent, local_copy;
1797 struct trap_per_cpu *tb;
1798 unsigned long paddr;
/* Locate this CPU's entry within the resumable error queue. */
1803 tb = &trap_block[cpu];
1804 paddr = tb->resum_kernel_buf_pa + offset;
1807 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1809 /* We have a local copy now, so release the entry. */
1810 ent->err_handle = 0;
1815 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1816 /* If err_type is 0x4, it's a powerdown request. Do
1817 * not do the usual resumable error log because that
1818 * makes it look like some abnormal error.
1820 printk(KERN_INFO "Power down request...\n");
1821 kill_cad_pid(SIGINT, 1);
1825 sun4v_log_error(regs, &local_copy, cpu,
1826 KERN_ERR "RESUMABLE ERROR",
1827 &sun4v_resum_oflow_cnt);
1830 /* If we try to printk() we'll probably make matters worse, by trying
1831 * to retake locks this cpu already holds or causing more errors. So
1832 * just bump a counter, and we'll report these counter bumps above.
1834 void sun4v_resum_overflow(struct pt_regs *regs)
1836 atomic_inc(&sun4v_resum_oflow_cnt);
1839 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1840 * Log the event, clear the first word of the entry, and die.
1842 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1844 struct sun4v_error_entry *ent, local_copy;
1845 struct trap_per_cpu *tb;
1846 unsigned long paddr;
/* Locate this CPU's entry within the non-resumable error queue. */
1851 tb = &trap_block[cpu];
1852 paddr = tb->nonresum_kernel_buf_pa + offset;
1855 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1857 /* We have a local copy now, so release the entry. */
1858 ent->err_handle = 0;
1864 /* Check for the special PCI poke sequence. */
1865 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1866 pci_poke_faulted = 1;
/* Skip the faulting instruction; the poke was expected to fault. */
1868 regs->tnpc = regs->tpc + 4;
1873 sun4v_log_error(regs, &local_copy, cpu,
1874 KERN_EMERG "NON-RESUMABLE ERROR",
1875 &sun4v_nonresum_oflow_cnt);
1877 panic("Non-resumable error.");
1880 /* If we try to printk() we'll probably make matters worse, by trying
1881 * to retake locks this cpu already holds or causing more errors. So
1882 * just bump a counter, and we'll report these counter bumps above.
1884 void sun4v_nonresum_overflow(struct pt_regs *regs)
1886 /* XXX Actually even this can make not that much sense. Perhaps
1887 * XXX we should just pull the plug and panic directly from here?
1889 atomic_inc(&sun4v_nonresum_oflow_cnt);
/* I-TLB miss handler error state, stashed by trap-entry assembly for the
 * report below.
 */
1892 unsigned long sun4v_err_itlb_vaddr;
1893 unsigned long sun4v_err_itlb_ctx;
1894 unsigned long sun4v_err_itlb_pte;
1895 unsigned long sun4v_err_itlb_error;
/* Report a fatal sun4v I-TLB error; dumps the TL1 traplog when tl > 1. */
1897 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1900 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1902 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1904 printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
1905 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1906 printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
1907 (void *) regs->u_regs[UREG_I7]);
1908 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1909 "pte[%lx] error[%lx]\n",
1910 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1911 sun4v_err_itlb_pte, sun4v_err_itlb_error);
/* D-TLB miss handler error state, stashed by trap-entry assembly for the
 * report below.
 */
1916 unsigned long sun4v_err_dtlb_vaddr;
1917 unsigned long sun4v_err_dtlb_ctx;
1918 unsigned long sun4v_err_dtlb_pte;
1919 unsigned long sun4v_err_dtlb_error;
/* Report a fatal sun4v D-TLB error; dumps the TL1 traplog when tl > 1. */
1921 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1924 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1926 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1928 printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
1929 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1930 printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
1931 (void *) regs->u_regs[UREG_I7]);
1932 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1933 "pte[%lx] error[%lx]\n",
1934 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1935 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
/* Called from assembly stubs when a hypervisor TLB operation returns an
 * error code; the _xcall variant covers the cross-call path.
 */
1940 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1942 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1946 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1948 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
/* Common FPU-exception tail: from kernel mode just skip the instruction;
 * from user mode decode %fsr trap/cause bits into an FPE_* si_code and
 * deliver SIGFPE.
 */
1952 void do_fpe_common(struct pt_regs *regs)
1954 if (regs->tstate & TSTATE_PRIV) {
1955 regs->tpc = regs->tnpc;
1958 unsigned long fsr = current_thread_info()->xfsr[0];
1961 if (test_thread_flag(TIF_32BIT)) {
/* 32-bit task: PCs are truncated to 32 bits. */
1962 regs->tpc &= 0xffffffff;
1963 regs->tnpc &= 0xffffffff;
1965 info.si_signo = SIGFPE;
1967 info.si_addr = (void __user *)regs->tpc;
1969 info.si_code = __SI_FAULT;
/* fsr bits 16:14 == 1 => IEEE exception; decode the cexc bits below. */
1970 if ((fsr & 0x1c000) == (1 << 14)) {
1972 info.si_code = FPE_FLTINV;
1973 else if (fsr & 0x08)
1974 info.si_code = FPE_FLTOVF;
1975 else if (fsr & 0x04)
1976 info.si_code = FPE_FLTUND;
1977 else if (fsr & 0x02)
1978 info.si_code = FPE_FLTDIV;
1979 else if (fsr & 0x01)
1980 info.si_code = FPE_FLTRES;
1982 force_sig_info(SIGFPE, &info, current);
/* IEEE FPU exception trap (tt 0x24): notify debuggers, then fall through
 * to the common SIGFPE delivery path.
 */
1986 void do_fpieee(struct pt_regs *regs)
1988 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1989 0, 0x24, SIGFPE) == NOTIFY_STOP)
1992 do_fpe_common(regs);
1995 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* "FPU other" exception trap (tt 0x25): try software emulation for
 * unfinished/unimplemented FPops, else deliver SIGFPE via the common path.
 */
1997 void do_fpother(struct pt_regs *regs)
1999 struct fpustate *f = FPUSTATE;
2002 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2003 0, 0x25, SIGFPE) == NOTIFY_STOP)
/* %fsr ftt field (bits 16:14) selects the trap type. */
2006 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2007 case (2 << 14): /* unfinished_FPop */
2008 case (3 << 14): /* unimplemented_FPop */
2009 ret = do_mathemu(regs, f);
2014 do_fpe_common(regs);
/* Tagged arithmetic overflow trap (tt 0x26): fatal in kernel mode,
 * otherwise delivers SIGEMT/EMT_TAGOVF to the user task.
 */
2017 void do_tof(struct pt_regs *regs)
2021 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2022 0, 0x26, SIGEMT) == NOTIFY_STOP)
2025 if (regs->tstate & TSTATE_PRIV)
2026 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2027 if (test_thread_flag(TIF_32BIT)) {
2028 regs->tpc &= 0xffffffff;
2029 regs->tnpc &= 0xffffffff;
2031 info.si_signo = SIGEMT;
2033 info.si_code = EMT_TAGOVF;
2034 info.si_addr = (void __user *)regs->tpc;
2036 force_sig_info(SIGEMT, &info, current);
/* Integer divide-by-zero trap (tt 0x28): fatal in kernel mode, otherwise
 * delivers SIGFPE/FPE_INTDIV to the user task.
 */
2039 void do_div0(struct pt_regs *regs)
2043 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2044 0, 0x28, SIGFPE) == NOTIFY_STOP)
2047 if (regs->tstate & TSTATE_PRIV)
2048 die_if_kernel("TL0: Kernel divide by zero.", regs);
2049 if (test_thread_flag(TIF_32BIT)) {
2050 regs->tpc &= 0xffffffff;
2051 regs->tnpc &= 0xffffffff;
2053 info.si_signo = SIGFPE;
2055 info.si_code = FPE_INTDIV;
2056 info.si_addr = (void __user *)regs->tpc;
2058 force_sig_info(SIGFPE, &info, current);
/* Dump 9 kernel instructions around `pc` (3 before, the faulting one in
 * angle brackets, 5 after); bails if pc is not 4-byte aligned.
 */
2061 static void instruction_dump(unsigned int *pc)
2065 if ((((unsigned long) pc) & 3))
2068 printk("Instruction DUMP:");
2069 for (i = -3; i < 6; i++)
2070 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* Same as instruction_dump() but for a userspace pc: copies the 9 words
 * with copy_from_user() and marks buf[3] (the faulting insn) with brackets.
 */
2074 static void user_instruction_dump(unsigned int __user *pc)
2077 unsigned int buf[9];
2079 if ((((unsigned long) pc) & 3))
2082 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2085 printk("Instruction DUMP:");
2086 for (i = 0; i < 9; i++)
2087 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Walk and print a kernel call trace for `tsk` starting from stack pointer
 * _ksp (NULL means derive it; interior lines elided in this excerpt).
 * Follows saved frame pointers, stepping through trap frames, for at most
 * 16 entries.
 */
2091 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2093 unsigned long fp, thread_base, ksp;
2094 struct thread_info *tp;
2097 ksp = (unsigned long) _ksp;
2100 tp = task_thread_info(tsk);
/* Tracing ourselves: take %fp directly. */
2103 asm("mov %%fp, %0" : "=r" (ksp));
2107 if (tp == current_thread_info())
/* Saved SPs are biased on sparc64; undo the bias to get a real address. */
2110 fp = ksp + STACK_BIAS;
2111 thread_base = (unsigned long) tp;
2113 printk("Call Trace:\n");
2115 struct sparc_stackf *sf;
2116 struct pt_regs *regs;
2119 if (!kstack_valid(tp, fp))
2121 sf = (struct sparc_stackf *) fp;
2122 regs = (struct pt_regs *) (sf + 1);
2124 if (kstack_is_trap_frame(tp, regs)) {
/* Stop once the trace crosses into userspace. */
2125 if (!(regs->tstate & TSTATE_PRIV))
2128 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2130 pc = sf->callers_pc;
2131 fp = (unsigned long)sf->fp + STACK_BIAS;
2134 printk(" [%016lx] %pS\n", pc, (void *) pc);
2135 } while (++count < 16);
/* Generic dump_stack() entry point: trace the current task from its
 * current frame.
 */
2138 void dump_stack(void)
2140 show_stack(current, NULL);
2143 EXPORT_SYMBOL(dump_stack);
/* Return non-zero when register window `rw` lies within `task`'s kernel
 * stack (init_task is allowed below PAGE_OFFSET).  Tail of the range check
 * elided in this excerpt.
 */
2145 static inline int is_kernel_stack(struct task_struct *task,
2146 struct reg_window *rw)
2148 unsigned long rw_addr = (unsigned long) rw;
2149 unsigned long thread_base, thread_end;
2151 if (rw_addr < PAGE_OFFSET) {
2152 if (task != &init_task)
2156 thread_base = (unsigned long) task_stack_page(task);
2157 thread_end = thread_base + sizeof(union thread_union);
2158 if (rw_addr >= thread_base &&
2159 rw_addr < thread_end &&
/* Step to the caller's register window using the saved frame pointer
 * (%i6), undoing the sparc64 stack bias.
 */
2166 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2168 unsigned long fp = rw->ins[6];
2173 return (struct reg_window *) (fp + STACK_BIAS);
/* Oops path: print the banner and register/backtrace dump, taint the
 * kernel, and (tail elided here) terminate if the trap came from kernel
 * mode.  Safe to call from user-mode traps too -- then it only dumps.
 */
2176 void die_if_kernel(char *str, struct pt_regs *regs)
2178 static int die_counter;
2181 /* Amuse the user. */
2184 " \"@'/ .. \\`@\"\n"
2188 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2189 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
/* Spill all register windows to the stack so the walk below sees them. */
2190 __asm__ __volatile__("flushw");
2192 add_taint(TAINT_DIE);
2193 if (regs->tstate & TSTATE_PRIV) {
2194 struct reg_window *rw = (struct reg_window *)
2195 (regs->u_regs[UREG_FP] + STACK_BIAS);
2197 /* Stop the back trace when we hit userland or we
2198 * find some badly aligned kernel stack.
2202 is_kernel_stack(current, rw)) {
2203 printk("Caller[%016lx]: %pS\n", rw->ins[7],
2204 (void *) rw->ins[7]);
2206 rw = kernel_stack_up(rw);
2208 instruction_dump ((unsigned int *) regs->tpc);
2210 if (test_thread_flag(TIF_32BIT)) {
2211 regs->tpc &= 0xffffffff;
2212 regs->tnpc &= 0xffffffff;
2214 user_instruction_dump ((unsigned int __user *) regs->tpc);
2216 if (regs->tstate & TSTATE_PRIV)
/* Mask/value pair identifying VIS (IMPDEP1, op3 0x36) instructions for the
 * emulation dispatch in do_illegal_instruction().
 */
2221 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2222 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2224 extern int handle_popc(u32 insn, struct pt_regs *regs);
2225 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2226 extern int vis_emul(struct pt_regs *, unsigned int);
/* Illegal instruction trap (tt 0x10): fatal in kernel mode; in user mode,
 * fetch the insn and attempt emulation (POPC, LDQ/STQ, VIS on sun4v, FPU
 * math-emu) before falling back to SIGILL/ILL_ILLOPC.
 */
2228 void do_illegal_instruction(struct pt_regs *regs)
2230 unsigned long pc = regs->tpc;
2231 unsigned long tstate = regs->tstate;
2235 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2236 0, 0x10, SIGILL) == NOTIFY_STOP)
2239 if (tstate & TSTATE_PRIV)
2240 die_if_kernel("Kernel illegal instruction", regs);
2241 if (test_thread_flag(TIF_32BIT))
2243 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2244 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2245 if (handle_popc(insn, regs))
2247 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2248 if (handle_ldf_stq(insn, regs))
2250 } else if (tlb_type == hypervisor) {
2251 if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2252 if (!vis_emul(regs, insn))
2255 struct fpustate *f = FPUSTATE;
2257 /* XXX maybe verify XFSR bits like
2258 * XXX do_fpother() does?
2260 if (do_mathemu(regs, f))
/* All emulation attempts failed: deliver SIGILL. */
2265 info.si_signo = SIGILL;
2267 info.si_code = ILL_ILLOPC;
2268 info.si_addr = (void __user *)pc;
2270 force_sig_info(SIGILL, &info, current);
2273 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
/* Memory-address-not-aligned trap (tt 0x34): kernel accesses go to the
 * unaligned-access fixup; user accesses get SIGBUS/BUS_ADRALN at the
 * faulting address (sfar).
 */
2275 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2279 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2280 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2283 if (regs->tstate & TSTATE_PRIV) {
2284 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2287 info.si_signo = SIGBUS;
2289 info.si_code = BUS_ADRALN;
2290 info.si_addr = (void __user *)sfar;
2292 force_sig_info(SIGBUS, &info, current);
/* sun4v flavor of the unaligned-access trap: same policy as
 * mem_address_unaligned() but the fault address arrives as `addr`.
 */
2295 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2299 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2300 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2303 if (regs->tstate & TSTATE_PRIV) {
2304 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2307 info.si_signo = SIGBUS;
2309 info.si_code = BUS_ADRALN;
2310 info.si_addr = (void __user *) addr;
2312 force_sig_info(SIGBUS, &info, current);
/* Privileged-opcode trap (tt 0x11) from user mode: deliver
 * SIGILL/ILL_PRVOPC.
 */
2315 void do_privop(struct pt_regs *regs)
2319 if (notify_die(DIE_TRAP, "privileged operation", regs,
2320 0, 0x11, SIGILL) == NOTIFY_STOP)
2323 if (test_thread_flag(TIF_32BIT)) {
2324 regs->tpc &= 0xffffffff;
2325 regs->tnpc &= 0xffffffff;
2327 info.si_signo = SIGILL;
2329 info.si_code = ILL_PRVOPC;
2330 info.si_addr = (void __user *)regs->tpc;
2332 force_sig_info(SIGILL, &info, current);
/* Privileged-action trap entry point (body elided in this excerpt). */
2335 void do_privact(struct pt_regs *regs)
2340 /* Trap level 1 stuff or other traps we should never see... */
2341 void do_cee(struct pt_regs *regs)
2343 die_if_kernel("TL0: Cache Error Exception", regs);
2346 void do_cee_tl1(struct pt_regs *regs)
2348 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2349 die_if_kernel("TL1: Cache Error Exception", regs);
2352 void do_dae_tl1(struct pt_regs *regs)
2354 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2355 die_if_kernel("TL1: Data Access Exception", regs);
2358 void do_iae_tl1(struct pt_regs *regs)
2360 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2361 die_if_kernel("TL1: Instruction Access Exception", regs);
2364 void do_div0_tl1(struct pt_regs *regs)
2366 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2367 die_if_kernel("TL1: DIV0 Exception", regs);
2370 void do_fpdis_tl1(struct pt_regs *regs)
2372 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2373 die_if_kernel("TL1: FPU Disabled", regs);
2376 void do_fpieee_tl1(struct pt_regs *regs)
2378 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2379 die_if_kernel("TL1: FPU IEEE Exception", regs);
2382 void do_fpother_tl1(struct pt_regs *regs)
2384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2385 die_if_kernel("TL1: FPU Other Exception", regs);
2388 void do_ill_tl1(struct pt_regs *regs)
2390 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2391 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2394 void do_irq_tl1(struct pt_regs *regs)
2396 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2397 die_if_kernel("TL1: IRQ Exception", regs);
2400 void do_lddfmna_tl1(struct pt_regs *regs)
2402 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2403 die_if_kernel("TL1: LDDF Exception", regs);
2406 void do_stdfmna_tl1(struct pt_regs *regs)
2408 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2409 die_if_kernel("TL1: STDF Exception", regs);
2412 void do_paw(struct pt_regs *regs)
2414 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
/* Physical address watchpoint at trap level > 0: dump trap stack and die. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
/* Virtual address watchpoint hit at trap level 0.  Fatal in kernel mode. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
/* Virtual address watchpoint at trap level > 0: dump trap stack and die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
/* Tag overflow at trap level > 0: dump trap stack and die. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2440 void do_getpsr(struct pt_regs *regs)
2442 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2443 regs->tpc = regs->tnpc;
2445 if (test_thread_flag(TIF_32BIT)) {
2446 regs->tpc &= 0xffffffff;
2447 regs->tnpc &= 0xffffffff;
2451 struct trap_per_cpu trap_block[NR_CPUS];
2453 /* This can get invoked before sched_init() so play it super safe
2454 * and use hard_smp_processor_id().
2456 void init_cur_cpu_trap(struct thread_info *t)
2458 int cpu = hard_smp_processor_id();
2459 struct trap_per_cpu *p = &trap_block[cpu];
/* These three functions are deliberately defined nowhere.  Each call
 * below sits under a compile-time-constant condition; if all the
 * offset checks hold, the compiler folds the conditions to false and
 * discards the calls.  If any check fails, the call survives and the
 * kernel fails to link, flagging the asm-offset mismatch at build time.
 */
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check: the TI_* constants used by assembly
	 * code must match the C layout of struct thread_info.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))	/* fpregs must be 64-byte aligned */
		thread_info_offsets_are_bolixed_dave();

	/* Same check for the TRAP_PER_CPU_* offsets into trap_block[]. */
	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	/* And for the TSB_CONFIG_* offsets into struct tsb_config. */
	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}