1 /* arch/sparc64/kernel/traps.c
3 * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
8 * I like traps on v9, :))))
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/signal.h>
15 #include <linux/smp.h>
17 #include <linux/init.h>
18 #include <linux/kdebug.h>
21 #include <asm/delay.h>
22 #include <asm/system.h>
23 #include <asm/ptrace.h>
24 #include <asm/oplib.h>
26 #include <asm/pgtable.h>
27 #include <asm/unistd.h>
28 #include <asm/uaccess.h>
29 #include <asm/fpumacro.h>
32 #include <asm/estate.h>
33 #include <asm/chafsr.h>
34 #include <asm/sfafsr.h>
35 #include <asm/psrcompat.h>
36 #include <asm/processor.h>
37 #include <asm/timer.h>
40 #include <linux/kmod.h>
46 /* When an irrecoverable trap occurs at tl > 0, the trap entry
47 * code logs the trap state registers at every level in the trap
48 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
61 static void dump_tl1_traplog(struct tl1_traplog *p)
65 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
66 "dumping track stack.\n", p->tl);
68 limit = (tlb_type == hypervisor) ? 2 : 4;
69 for (i = 0; i < limit; i++) {
71 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
72 "TNPC[%016lx] TT[%lx]\n",
74 p->trapstack[i].tstate, p->trapstack[i].tpc,
75 p->trapstack[i].tnpc, p->trapstack[i].tt);
76 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
80 void bad_trap(struct pt_regs *regs, long lvl)
85 if (notify_die(DIE_TRAP, "bad trap", regs,
86 0, lvl, SIGTRAP) == NOTIFY_STOP)
90 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
91 die_if_kernel(buffer, regs);
95 if (regs->tstate & TSTATE_PRIV) {
96 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
97 die_if_kernel(buffer, regs);
99 if (test_thread_flag(TIF_32BIT)) {
100 regs->tpc &= 0xffffffff;
101 regs->tnpc &= 0xffffffff;
103 info.si_signo = SIGILL;
105 info.si_code = ILL_ILLTRP;
106 info.si_addr = (void __user *)regs->tpc;
107 info.si_trapno = lvl;
108 force_sig_info(SIGILL, &info, current);
111 void bad_trap_tl1(struct pt_regs *regs, long lvl)
115 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
116 0, lvl, SIGTRAP) == NOTIFY_STOP)
119 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
121 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
122 die_if_kernel (buffer, regs);
125 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* BUG() reporting hook: unblank the console so the message is seen,
 * then print the source location.  The trap machinery does the rest.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
133 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
137 if (notify_die(DIE_TRAP, "instruction access exception", regs,
138 0, 0x8, SIGTRAP) == NOTIFY_STOP)
141 if (regs->tstate & TSTATE_PRIV) {
142 printk("spitfire_insn_access_exception: SFSR[%016lx] "
143 "SFAR[%016lx], going.\n", sfsr, sfar);
144 die_if_kernel("Iax", regs);
146 if (test_thread_flag(TIF_32BIT)) {
147 regs->tpc &= 0xffffffff;
148 regs->tnpc &= 0xffffffff;
150 info.si_signo = SIGSEGV;
152 info.si_code = SEGV_MAPERR;
153 info.si_addr = (void __user *)regs->tpc;
155 force_sig_info(SIGSEGV, &info, current);
158 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
160 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
161 0, 0x8, SIGTRAP) == NOTIFY_STOP)
164 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
165 spitfire_insn_access_exception(regs, sfsr, sfar);
168 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
170 unsigned short type = (type_ctx >> 16);
171 unsigned short ctx = (type_ctx & 0xffff);
174 if (notify_die(DIE_TRAP, "instruction access exception", regs,
175 0, 0x8, SIGTRAP) == NOTIFY_STOP)
178 if (regs->tstate & TSTATE_PRIV) {
179 printk("sun4v_insn_access_exception: ADDR[%016lx] "
180 "CTX[%04x] TYPE[%04x], going.\n",
182 die_if_kernel("Iax", regs);
185 if (test_thread_flag(TIF_32BIT)) {
186 regs->tpc &= 0xffffffff;
187 regs->tnpc &= 0xffffffff;
189 info.si_signo = SIGSEGV;
191 info.si_code = SEGV_MAPERR;
192 info.si_addr = (void __user *) addr;
194 force_sig_info(SIGSEGV, &info, current);
197 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
199 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
200 0, 0x8, SIGTRAP) == NOTIFY_STOP)
203 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
204 sun4v_insn_access_exception(regs, addr, type_ctx);
207 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
211 if (notify_die(DIE_TRAP, "data access exception", regs,
212 0, 0x30, SIGTRAP) == NOTIFY_STOP)
215 if (regs->tstate & TSTATE_PRIV) {
216 /* Test if this comes from uaccess places. */
217 const struct exception_table_entry *entry;
219 entry = search_exception_tables(regs->tpc);
221 /* Ouch, somebody is trying VM hole tricks on us... */
222 #ifdef DEBUG_EXCEPTIONS
223 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
224 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
225 regs->tpc, entry->fixup);
227 regs->tpc = entry->fixup;
228 regs->tnpc = regs->tpc + 4;
232 printk("spitfire_data_access_exception: SFSR[%016lx] "
233 "SFAR[%016lx], going.\n", sfsr, sfar);
234 die_if_kernel("Dax", regs);
237 info.si_signo = SIGSEGV;
239 info.si_code = SEGV_MAPERR;
240 info.si_addr = (void __user *)sfar;
242 force_sig_info(SIGSEGV, &info, current);
245 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
247 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
248 0, 0x30, SIGTRAP) == NOTIFY_STOP)
251 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
252 spitfire_data_access_exception(regs, sfsr, sfar);
255 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
257 unsigned short type = (type_ctx >> 16);
258 unsigned short ctx = (type_ctx & 0xffff);
261 if (notify_die(DIE_TRAP, "data access exception", regs,
262 0, 0x8, SIGTRAP) == NOTIFY_STOP)
265 if (regs->tstate & TSTATE_PRIV) {
266 printk("sun4v_data_access_exception: ADDR[%016lx] "
267 "CTX[%04x] TYPE[%04x], going.\n",
269 die_if_kernel("Dax", regs);
272 if (test_thread_flag(TIF_32BIT)) {
273 regs->tpc &= 0xffffffff;
274 regs->tnpc &= 0xffffffff;
276 info.si_signo = SIGSEGV;
278 info.si_code = SEGV_MAPERR;
279 info.si_addr = (void __user *) addr;
281 force_sig_info(SIGSEGV, &info, current);
284 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
286 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
287 0, 0x8, SIGTRAP) == NOTIFY_STOP)
290 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
291 sun4v_data_access_exception(regs, addr, type_ctx);
295 /* This is really pathetic... */
296 extern volatile int pci_poke_in_progress;
297 extern volatile int pci_poke_cpu;
298 extern volatile int pci_poke_faulted;
301 /* When access exceptions happen, we must do this. */
302 static void spitfire_clean_and_reenable_l1_caches(void)
306 if (tlb_type != spitfire)
310 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
311 spitfire_put_icache_tag(va, 0x0);
312 spitfire_put_dcache_tag(va, 0x0);
315 /* Re-enable in LSU. */
316 __asm__ __volatile__("flush %%g6\n\t"
318 "stxa %0, [%%g0] %1\n\t"
321 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
322 LSU_CONTROL_IM | LSU_CONTROL_DM),
323 "i" (ASI_LSU_CONTROL)
327 static void spitfire_enable_estate_errors(void)
329 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
332 : "r" (ESTATE_ERR_ALL),
333 "i" (ASI_ESTATE_ERROR_EN));
/* Maps an 8-bit UDB ECC syndrome to the failing bit number used by
 * spitfire_log_udb_syndrome()/prom_getunumber().
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
371 static char *syndrome_unknown = "<Unknown>";
373 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
375 unsigned short scode;
376 char memmod_str[64], *p;
379 scode = ecc_syndrome_table[udbl & 0xff];
380 if (prom_getunumber(scode, afar,
381 memmod_str, sizeof(memmod_str)) == -1)
382 p = syndrome_unknown;
385 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
386 "Memory Module \"%s\"\n",
387 smp_processor_id(), scode, p);
391 scode = ecc_syndrome_table[udbh & 0xff];
392 if (prom_getunumber(scode, afar,
393 memmod_str, sizeof(memmod_str)) == -1)
394 p = syndrome_unknown;
397 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
398 "Memory Module \"%s\"\n",
399 smp_processor_id(), scode, p);
404 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
407 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
408 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
409 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
411 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
413 /* We always log it, even if someone is listening for this
416 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
417 0, TRAP_TYPE_CEE, SIGTRAP);
419 /* The Correctable ECC Error trap does not disable I/D caches. So
420 * we only have to restore the ESTATE Error Enable register.
422 spitfire_enable_estate_errors();
425 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
429 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
430 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
431 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
433 /* XXX add more human friendly logging of the error status
434 * XXX as is implemented for cheetah
437 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
439 /* We always log it, even if someone is listening for this
442 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
445 if (regs->tstate & TSTATE_PRIV) {
447 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
448 die_if_kernel("UE", regs);
451 /* XXX need more intelligent processing here, such as is implemented
452 * XXX for cheetah errors, in fact if the E-cache still holds the
453 * XXX line with bad parity this will loop
456 spitfire_clean_and_reenable_l1_caches();
457 spitfire_enable_estate_errors();
459 if (test_thread_flag(TIF_32BIT)) {
460 regs->tpc &= 0xffffffff;
461 regs->tnpc &= 0xffffffff;
463 info.si_signo = SIGBUS;
465 info.si_code = BUS_OBJERR;
466 info.si_addr = (void *)0;
468 force_sig_info(SIGBUS, &info, current);
471 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
473 unsigned long afsr, tt, udbh, udbl;
476 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
477 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
478 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
479 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
480 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
483 if (tt == TRAP_TYPE_DAE &&
484 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
485 spitfire_clean_and_reenable_l1_caches();
486 spitfire_enable_estate_errors();
488 pci_poke_faulted = 1;
489 regs->tnpc = regs->tpc + 4;
494 if (afsr & SFAFSR_UE)
495 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
497 if (tt == TRAP_TYPE_CEE) {
498 /* Handle the case where we took a CEE trap, but ACK'd
499 * only the UE state in the UDB error registers.
501 if (afsr & SFAFSR_UE) {
502 if (udbh & UDBE_CE) {
503 __asm__ __volatile__(
504 "stxa %0, [%1] %2\n\t"
507 : "r" (udbh & UDBE_CE),
508 "r" (0x0), "i" (ASI_UDB_ERROR_W));
510 if (udbl & UDBE_CE) {
511 __asm__ __volatile__(
512 "stxa %0, [%1] %2\n\t"
515 : "r" (udbl & UDBE_CE),
516 "r" (0x18), "i" (ASI_UDB_ERROR_W));
520 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
524 int cheetah_pcache_forced_on;
526 void cheetah_enable_pcache(void)
530 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
533 __asm__ __volatile__("ldxa [%%g0] %1, %0"
535 : "i" (ASI_DCU_CONTROL_REG));
536 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
537 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
540 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
543 /* Cheetah error trap handling. */
544 static unsigned long ecache_flush_physbase;
545 static unsigned long ecache_flush_linesize;
546 static unsigned long ecache_flush_size;
548 /* This table is ordered in priority of errors and matches the
549 * AFAR overwrite policy as well.
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit(s) identifying the error */
	const char *name;	/* human-readable description */
};
557 static const char CHAFSR_PERR_msg[] =
558 "System interface protocol error";
559 static const char CHAFSR_IERR_msg[] =
560 "Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
563 static const char CHAFSR_UCU_msg[] =
564 "Uncorrectable E-cache ECC error for ifetch/data";
565 static const char CHAFSR_UCC_msg[] =
566 "SW Correctable E-cache ECC error for ifetch/data";
567 static const char CHAFSR_UE_msg[] =
568 "Uncorrectable system bus data ECC error for read";
569 static const char CHAFSR_EDU_msg[] =
570 "Uncorrectable E-cache ECC error for stmerge/blkld";
571 static const char CHAFSR_EMU_msg[] =
572 "Uncorrectable system bus MTAG error";
573 static const char CHAFSR_WDU_msg[] =
574 "Uncorrectable E-cache ECC error for writeback";
575 static const char CHAFSR_CPU_msg[] =
576 "Uncorrectable ECC error for copyout";
577 static const char CHAFSR_CE_msg[] =
578 "HW corrected system bus data ECC error for read";
579 static const char CHAFSR_EDC_msg[] =
580 "HW corrected E-cache ECC error for stmerge/blkld";
581 static const char CHAFSR_EMC_msg[] =
582 "HW corrected system bus MTAG ECC error";
583 static const char CHAFSR_WDC_msg[] =
584 "HW corrected E-cache ECC error for writeback";
585 static const char CHAFSR_CPC_msg[] =
586 "HW corrected ECC error for copyout";
587 static const char CHAFSR_TO_msg[] =
588 "Unmapped error from system bus";
589 static const char CHAFSR_BERR_msg[] =
590 "Bus error response from system bus";
591 static const char CHAFSR_IVC_msg[] =
592 "HW corrected system bus data ECC error for ivec read";
593 static const char CHAFSR_IVU_msg[] =
594 "Uncorrectable system bus data ECC error for ivec read";
595 static struct afsr_error_table __cheetah_error_table[] = {
596 { CHAFSR_PERR, CHAFSR_PERR_msg },
597 { CHAFSR_IERR, CHAFSR_IERR_msg },
598 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
599 { CHAFSR_UCU, CHAFSR_UCU_msg },
600 { CHAFSR_UCC, CHAFSR_UCC_msg },
601 { CHAFSR_UE, CHAFSR_UE_msg },
602 { CHAFSR_EDU, CHAFSR_EDU_msg },
603 { CHAFSR_EMU, CHAFSR_EMU_msg },
604 { CHAFSR_WDU, CHAFSR_WDU_msg },
605 { CHAFSR_CPU, CHAFSR_CPU_msg },
606 { CHAFSR_CE, CHAFSR_CE_msg },
607 { CHAFSR_EDC, CHAFSR_EDC_msg },
608 { CHAFSR_EMC, CHAFSR_EMC_msg },
609 { CHAFSR_WDC, CHAFSR_WDC_msg },
610 { CHAFSR_CPC, CHAFSR_CPC_msg },
611 { CHAFSR_TO, CHAFSR_TO_msg },
612 { CHAFSR_BERR, CHAFSR_BERR_msg },
613 /* These two do not update the AFAR. */
614 { CHAFSR_IVC, CHAFSR_IVC_msg },
615 { CHAFSR_IVU, CHAFSR_IVU_msg },
618 static const char CHPAFSR_DTO_msg[] =
619 "System bus unmapped error for prefetch/storequeue-read";
620 static const char CHPAFSR_DBERR_msg[] =
621 "System bus error for prefetch/storequeue-read";
622 static const char CHPAFSR_THCE_msg[] =
623 "Hardware corrected E-cache Tag ECC error";
624 static const char CHPAFSR_TSCE_msg[] =
625 "SW handled correctable E-cache Tag ECC error";
626 static const char CHPAFSR_TUE_msg[] =
627 "Uncorrectable E-cache Tag ECC error";
628 static const char CHPAFSR_DUE_msg[] =
629 "System bus uncorrectable data ECC error due to prefetch/store-fill";
630 static struct afsr_error_table __cheetah_plus_error_table[] = {
631 { CHAFSR_PERR, CHAFSR_PERR_msg },
632 { CHAFSR_IERR, CHAFSR_IERR_msg },
633 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
634 { CHAFSR_UCU, CHAFSR_UCU_msg },
635 { CHAFSR_UCC, CHAFSR_UCC_msg },
636 { CHAFSR_UE, CHAFSR_UE_msg },
637 { CHAFSR_EDU, CHAFSR_EDU_msg },
638 { CHAFSR_EMU, CHAFSR_EMU_msg },
639 { CHAFSR_WDU, CHAFSR_WDU_msg },
640 { CHAFSR_CPU, CHAFSR_CPU_msg },
641 { CHAFSR_CE, CHAFSR_CE_msg },
642 { CHAFSR_EDC, CHAFSR_EDC_msg },
643 { CHAFSR_EMC, CHAFSR_EMC_msg },
644 { CHAFSR_WDC, CHAFSR_WDC_msg },
645 { CHAFSR_CPC, CHAFSR_CPC_msg },
646 { CHAFSR_TO, CHAFSR_TO_msg },
647 { CHAFSR_BERR, CHAFSR_BERR_msg },
648 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
649 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
650 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
651 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
652 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
653 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
654 /* These two do not update the AFAR. */
655 { CHAFSR_IVC, CHAFSR_IVC_msg },
656 { CHAFSR_IVU, CHAFSR_IVU_msg },
659 static const char JPAFSR_JETO_msg[] =
660 "System interface protocol error, hw timeout caused";
661 static const char JPAFSR_SCE_msg[] =
662 "Parity error on system snoop results";
663 static const char JPAFSR_JEIC_msg[] =
664 "System interface protocol error, illegal command detected";
665 static const char JPAFSR_JEIT_msg[] =
666 "System interface protocol error, illegal ADTYPE detected";
667 static const char JPAFSR_OM_msg[] =
668 "Out of range memory error has occurred";
669 static const char JPAFSR_ETP_msg[] =
670 "Parity error on L2 cache tag SRAM";
671 static const char JPAFSR_UMS_msg[] =
672 "Error due to unsupported store";
673 static const char JPAFSR_RUE_msg[] =
674 "Uncorrectable ECC error from remote cache/memory";
675 static const char JPAFSR_RCE_msg[] =
676 "Correctable ECC error from remote cache/memory";
677 static const char JPAFSR_BP_msg[] =
678 "JBUS parity error on returned read data";
679 static const char JPAFSR_WBP_msg[] =
680 "JBUS parity error on data for writeback or block store";
681 static const char JPAFSR_FRC_msg[] =
682 "Foreign read to DRAM incurring correctable ECC error";
683 static const char JPAFSR_FRU_msg[] =
684 "Foreign read to DRAM incurring uncorrectable ECC error";
685 static struct afsr_error_table __jalapeno_error_table[] = {
686 { JPAFSR_JETO, JPAFSR_JETO_msg },
687 { JPAFSR_SCE, JPAFSR_SCE_msg },
688 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
689 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
690 { CHAFSR_PERR, CHAFSR_PERR_msg },
691 { CHAFSR_IERR, CHAFSR_IERR_msg },
692 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
693 { CHAFSR_UCU, CHAFSR_UCU_msg },
694 { CHAFSR_UCC, CHAFSR_UCC_msg },
695 { CHAFSR_UE, CHAFSR_UE_msg },
696 { CHAFSR_EDU, CHAFSR_EDU_msg },
697 { JPAFSR_OM, JPAFSR_OM_msg },
698 { CHAFSR_WDU, CHAFSR_WDU_msg },
699 { CHAFSR_CPU, CHAFSR_CPU_msg },
700 { CHAFSR_CE, CHAFSR_CE_msg },
701 { CHAFSR_EDC, CHAFSR_EDC_msg },
702 { JPAFSR_ETP, JPAFSR_ETP_msg },
703 { CHAFSR_WDC, CHAFSR_WDC_msg },
704 { CHAFSR_CPC, CHAFSR_CPC_msg },
705 { CHAFSR_TO, CHAFSR_TO_msg },
706 { CHAFSR_BERR, CHAFSR_BERR_msg },
707 { JPAFSR_UMS, JPAFSR_UMS_msg },
708 { JPAFSR_RUE, JPAFSR_RUE_msg },
709 { JPAFSR_RCE, JPAFSR_RCE_msg },
710 { JPAFSR_BP, JPAFSR_BP_msg },
711 { JPAFSR_WBP, JPAFSR_WBP_msg },
712 { JPAFSR_FRC, JPAFSR_FRC_msg },
713 { JPAFSR_FRU, JPAFSR_FRU_msg },
714 /* These two do not update the AFAR. */
715 { CHAFSR_IVU, CHAFSR_IVU_msg },
718 static struct afsr_error_table *cheetah_error_table;
719 static unsigned long cheetah_afsr_errors;
721 struct cheetah_err_info *cheetah_error_log;
723 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
725 struct cheetah_err_info *p;
726 int cpu = smp_processor_id();
728 if (!cheetah_error_log)
731 p = cheetah_error_log + (cpu * 2);
732 if ((afsr & CHAFSR_TL1) != 0UL)
738 extern unsigned int tl0_icpe[], tl1_icpe[];
739 extern unsigned int tl0_dcpe[], tl1_dcpe[];
740 extern unsigned int tl0_fecc[], tl1_fecc[];
741 extern unsigned int tl0_cee[], tl1_cee[];
742 extern unsigned int tl0_iae[], tl1_iae[];
743 extern unsigned int tl0_dae[], tl1_dae[];
744 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
745 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
746 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
747 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
748 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
750 void __init cheetah_ecache_flush_init(void)
752 unsigned long largest_size, smallest_linesize, order, ver;
755 /* Scan all cpu device tree nodes, note two values:
756 * 1) largest E-cache size
757 * 2) smallest E-cache line size
760 smallest_linesize = ~0UL;
762 for (i = 0; i < NR_CPUS; i++) {
765 val = cpu_data(i).ecache_size;
769 if (val > largest_size)
772 val = cpu_data(i).ecache_line_size;
773 if (val < smallest_linesize)
774 smallest_linesize = val;
778 if (largest_size == 0UL || smallest_linesize == ~0UL) {
779 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
784 ecache_flush_size = (2 * largest_size);
785 ecache_flush_linesize = smallest_linesize;
787 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
789 if (ecache_flush_physbase == ~0UL) {
790 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
791 "contiguous physical memory.\n",
796 /* Now allocate error trap reporting scoreboard. */
797 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
798 for (order = 0; order < MAX_ORDER; order++) {
799 if ((PAGE_SIZE << order) >= sz)
802 cheetah_error_log = (struct cheetah_err_info *)
803 __get_free_pages(GFP_KERNEL, order);
804 if (!cheetah_error_log) {
805 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
806 "error logging scoreboard (%d bytes).\n", sz);
809 memset(cheetah_error_log, 0, PAGE_SIZE << order);
811 /* Mark all AFSRs as invalid so that the trap handler will
812 * log new new information there.
814 for (i = 0; i < 2 * NR_CPUS; i++)
815 cheetah_error_log[i].afsr = CHAFSR_INVALID;
817 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
818 if ((ver >> 32) == __JALAPENO_ID ||
819 (ver >> 32) == __SERRANO_ID) {
820 cheetah_error_table = &__jalapeno_error_table[0];
821 cheetah_afsr_errors = JPAFSR_ERRORS;
822 } else if ((ver >> 32) == 0x003e0015) {
823 cheetah_error_table = &__cheetah_plus_error_table[0];
824 cheetah_afsr_errors = CHPAFSR_ERRORS;
826 cheetah_error_table = &__cheetah_error_table[0];
827 cheetah_afsr_errors = CHAFSR_ERRORS;
830 /* Now patch trap tables. */
831 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
832 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
833 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
834 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
835 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
836 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
837 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
838 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
839 if (tlb_type == cheetah_plus) {
840 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
841 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
842 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
843 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
848 static void cheetah_flush_ecache(void)
850 unsigned long flush_base = ecache_flush_physbase;
851 unsigned long flush_linesize = ecache_flush_linesize;
852 unsigned long flush_size = ecache_flush_size;
854 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
855 " bne,pt %%xcc, 1b\n\t"
856 " ldxa [%2 + %0] %3, %%g0\n\t"
858 : "0" (flush_size), "r" (flush_base),
859 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
862 static void cheetah_flush_ecache_line(unsigned long physaddr)
866 physaddr &= ~(8UL - 1UL);
867 physaddr = (ecache_flush_physbase +
868 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
869 alias = physaddr + (ecache_flush_size >> 1UL);
870 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
871 "ldxa [%1] %2, %%g0\n\t"
874 : "r" (physaddr), "r" (alias),
875 "i" (ASI_PHYS_USE_EC));
878 /* Unfortunately, the diagnostic access to the I-cache tags we need to
879 * use to clear the thing interferes with I-cache coherency transactions.
881 * So we must only flush the I-cache when it is disabled.
883 static void __cheetah_flush_icache(void)
885 unsigned int icache_size, icache_line_size;
888 icache_size = local_cpu_data().icache_size;
889 icache_line_size = local_cpu_data().icache_line_size;
891 /* Clear the valid bits in all the tags. */
892 for (addr = 0; addr < icache_size; addr += icache_line_size) {
893 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
896 : "r" (addr | (2 << 3)),
901 static void cheetah_flush_icache(void)
903 unsigned long dcu_save;
905 /* Save current DCU, disable I-cache. */
906 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
907 "or %0, %2, %%g1\n\t"
908 "stxa %%g1, [%%g0] %1\n\t"
911 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
914 __cheetah_flush_icache();
916 /* Restore DCU register */
917 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
920 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
923 static void cheetah_flush_dcache(void)
925 unsigned int dcache_size, dcache_line_size;
928 dcache_size = local_cpu_data().dcache_size;
929 dcache_line_size = local_cpu_data().dcache_line_size;
931 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
932 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
935 : "r" (addr), "i" (ASI_DCACHE_TAG));
939 /* In order to make the even parity correct we must do two things.
940 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
941 * Next, we clear out all 32-bytes of data for that line. Data of
942 * all-zero + tag parity value of zero == correct parity.
944 static void cheetah_plus_zap_dcache_parity(void)
946 unsigned int dcache_size, dcache_line_size;
949 dcache_size = local_cpu_data().dcache_size;
950 dcache_line_size = local_cpu_data().dcache_line_size;
952 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
953 unsigned long tag = (addr >> 14);
956 __asm__ __volatile__("membar #Sync\n\t"
957 "stxa %0, [%1] %2\n\t"
960 : "r" (tag), "r" (addr),
961 "i" (ASI_DCACHE_UTAG));
962 for (line = addr; line < addr + dcache_line_size; line += 8)
963 __asm__ __volatile__("membar #Sync\n\t"
964 "stxa %%g0, [%0] %1\n\t"
968 "i" (ASI_DCACHE_DATA));
972 /* Conversion tables used to frob Cheetah AFSR syndrome values into
973 * something palatable to the memory controller driver get_unumber
997 static unsigned char cheetah_ecc_syntab[] = {
998 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
999 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1000 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1001 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1002 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1003 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1004 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1005 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1006 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1007 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1008 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1009 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1010 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1011 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1012 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1013 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1014 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1015 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1016 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1017 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1018 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1019 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1020 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1021 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1022 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1023 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1024 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1025 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1026 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1027 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1028 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1029 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1031 static unsigned char cheetah_mtag_syntab[] = {
1042 /* Return the highest priority error conditon mentioned. */
1043 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1045 unsigned long tmp = 0;
1048 for (i = 0; cheetah_error_table[i].mask; i++) {
1049 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1055 static const char *cheetah_get_string(unsigned long bit)
1059 for (i = 0; cheetah_error_table[i].mask; i++) {
1060 if ((bit & cheetah_error_table[i].mask) != 0UL)
1061 return cheetah_error_table[i].name;
/* Resolves a memory-error syndrome to a module "unumber" string;
 * implemented by the sparc64 memory controller driver.
 */
1066 extern int chmc_getunumber(int, unsigned long, char *, int);
/* Print a full report of a Cheetah CPU error trap to the kernel log:
 * AFSR/AFAR, trap PC/NPC state, M/E syndromes, the highest-priority
 * error, the DIMM unumber when the syndrome permits a lookup, the
 * D-/I-/E-cache snapshots captured in @info, and any remaining
 * lower-priority error bits.  Every line is emitted at KERN_WARNING
 * when @recoverable is set, KERN_CRIT otherwise.
 */
1068 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1069 unsigned long afsr, unsigned long afar, int recoverable)
1071 unsigned long hipri;
1074 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1075 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1077 (afsr & CHAFSR_TL1) ? 1 : 0);
1078 printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1079 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1080 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
1081 printk("%s" "ERROR(%d): ",
1082 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1083 printk("TPC<%pS>\n", (void *) regs->tpc);
1084 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1085 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1086 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1087 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1088 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1089 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1090 hipri = cheetah_get_hipri(afsr);
1091 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1092 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1093 hipri, cheetah_get_string(hipri));
1095 /* Try to get unumber if relevant. */
1096 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1097 CHAFSR_CPC | CHAFSR_CPU | \
1098 CHAFSR_UE | CHAFSR_CE | \
1099 CHAFSR_EDC | CHAFSR_EDU | \
1100 CHAFSR_UCC | CHAFSR_UCU | \
1101 CHAFSR_WDU | CHAFSR_WDC)
1102 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1103 if (afsr & ESYND_ERRORS) {
/* ECC-syndrome errors: decode via the E-syndrome table. */
1107 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1108 syndrome = cheetah_ecc_syntab[syndrome];
1109 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1111 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1112 (recoverable ? KERN_WARNING : KERN_CRIT),
1113 smp_processor_id(), unum);
1114 } else if (afsr & MSYND_ERRORS) {
/* Mtag-syndrome errors: decode via the M-syndrome table. */
1118 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1119 syndrome = cheetah_mtag_syntab[syndrome];
1120 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1122 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1123 (recoverable ? KERN_WARNING : KERN_CRIT),
1124 smp_processor_id(), unum);
1127 /* Now dump the cache snapshots. */
1128 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1129 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1130 (int) info->dcache_index,
1134 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1135 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1136 info->dcache_data[0],
1137 info->dcache_data[1],
1138 info->dcache_data[2],
1139 info->dcache_data[3]);
1140 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1141 "u[%016lx] l[%016lx]\n",
1142 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1143 (int) info->icache_index,
1148 info->icache_lower);
1149 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1150 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1151 info->icache_data[0],
1152 info->icache_data[1],
1153 info->icache_data[2],
1154 info->icache_data[3]);
1155 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1156 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1157 info->icache_data[4],
1158 info->icache_data[5],
1159 info->icache_data[6],
1160 info->icache_data[7]);
1161 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1162 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1163 (int) info->ecache_index, info->ecache_tag);
1164 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1165 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1166 info->ecache_data[0],
1167 info->ecache_data[1],
1168 info->ecache_data[2],
1169 info->ecache_data[3]);
/* Strip the already-reported highest-priority bit, then report every
 * remaining error bit one at a time in priority order.
 */
1171 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1172 while (afsr != 0UL) {
1173 unsigned long bit = cheetah_get_hipri(afsr);
1175 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1176 (recoverable ? KERN_WARNING : KERN_CRIT),
1177 bit, cheetah_get_string(bit));
1183 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
/* Re-read the AFSR directly (ldxa from ASI_AFSR).  If any bits in
 * cheetah_afsr_errors are still set, the AFAR is captured as well and
 * the observed AFSR bits are written back (stxa), which clears them in
 * the hardware register.  @logp may be NULL when the caller only wants
 * the check.  Returns nonzero when new errors were found — presumably;
 * the return statements are not visible here, confirm against callers.
 */
1186 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1188 unsigned long afsr, afar;
1191 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1194 if ((afsr & cheetah_afsr_errors) != 0) {
1196 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1204 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1206 : : "r" (afsr), "i" (ASI_AFSR));
/* Handle a Cheetah Fast-ECC error trap.
 *
 * Sequence: flush the E-cache, snapshot the per-cpu error log entry and
 * invalidate it (so new traps can be logged), flush and re-enable the
 * I-/D-caches, re-enable NCEEN/CEEN error reporting, decide whether the
 * trap is recoverable, re-check AFSR/AFAR for errors that arrived while
 * reporting was disabled, log everything, and panic when irrecoverable.
 */
1211 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1213 struct cheetah_err_info local_snapshot, *p;
1217 cheetah_flush_ecache();
1219 p = cheetah_get_error_log(afsr);
/* No log slot for this AFSR: report via PROM and (presumably) halt. */
1221 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1223 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1224 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1228 /* Grab snapshot of logged error. */
1229 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1231 /* If the current trap snapshot does not match what the
1232 * trap handler passed along into our args, big trouble.
1233 * In such a case, mark the local copy as invalid.
1235 * Else, it matches and we mark the afsr in the non-local
1236 * copy as invalid so we may log new error traps there.
1238 if (p->afsr != afsr || p->afar != afar)
1239 local_snapshot.afsr = CHAFSR_INVALID;
1241 p->afsr = CHAFSR_INVALID;
1243 cheetah_flush_icache();
1244 cheetah_flush_dcache();
1246 /* Re-enable I-cache/D-cache */
1247 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1248 "or %%g1, %1, %%g1\n\t"
1249 "stxa %%g1, [%%g0] %0\n\t"
1252 : "i" (ASI_DCU_CONTROL_REG),
1253 "i" (DCU_DC | DCU_IC)
1256 /* Re-enable error reporting */
1257 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1258 "or %%g1, %1, %%g1\n\t"
1259 "stxa %%g1, [%%g0] %0\n\t"
1262 : "i" (ASI_ESTATE_ERROR_EN),
1263 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1266 /* Decide if we can continue after handling this trap and
1267 * logging the error.
/* PERR/IERR/ISAP indicate corrupted processor state: irrecoverable. */
1270 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1273 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1274 * error was logged while we had error reporting traps disabled.
1276 if (cheetah_recheck_errors(&local_snapshot)) {
1277 unsigned long new_afsr = local_snapshot.afsr;
1279 /* If we got a new asynchronous error, die... */
1280 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1281 CHAFSR_WDU | CHAFSR_CPU |
1282 CHAFSR_IVU | CHAFSR_UE |
1283 CHAFSR_BERR | CHAFSR_TO))
1288 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1291 panic("Irrecoverable Fast-ECC error trap.\n");
1293 /* Flush E-cache to kick the error trap handlers out. */
1294 cheetah_flush_ecache();
1297 /* Try to fix a correctable error by pushing the line out from
1298 * the E-cache. Recheck error reporting registers to see if the
1299 * problem is intermittent.
/* @physaddr: physical address of the cache line that took the CE.
 * CE traps are disabled around the displacement-flush so the fix
 * attempt itself cannot recurse.
 */
1301 static int cheetah_fix_ce(unsigned long physaddr)
1303 unsigned long orig_estate;
1304 unsigned long alias1, alias2;
1307 /* Make sure correctable error traps are disabled. */
1308 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1309 "andn %0, %1, %%g1\n\t"
1310 "stxa %%g1, [%%g0] %2\n\t"
1312 : "=&r" (orig_estate)
1313 : "i" (ESTATE_ERROR_CEEN),
1314 "i" (ASI_ESTATE_ERROR_EN)
1317 /* We calculate alias addresses that will force the
1318 * cache line in question out of the E-cache. Then
1319 * we bring it back in with an atomic instruction so
1320 * that we get it in some modified/exclusive state,
1321 * then we displace it again to try and get proper ECC
1322 * pushed back into the system.
/* Align to the 8-byte doubleword containing the error. */
1324 physaddr &= ~(8UL - 1UL);
1325 alias1 = (ecache_flush_physbase +
1326 (physaddr & ((ecache_flush_size >> 1) - 1)));
1327 alias2 = alias1 + (ecache_flush_size >> 1);
1328 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1329 "ldxa [%1] %3, %%g0\n\t"
1330 "casxa [%2] %3, %%g0, %%g0\n\t"
1331 "membar #StoreLoad | #StoreStore\n\t"
1332 "ldxa [%0] %3, %%g0\n\t"
1333 "ldxa [%1] %3, %%g0\n\t"
1336 : "r" (alias1), "r" (alias2),
1337 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1339 /* Did that trigger another error? */
1340 if (cheetah_recheck_errors(NULL)) {
1341 /* Try one more time. */
1342 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1344 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1345 if (cheetah_recheck_errors(NULL))
1350 /* No new error, intermittent problem. */
1354 /* Restore error enables. */
1355 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1357 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1362 /* Return non-zero if PADDR is a valid physical memory address. */
/* Translate the physical address into the kernel linear mapping and
 * reject anything at or beyond high_memory before asking
 * kern_addr_valid() whether the page is really mapped.
 */
1363 static int cheetah_check_main_memory(unsigned long paddr)
1365 unsigned long vaddr = PAGE_OFFSET + paddr;
1367 if (vaddr > (unsigned long) high_memory)
1370 return kern_addr_valid(vaddr);
/* Handle a Cheetah Correctable-ECC error trap.
 *
 * After snapshotting and releasing the per-cpu error log entry, a CE in
 * main memory is repaired in place via cheetah_fix_ce().  EDC/CPC
 * errors instead decide between a full E-cache flush and a single-line
 * flush.  The I-cache (the only cache the trap handler disabled here)
 * is flushed and re-enabled, CEEN reporting is restored, and the error
 * is logged; PERR/IERR/ISAP make the trap irrecoverable -> panic.
 */
1373 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1375 struct cheetah_err_info local_snapshot, *p;
1376 int recoverable, is_memory;
1378 p = cheetah_get_error_log(afsr);
/* No log slot for this AFSR: report via PROM and (presumably) halt. */
1380 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1382 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1383 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1387 /* Grab snapshot of logged error. */
1388 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1390 /* If the current trap snapshot does not match what the
1391 * trap handler passed along into our args, big trouble.
1392 * In such a case, mark the local copy as invalid.
1394 * Else, it matches and we mark the afsr in the non-local
1395 * copy as invalid so we may log new error traps there.
1397 if (p->afsr != afsr || p->afar != afar)
1398 local_snapshot.afsr = CHAFSR_INVALID;
1400 p->afsr = CHAFSR_INVALID;
1402 is_memory = cheetah_check_main_memory(afar);
1404 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1405 /* XXX Might want to log the results of this operation
1406 * XXX somewhere... -DaveM
1408 cheetah_fix_ce(afar);
1412 int flush_all, flush_line;
1414 flush_all = flush_line = 0;
1415 if ((afsr & CHAFSR_EDC) != 0UL) {
/* Flush just the line when EDC is the sole error, else everything. */
1416 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1420 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1421 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1427 /* Trap handler only disabled I-cache, flush it. */
1428 cheetah_flush_icache();
1430 /* Re-enable I-cache */
1431 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1432 "or %%g1, %1, %%g1\n\t"
1433 "stxa %%g1, [%%g0] %0\n\t"
1436 : "i" (ASI_DCU_CONTROL_REG),
1441 cheetah_flush_ecache();
1442 else if (flush_line)
1443 cheetah_flush_ecache_line(afar);
1446 /* Re-enable error reporting */
1447 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1448 "or %%g1, %1, %%g1\n\t"
1449 "stxa %%g1, [%%g0] %0\n\t"
1452 : "i" (ASI_ESTATE_ERROR_EN),
1453 "i" (ESTATE_ERROR_CEEN)
1456 /* Decide if we can continue after handling this trap and
1457 * logging the error.
1460 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1463 /* Re-check AFSR/AFAR */
1464 (void) cheetah_recheck_errors(&local_snapshot);
1467 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1470 panic("Irrecoverable Correctable-ECC error trap.\n");
/* Handle a Cheetah deferred asynchronous error trap.
 *
 * A fault during a PCI config/IO "poke" is expected: the caches and
 * error reporting are restored, pci_poke_faulted is flagged, and the
 * faulting instruction is skipped.  Otherwise the error is logged like
 * the other handlers; recovery is attempted only when the AFAR points
 * at main memory and we either trapped from user mode or can apply a
 * kernel exception-table fixup (see the long comment below).
 */
1473 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1475 struct cheetah_err_info local_snapshot, *p;
1476 int recoverable, is_memory;
1479 /* Check for the special PCI poke sequence. */
1480 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1481 cheetah_flush_icache();
1482 cheetah_flush_dcache();
1484 /* Re-enable I-cache/D-cache */
1485 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1486 "or %%g1, %1, %%g1\n\t"
1487 "stxa %%g1, [%%g0] %0\n\t"
1490 : "i" (ASI_DCU_CONTROL_REG),
1491 "i" (DCU_DC | DCU_IC)
1494 /* Re-enable error reporting */
1495 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1496 "or %%g1, %1, %%g1\n\t"
1497 "stxa %%g1, [%%g0] %0\n\t"
1500 : "i" (ASI_ESTATE_ERROR_EN),
1501 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
/* Clear any latched error state; result deliberately ignored. */
1504 (void) cheetah_recheck_errors(NULL);
1506 pci_poke_faulted = 1;
/* Skip the faulting instruction. */
1508 regs->tnpc = regs->tpc + 4;
1513 p = cheetah_get_error_log(afsr);
1515 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1517 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1518 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1522 /* Grab snapshot of logged error. */
1523 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1525 /* If the current trap snapshot does not match what the
1526 * trap handler passed along into our args, big trouble.
1527 * In such a case, mark the local copy as invalid.
1529 * Else, it matches and we mark the afsr in the non-local
1530 * copy as invalid so we may log new error traps there.
1532 if (p->afsr != afsr || p->afar != afar)
1533 local_snapshot.afsr = CHAFSR_INVALID;
1535 p->afsr = CHAFSR_INVALID;
1537 is_memory = cheetah_check_main_memory(afar);
1540 int flush_all, flush_line;
1542 flush_all = flush_line = 0;
1543 if ((afsr & CHAFSR_EDU) != 0UL) {
/* Flush just the line when EDU/BERR is the sole error, else all. */
1544 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU
1548 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1549 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR
1555 cheetah_flush_icache();
1556 cheetah_flush_dcache();
1558 /* Re-enable I/D caches */
1559 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1560 "or %%g1, %1, %%g1\n\t"
1561 "stxa %%g1, [%%g0] %0\n\t"
1564 : "i" (ASI_DCU_CONTROL_REG),
1565 "i" (DCU_IC | DCU_DC)
1569 cheetah_flush_ecache();
1570 else if (flush_line)
1571 cheetah_flush_ecache_line(afar);
1574 /* Re-enable error reporting */
1575 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1576 "or %%g1, %1, %%g1\n\t"
1577 "stxa %%g1, [%%g0] %0\n\t"
1580 : "i" (ASI_ESTATE_ERROR_EN),
1581 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1584 /* Decide if we can continue after handling this trap and
1585 * logging the error.
1588 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1591 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1592 * error was logged while we had error reporting traps disabled.
1594 if (cheetah_recheck_errors(&local_snapshot)) {
1595 unsigned long new_afsr = local_snapshot.afsr;
1597 /* If we got a new asynchronous error, die... */
1598 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1599 CHAFSR_WDU | CHAFSR_CPU |
1600 CHAFSR_IVU | CHAFSR_UE |
1601 CHAFSR_BERR | CHAFSR_TO))
1606 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1608 /* "Recoverable" here means we try to yank the page from ever
1609 * being newly used again. This depends upon a few things:
1610 * 1) Must be main memory, and AFAR must be valid.
1611 * 2) If we trapped from user, OK.
1612 * 3) Else, if we trapped from kernel we must find exception
1613 * table entry (ie. we have to have been accessing user
1616 * If AFAR is not in main memory, or we trapped from kernel
1617 * and cannot find an exception table entry, it is unacceptable
1618 * to try and continue.
1620 if (recoverable && is_memory) {
1621 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1622 /* OK, usermode access. */
1625 const struct exception_table_entry *entry;
1627 entry = search_exception_tables(regs->tpc);
1629 /* OK, kernel access to userspace. */
1633 /* BAD, privileged state is corrupted. */
/* Pin the bad page so it can never be handed out again. */
1638 if (pfn_valid(afar >> PAGE_SHIFT))
1639 get_page(pfn_to_page(afar >> PAGE_SHIFT));
1643 /* Only perform fixup if we still have a
1644 * recoverable condition.
1647 regs->tpc = entry->fixup;
1648 regs->tnpc = regs->tpc + 4;
1657 panic("Irrecoverable deferred error trap.\n");
1660 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1662 * Bit0: 0=dcache,1=icache
1663 * Bit1: 0=recoverable,1=unrecoverable
1665 * The hardware has disabled both the I-cache and D-cache in
1666 * the %dcr register.
/* Flush the affected caches, re-enable them, then either panic
 * (unrecoverable) or log a warning and continue.
 */
1668 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1671 __cheetah_flush_icache();
/* D-cache: clear the bad parity state before flushing. */
1673 cheetah_plus_zap_dcache_parity();
1674 cheetah_flush_dcache();
1676 /* Re-enable I-cache/D-cache */
1677 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1678 "or %%g1, %1, %%g1\n\t"
1679 "stxa %%g1, [%%g0] %0\n\t"
1682 : "i" (ASI_DCU_CONTROL_REG),
1683 "i" (DCU_DC | DCU_IC)
/* Unrecoverable (bit1 set): report and panic. */
1687 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1689 (type & 0x1) ? 'I' : 'D',
1691 printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1692 panic("Irrecoverable Cheetah+ parity error.");
/* Recoverable: warn and return. */
1695 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1697 (type & 0x1) ? 'I' : 'D',
1699 printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
/* One entry of the sun4v hypervisor error queue (resumable or
 * non-resumable); layout defined by the sun4v hypervisor ABI.
 */
1702 struct sun4v_error_entry {
/* Values for sun4v_error_entry.err_type. */
1707 #define SUN4V_ERR_TYPE_UNDEFINED 0
1708 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1709 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1710 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1711 #define SUN4V_ERR_TYPE_WARNING_RES 4
/* Bit flags for sun4v_error_entry.err_attrs. */
1714 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1715 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1716 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1717 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1718 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1719 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1720 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1721 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
/* Queue-overflow counters, reported and reset by sun4v_log_error(). */
1729 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1730 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
/* Translate a SUN4V_ERR_TYPE_* code into a human-readable string for
 * the error report.
 */
1732 static const char *sun4v_err_type_to_str(u32 type)
1735 case SUN4V_ERR_TYPE_UNDEFINED:
1737 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1738 return "uncorrected resumable";
1739 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1740 return "precise nonresumable";
1741 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1742 return "deferred nonresumable";
1743 case SUN4V_ERR_TYPE_WARNING_RES:
1744 return "warning resumable";
/* Print a report for one sun4v error queue entry @ent taken on @cpu,
 * prefixing every line with @pfx (which carries the KERN_* level).
 * @ocnt is the matching queue-overflow counter; if it is nonzero the
 * overflow count is reported and the counter reset.
 */
1750 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1754 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1755 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1757 ent->err_handle, ent->err_stick,
1759 sun4v_err_type_to_str(ent->err_type));
1760 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1763 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1765 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1767 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1769 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1770 "integer-regs" : ""),
1771 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1773 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1775 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1777 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1778 "queue-full" : ""));
1779 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1781 ent->err_raddr, ent->err_size, ent->err_cpu);
/* Lazily report how many entries were dropped due to queue overflow. */
1785 if ((cnt = atomic_read(ocnt)) != 0) {
1786 atomic_set(ocnt, 0);
1788 printk("%s: Queue overflowed %d times.\n",
1793 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1794 * Log the event and clear the first word of the entry.
/* Handle a sun4v resumable error interrupt.  @offset locates the queue
 * entry inside this cpu's resumable kernel buffer.  The entry is
 * copied out, then released by zeroing err_handle so the queue slot can
 * be reused.  err_type 4 (warning resumable) is a power-down request
 * and is handled by signalling the cad pid instead of logging.
 */
1796 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1798 struct sun4v_error_entry *ent, local_copy;
1799 struct trap_per_cpu *tb;
1800 unsigned long paddr;
1805 tb = &trap_block[cpu];
1806 paddr = tb->resum_kernel_buf_pa + offset;
1809 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1811 /* We have a local copy now, so release the entry. */
1812 ent->err_handle = 0;
/* NOTE(review): this reads ent->err_type AFTER the entry was released
 * above; it should probably use local_copy.err_type — confirm.
 */
1817 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1818 /* If err_type is 0x4, it's a powerdown request. Do
1819 * not do the usual resumable error log because that
1820 * makes it look like some abnormal error.
1822 printk(KERN_INFO "Power down request...\n");
1823 kill_cad_pid(SIGINT, 1);
1827 sun4v_log_error(regs, &local_copy, cpu,
1828 KERN_ERR "RESUMABLE ERROR",
1829 &sun4v_resum_oflow_cnt);
1832 /* If we try to printk() we'll probably make matters worse, by trying
1833 * to retake locks this cpu already holds or causing more errors. So
1834 * just bump a counter, and we'll report these counter bumps above.
/* Resumable error queue overflowed: count it for later reporting. */
1836 void sun4v_resum_overflow(struct pt_regs *regs)
1838 atomic_inc(&sun4v_resum_oflow_cnt);
1841 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1842 * Log the event, clear the first word of the entry, and die.
/* Handle a sun4v non-resumable error.  Like sun4v_resum_error() the
 * queue entry is copied out and released, but the only survivable case
 * is a fault during a PCI poke (the faulting instruction is skipped);
 * everything else is logged and panics.
 */
1844 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1846 struct sun4v_error_entry *ent, local_copy;
1847 struct trap_per_cpu *tb;
1848 unsigned long paddr;
1853 tb = &trap_block[cpu];
1854 paddr = tb->nonresum_kernel_buf_pa + offset;
1857 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1859 /* We have a local copy now, so release the entry. */
1860 ent->err_handle = 0;
1866 /* Check for the special PCI poke sequence. */
1867 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1868 pci_poke_faulted = 1;
/* Skip the faulting instruction. */
1870 regs->tnpc = regs->tpc + 4;
1875 sun4v_log_error(regs, &local_copy, cpu,
1876 KERN_EMERG "NON-RESUMABLE ERROR",
1877 &sun4v_nonresum_oflow_cnt);
1879 panic("Non-resumable error.");
1882 /* If we try to printk() we'll probably make matters worse, by trying
1883 * to retake locks this cpu already holds or causing more errors. So
1884 * just bump a counter, and we'll report these counter bumps above.
/* Non-resumable error queue overflowed: count it for later reporting. */
1886 void sun4v_nonresum_overflow(struct pt_regs *regs)
1888 /* XXX Actually even this can make not that much sense. Perhaps
1889 * XXX we should just pull the plug and panic directly from here?
1891 atomic_inc(&sun4v_nonresum_oflow_cnt);
/* ITLB miss error state; presumably stored here by the low-level sun4v
 * trap entry code before calling sun4v_itlb_error_report() — confirm
 * against the assembly.
 */
1894 unsigned long sun4v_err_itlb_vaddr;
1895 unsigned long sun4v_err_itlb_ctx;
1896 unsigned long sun4v_err_itlb_pte;
1897 unsigned long sun4v_err_itlb_error;
/* Report a fatal sun4v ITLB operation error taken at trap level @tl;
 * dumps the TL1 trap stack when tl > 1.
 */
1899 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1902 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1904 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1906 printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
1907 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1908 printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
1909 (void *) regs->u_regs[UREG_I7]);
1910 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1911 "pte[%lx] error[%lx]\n",
1912 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1913 sun4v_err_itlb_pte, sun4v_err_itlb_error);
/* DTLB miss error state; presumably stored here by the low-level sun4v
 * trap entry code before calling sun4v_dtlb_error_report() — confirm
 * against the assembly.
 */
1918 unsigned long sun4v_err_dtlb_vaddr;
1919 unsigned long sun4v_err_dtlb_ctx;
1920 unsigned long sun4v_err_dtlb_pte;
1921 unsigned long sun4v_err_dtlb_error;
/* Report a fatal sun4v DTLB operation error taken at trap level @tl;
 * dumps the TL1 trap stack when tl > 1.
 */
1923 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1926 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1928 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1930 printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
1931 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1932 printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
1933 (void *) regs->u_regs[UREG_I7]);
1934 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1935 "pte[%lx] error[%lx]\n",
1936 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1937 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
/* Report a failed sun4v hypervisor TLB operation: @err is the hv error
 * code, @op identifies the attempted TLB operation.
 */
1942 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1944 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
/* Same as hypervisor_tlbop_error(), but for cross-call TLB ops. */
1948 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1950 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
/* Common FPU exception handling.  In privileged mode the faulting
 * instruction is stepped over (tpc advanced to tnpc); in user mode a
 * SIGFPE is raised with si_code decoded from the saved %fsr.
 */
1954 void do_fpe_common(struct pt_regs *regs)
1956 if (regs->tstate & TSTATE_PRIV) {
1957 regs->tpc = regs->tnpc;
1960 unsigned long fsr = current_thread_info()->xfsr[0];
1963 if (test_thread_flag(TIF_32BIT)) {
1964 regs->tpc &= 0xffffffff;
1965 regs->tnpc &= 0xffffffff;
1967 info.si_signo = SIGFPE;
1969 info.si_addr = (void __user *)regs->tpc;
1971 info.si_code = __SI_FAULT;
/* FSR.ftt == 1 (IEEE_754_exception): decode the cexc trap bits into a
 * specific si_code.
 */
1972 if ((fsr & 0x1c000) == (1 << 14)) {
1974 info.si_code = FPE_FLTINV;
1975 else if (fsr & 0x08)
1976 info.si_code = FPE_FLTOVF;
1977 else if (fsr & 0x04)
1978 info.si_code = FPE_FLTUND;
1979 else if (fsr & 0x02)
1980 info.si_code = FPE_FLTDIV;
1981 else if (fsr & 0x01)
1982 info.si_code = FPE_FLTRES;
1984 force_sig_info(SIGFPE, &info, current);
/* IEEE FPU exception trap (tt 0x24): give kdebug listeners a chance,
 * then use the common FPE path.
 */
1988 void do_fpieee(struct pt_regs *regs)
1990 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1991 0, 0x24, SIGFPE) == NOTIFY_STOP)
1994 do_fpe_common(regs);
/* FPU instruction emulator (software completion of FP ops). */
1997 extern int do_mathemu(struct pt_regs *, struct fpustate *);
/* "FP exception other" trap (tt 0x25): for ftt values 2 (unfinished
 * FPop) and 3 (unimplemented FPop) try software emulation first; on
 * failure (or any other ftt) fall through to the common FPE path.
 */
1999 void do_fpother(struct pt_regs *regs)
2001 struct fpustate *f = FPUSTATE;
2004 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2005 0, 0x25, SIGFPE) == NOTIFY_STOP)
2008 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2009 case (2 << 14): /* unfinished_FPop */
2010 case (3 << 14): /* unimplemented_FPop */
2011 ret = do_mathemu(regs, f);
2016 do_fpe_common(regs);
/* Tagged arithmetic overflow trap (tt 0x26).  Fatal in kernel mode;
 * otherwise deliver SIGEMT/EMT_TAGOVF to the user task.
 */
2019 void do_tof(struct pt_regs *regs)
2023 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2024 0, 0x26, SIGEMT) == NOTIFY_STOP)
2027 if (regs->tstate & TSTATE_PRIV)
2028 die_if_kernel("Penguin overflow trap from kernel mode", regs);
/* 32-bit tasks: truncate PCs to the low 32 bits. */
2029 if (test_thread_flag(TIF_32BIT)) {
2030 regs->tpc &= 0xffffffff;
2031 regs->tnpc &= 0xffffffff;
2033 info.si_signo = SIGEMT;
2035 info.si_code = EMT_TAGOVF;
2036 info.si_addr = (void __user *)regs->tpc;
2038 force_sig_info(SIGEMT, &info, current);
/* Integer divide-by-zero trap (tt 0x28).  Fatal in kernel mode;
 * otherwise deliver SIGFPE/FPE_INTDIV to the user task.
 */
2041 void do_div0(struct pt_regs *regs)
2045 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2046 0, 0x28, SIGFPE) == NOTIFY_STOP)
2049 if (regs->tstate & TSTATE_PRIV)
2050 die_if_kernel("TL0: Kernel divide by zero.", regs);
2051 if (test_thread_flag(TIF_32BIT)) {
2052 regs->tpc &= 0xffffffff;
2053 regs->tnpc &= 0xffffffff;
2055 info.si_signo = SIGFPE;
2057 info.si_code = FPE_INTDIV;
2058 info.si_addr = (void __user *)regs->tpc;
2060 force_sig_info(SIGFPE, &info, current);
/* Dump the 9 kernel instructions surrounding @pc (pc[-3]..pc[5]),
 * bracketing the faulting one with '<' '>'.  Bails out on a
 * misaligned pc.
 */
2063 static void instruction_dump(unsigned int *pc)
2067 if ((((unsigned long) pc) & 3))
2070 printk("Instruction DUMP:");
2071 for (i = -3; i < 6; i++)
2072 printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
/* Dump the 9 user instructions around @pc, copied safely with
 * copy_from_user(); buf[3] corresponds to the faulting pc.
 * NOTE(review): the bracket marker here is inverted relative to
 * instruction_dump() (every word EXCEPT buf[3] gets '<' '>'); confirm
 * whether that formatting is intentional.
 */
2076 static void user_instruction_dump(unsigned int __user *pc)
2079 unsigned int buf[9];
2081 if ((((unsigned long) pc) & 3))
2084 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2087 printk("Instruction DUMP:");
2088 for (i = 0; i < 9; i++)
2089 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
/* Print a call trace for @tsk starting from stack pointer @_ksp (NULL
 * means the current frame for the current task).  Walks saved frame
 * pointers (biased by STACK_BIAS), recognizing embedded pt_regs frames
 * by their magic value, and stops at a bogus frame pointer or after 16
 * frames.
 */
2093 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2095 unsigned long fp, thread_base, ksp;
2096 struct thread_info *tp;
2099 ksp = (unsigned long) _ksp;
2102 tp = task_thread_info(tsk);
/* No ksp given: take the live frame pointer. */
2105 asm("mov %%fp, %0" : "=r" (ksp));
2109 if (tp == current_thread_info())
2112 fp = ksp + STACK_BIAS;
2113 thread_base = (unsigned long) tp;
2115 printk("Call Trace:\n");
2117 struct sparc_stackf *sf;
2118 struct pt_regs *regs;
2121 /* Bogus frame pointer? */
2122 if (fp < (thread_base + sizeof(struct thread_info)) ||
2123 fp >= (thread_base + THREAD_SIZE))
2125 sf = (struct sparc_stackf *) fp;
2126 regs = (struct pt_regs *) (sf + 1);
/* A trap frame follows this stack frame: cross it, unless it came
 * from user mode (end of kernel trace).
 */
2128 if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC) {
2129 if (!(regs->tstate & TSTATE_PRIV))
2132 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2134 pc = sf->callers_pc;
2135 fp = (unsigned long)sf->fp + STACK_BIAS;
2138 printk(" [%016lx] %pS\n", pc, (void *) pc);
2139 } while (++count < 16);
/* Dump the current task's kernel stack trace. */
2142 void dump_stack(void)
2144 show_stack(current, NULL);
2147 EXPORT_SYMBOL(dump_stack);
/* Return nonzero if register window @rw lies within @task's kernel
 * stack (or, for init_task only, below PAGE_OFFSET is also tolerated).
 * Used to bound the backtrace in die_if_kernel().
 */
2149 static inline int is_kernel_stack(struct task_struct *task,
2150 struct reg_window *rw)
2152 unsigned long rw_addr = (unsigned long) rw;
2153 unsigned long thread_base, thread_end;
2155 if (rw_addr < PAGE_OFFSET) {
2156 if (task != &init_task)
2160 thread_base = (unsigned long) task_stack_page(task);
2161 thread_end = thread_base + sizeof(union thread_union);
2162 if (rw_addr >= thread_base &&
2163 rw_addr < thread_end &&
/* Follow the saved frame pointer (%i6) of @rw up one stack frame,
 * applying STACK_BIAS to recover the real address.
 */
2170 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2172 unsigned long fp = rw->ins[6];
2177 return (struct reg_window *) (fp + STACK_BIAS);
/* Oops handler: print @str with an oops banner and counter, notify the
 * die chain, taint the kernel, backtrace the register windows when the
 * trap came from kernel mode, dump the faulting instructions, and
 * (for a kernel-mode trap) terminate.
 */
2180 void die_if_kernel(char *str, struct pt_regs *regs)
2182 static int die_counter;
2183 extern void smp_report_regs(void);
2186 /* Amuse the user. */
2189 " \"@'/ .. \\`@\"\n"
2193 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2194 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
/* Spill all register windows to the stack so we can walk them. */
2195 __asm__ __volatile__("flushw");
2197 add_taint(TAINT_DIE);
2198 if (regs->tstate & TSTATE_PRIV) {
2199 struct reg_window *rw = (struct reg_window *)
2200 (regs->u_regs[UREG_FP] + STACK_BIAS);
2202 /* Stop the back trace when we hit userland or we
2203 * find some badly aligned kernel stack.
2207 is_kernel_stack(current, rw)) {
2208 printk("Caller[%016lx]: %pS\n", rw->ins[7],
2209 (void *) rw->ins[7]);
2211 rw = kernel_stack_up(rw);
2213 instruction_dump ((unsigned int *) regs->tpc);
2215 if (test_thread_flag(TIF_32BIT)) {
2216 regs->tpc &= 0xffffffff;
2217 regs->tnpc &= 0xffffffff;
2219 user_instruction_dump ((unsigned int __user *) regs->tpc);
/* Kernel-mode trap: do not return to the faulting context. */
2226 if (regs->tstate & TSTATE_PRIV)
/* Opcode mask/value identifying sun4v VIS instructions that must be
 * emulated in software.
 */
2231 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2232 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
/* Software emulators for instructions some cpus lack. */
2234 extern int handle_popc(u32 insn, struct pt_regs *regs);
2235 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2236 extern int vis_emul(struct pt_regs *, unsigned int);
/* Illegal instruction trap (tt 0x10).  Fatal in kernel mode.  For user
 * mode, tries to emulate POPC, LDQ/STQ, (on hypervisor) VIS ops, or a
 * floating point op via do_mathemu(); if nothing handles the insn,
 * delivers SIGILL/ILL_ILLOPC.
 */
2238 void do_illegal_instruction(struct pt_regs *regs)
2240 unsigned long pc = regs->tpc;
2241 unsigned long tstate = regs->tstate;
2245 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2246 0, 0x10, SIGILL) == NOTIFY_STOP)
2249 if (tstate & TSTATE_PRIV)
2250 die_if_kernel("Kernel illegal instruction", regs);
2251 if (test_thread_flag(TIF_32BIT))
2253 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2254 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2255 if (handle_popc(insn, regs))
2257 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2258 if (handle_ldf_stq(insn, regs))
2260 } else if (tlb_type == hypervisor) {
2261 if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2262 if (!vis_emul(regs, insn))
2265 struct fpustate *f = FPUSTATE;
2267 /* XXX maybe verify XFSR bits like
2268 * XXX do_fpother() does?
2270 if (do_mathemu(regs, f))
2275 info.si_signo = SIGILL;
2277 info.si_code = ILL_ILLOPC;
2278 info.si_addr = (void __user *)pc;
2280 force_sig_info(SIGILL, &info, current);
/* Software fixup for unaligned kernel loads/stores. */
2283 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
/* Memory-address-not-aligned trap (tt 0x34) on sun4u.  Kernel-mode
 * accesses are emulated; user mode gets SIGBUS/BUS_ADRALN with the
 * faulting address from the SFAR.
 */
2285 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2289 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2290 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2293 if (regs->tstate & TSTATE_PRIV) {
2294 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2297 info.si_signo = SIGBUS;
2299 info.si_code = BUS_ADRALN;
2300 info.si_addr = (void __user *)sfar;
2302 force_sig_info(SIGBUS, &info, current);
/* sun4v variant of the unaligned-access trap: same policy as
 * mem_address_unaligned(), with the fault address supplied directly by
 * the hypervisor.
 */
2305 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2309 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2310 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2313 if (regs->tstate & TSTATE_PRIV) {
2314 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2317 info.si_signo = SIGBUS;
2319 info.si_code = BUS_ADRALN;
2320 info.si_addr = (void __user *) addr;
2322 force_sig_info(SIGBUS, &info, current);
/* Privileged-opcode trap (tt 0x11) from user mode: deliver
 * SIGILL/ILL_PRVOPC.
 */
2325 void do_privop(struct pt_regs *regs)
2329 if (notify_die(DIE_TRAP, "privileged operation", regs,
2330 0, 0x11, SIGILL) == NOTIFY_STOP)
2333 if (test_thread_flag(TIF_32BIT)) {
2334 regs->tpc &= 0xffffffff;
2335 regs->tnpc &= 0xffffffff;
2337 info.si_signo = SIGILL;
2339 info.si_code = ILL_PRVOPC;
2340 info.si_addr = (void __user *)regs->tpc;
2342 force_sig_info(SIGILL, &info, current);
2345 void do_privact(struct pt_regs *regs)
2350 /* Trap level 1 stuff or other traps we should never see... */
2351 void do_cee(struct pt_regs *regs)
2353 die_if_kernel("TL0: Cache Error Exception", regs);
2356 void do_cee_tl1(struct pt_regs *regs)
2358 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2359 die_if_kernel("TL1: Cache Error Exception", regs);
2362 void do_dae_tl1(struct pt_regs *regs)
2364 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2365 die_if_kernel("TL1: Data Access Exception", regs);
2368 void do_iae_tl1(struct pt_regs *regs)
2370 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2371 die_if_kernel("TL1: Instruction Access Exception", regs);
2374 void do_div0_tl1(struct pt_regs *regs)
2376 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2377 die_if_kernel("TL1: DIV0 Exception", regs);
2380 void do_fpdis_tl1(struct pt_regs *regs)
2382 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2383 die_if_kernel("TL1: FPU Disabled", regs);
2386 void do_fpieee_tl1(struct pt_regs *regs)
2388 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2389 die_if_kernel("TL1: FPU IEEE Exception", regs);
2392 void do_fpother_tl1(struct pt_regs *regs)
2394 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2395 die_if_kernel("TL1: FPU Other Exception", regs);
2398 void do_ill_tl1(struct pt_regs *regs)
2400 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2401 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2404 void do_irq_tl1(struct pt_regs *regs)
2406 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2407 die_if_kernel("TL1: IRQ Exception", regs);
2410 void do_lddfmna_tl1(struct pt_regs *regs)
2412 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2413 die_if_kernel("TL1: LDDF Exception", regs);
2416 void do_stdfmna_tl1(struct pt_regs *regs)
2418 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2419 die_if_kernel("TL1: STDF Exception", regs);
/* Physical-address watchpoint hit at trap level 0; fatal only if it
 * happened in kernel mode.
 */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
2427 void do_paw_tl1(struct pt_regs *regs)
2429 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2430 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
/* Virtual-address watchpoint hit at trap level 0; fatal only if it
 * happened in kernel mode.
 */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
2438 void do_vaw_tl1(struct pt_regs *regs)
2440 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2441 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2444 void do_tof_tl1(struct pt_regs *regs)
2446 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2447 die_if_kernel("TL1: Tag Overflow Exception", regs);
2450 void do_getpsr(struct pt_regs *regs)
2452 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2453 regs->tpc = regs->tnpc;
2455 if (test_thread_flag(TIF_32BIT)) {
2456 regs->tpc &= 0xffffffff;
2457 regs->tnpc &= 0xffffffff;
/* Per-cpu trap handling state, indexed by hard cpu id (see
 * init_cur_cpu_trap()).  Its field layout is pinned by the
 * TRAP_PER_CPU_* offset checks in trap_init(), which suggests it is
 * also accessed by offset from low-level trap code — verify there.
 */
struct trap_per_cpu trap_block[NR_CPUS];
2463 /* This can get invoked before sched_init() so play it super safe
2464 * and use hard_smp_processor_id().
2466 void init_cur_cpu_trap(struct thread_info *t)
2468 int cpu = hard_smp_processor_id();
2469 struct trap_per_cpu *p = &trap_block[cpu];
/* These three functions have no visible definition here.  NOTE(review):
 * the offsetof comparisons below are compile-time constants, so the
 * calls are presumably eliminated when every check passes and a failed
 * check surfaces as an unresolved symbol at link time — confirm no
 * definitions exist elsewhere.
 */
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor.
 *
 * Validates that the assembler offset constants (TI_*, TRAP_PER_CPU_*,
 * TSB_CONFIG_*) match the actual C structure layouts, then attaches
 * the boot cpu to init_mm.
 */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    /* fpregs must stay 64-byte aligned for the FPU save code. */
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}