1 /* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
9 * I like traps on v9, :))))
12 #include <linux/config.h>
13 #include <linux/module.h>
14 #include <linux/sched.h> /* for jiffies */
15 #include <linux/kernel.h>
16 #include <linux/kallsyms.h>
17 #include <linux/signal.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
21 #include <linux/init.h>
23 #include <asm/delay.h>
24 #include <asm/system.h>
25 #include <asm/ptrace.h>
26 #include <asm/oplib.h>
28 #include <asm/pgtable.h>
29 #include <asm/unistd.h>
30 #include <asm/uaccess.h>
31 #include <asm/fpumacro.h>
34 #include <asm/estate.h>
35 #include <asm/chafsr.h>
36 #include <asm/sfafsr.h>
37 #include <asm/psrcompat.h>
38 #include <asm/processor.h>
39 #include <asm/timer.h>
40 #include <asm/kdebug.h>
43 #include <linux/kmod.h>
46 struct notifier_block *sparc64die_chain;
47 static DEFINE_SPINLOCK(die_notifier_lock);
49 int register_die_notifier(struct notifier_block *nb)
50 {
51 int err = 0;
52 unsigned long flags;
53 spin_lock_irqsave(&die_notifier_lock, flags);
54 err = notifier_chain_register(&sparc64die_chain, nb);
55 spin_unlock_irqrestore(&die_notifier_lock, flags);
56 return err;
57 }
59 /* When an irrecoverable trap occurs at tl > 0, the trap entry
60 * code logs the trap state registers at every level in the trap
61 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
62 * is:
63 *
64 * struct tl1_traplog {
65 * struct {
66 * unsigned long tstate;
67 * unsigned long tpc;
68 * unsigned long tnpc;
69 * unsigned long tt;
70 * } trapstack[4];
71 * unsigned long tl;
72 * };
73 */
74 static void dump_tl1_traplog(struct tl1_traplog *p)
78 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
79 "dumping track stack.\n", p->tl);
81 limit = (tlb_type == hypervisor) ? 2 : 4;
82 for (i = 0; i < limit; i++) {
84 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
85 "TNPC[%016lx] TT[%lx]\n",
87 p->trapstack[i].tstate, p->trapstack[i].tpc,
88 p->trapstack[i].tnpc, p->trapstack[i].tt);
92 void do_call_debug(struct pt_regs *regs)
94 notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
97 void bad_trap(struct pt_regs *regs, long lvl)
102 if (notify_die(DIE_TRAP, "bad trap", regs,
103 0, lvl, SIGTRAP) == NOTIFY_STOP)
107 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
108 die_if_kernel(buffer, regs);
112 if (regs->tstate & TSTATE_PRIV) {
113 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
114 die_if_kernel(buffer, regs);
116 if (test_thread_flag(TIF_32BIT)) {
117 regs->tpc &= 0xffffffff;
118 regs->tnpc &= 0xffffffff;
120 info.si_signo = SIGILL;
122 info.si_code = ILL_ILLTRP;
123 info.si_addr = (void __user *)regs->tpc;
124 info.si_trapno = lvl;
125 force_sig_info(SIGILL, &info, current);
128 void bad_trap_tl1(struct pt_regs *regs, long lvl)
132 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
133 0, lvl, SIGTRAP) == NOTIFY_STOP)
136 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
138 sprintf(buffer, "Bad trap %lx at tl>0", lvl);
139 die_if_kernel(buffer, regs);
142 #ifdef CONFIG_DEBUG_BUGVERBOSE
143 void do_BUG(const char *file, int line)
146 printk("kernel BUG at %s:%d!\n", file, line);
150 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
154 if (notify_die(DIE_TRAP, "instruction access exception", regs,
155 0, 0x8, SIGTRAP) == NOTIFY_STOP)
158 if (regs->tstate & TSTATE_PRIV) {
159 printk("spitfire_insn_access_exception: SFSR[%016lx] "
160 "SFAR[%016lx], going.\n", sfsr, sfar);
161 die_if_kernel("Iax", regs);
163 if (test_thread_flag(TIF_32BIT)) {
164 regs->tpc &= 0xffffffff;
165 regs->tnpc &= 0xffffffff;
167 info.si_signo = SIGSEGV;
169 info.si_code = SEGV_MAPERR;
170 info.si_addr = (void __user *)regs->tpc;
172 force_sig_info(SIGSEGV, &info, current);
175 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
177 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
178 0, 0x8, SIGTRAP) == NOTIFY_STOP)
181 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
182 spitfire_insn_access_exception(regs, sfsr, sfar);
185 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
187 unsigned short type = (type_ctx >> 16);
188 unsigned short ctx = (type_ctx & 0xffff);
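/* Note: as the two decodes above show, the sun4v hypervisor packs both
 * values into the single type_ctx argument: the fault type in the upper
 * 16 bits, the MMU context in the lower 16 bits. The data-access
 * handler further down uses the same packing.
 */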
191 if (notify_die(DIE_TRAP, "instruction access exception", regs,
192 0, 0x8, SIGTRAP) == NOTIFY_STOP)
195 if (regs->tstate & TSTATE_PRIV) {
196 printk("sun4v_insn_access_exception: ADDR[%016lx] "
197 "CTX[%04x] TYPE[%04x], going.\n",
199 die_if_kernel("Iax", regs);
202 if (test_thread_flag(TIF_32BIT)) {
203 regs->tpc &= 0xffffffff;
204 regs->tnpc &= 0xffffffff;
206 info.si_signo = SIGSEGV;
208 info.si_code = SEGV_MAPERR;
209 info.si_addr = (void __user *) addr;
211 force_sig_info(SIGSEGV, &info, current);
214 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
216 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
217 0, 0x8, SIGTRAP) == NOTIFY_STOP)
220 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
221 sun4v_insn_access_exception(regs, addr, type_ctx);
224 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
228 if (notify_die(DIE_TRAP, "data access exception", regs,
229 0, 0x30, SIGTRAP) == NOTIFY_STOP)
232 if (regs->tstate & TSTATE_PRIV) {
233 /* Test if this comes from uaccess places. */
234 const struct exception_table_entry *entry;
236 entry = search_exception_tables(regs->tpc);
238 /* Ouch, somebody is trying VM hole tricks on us... */
239 #ifdef DEBUG_EXCEPTIONS
240 printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
241 printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
242 regs->tpc, entry->fixup);
244 regs->tpc = entry->fixup;
245 regs->tnpc = regs->tpc + 4;
249 printk("spitfire_data_access_exception: SFSR[%016lx] "
250 "SFAR[%016lx], going.\n", sfsr, sfar);
251 die_if_kernel("Dax", regs);
254 info.si_signo = SIGSEGV;
256 info.si_code = SEGV_MAPERR;
257 info.si_addr = (void __user *)sfar;
259 force_sig_info(SIGSEGV, &info, current);
262 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
264 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
265 0, 0x30, SIGTRAP) == NOTIFY_STOP)
268 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
269 spitfire_data_access_exception(regs, sfsr, sfar);
272 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
274 unsigned short type = (type_ctx >> 16);
275 unsigned short ctx = (type_ctx & 0xffff);
278 if (notify_die(DIE_TRAP, "data access exception", regs,
279 0, 0x8, SIGTRAP) == NOTIFY_STOP)
282 if (regs->tstate & TSTATE_PRIV) {
283 printk("sun4v_data_access_exception: ADDR[%016lx] "
284 "CTX[%04x] TYPE[%04x], going.\n",
286 die_if_kernel("Dax", regs);
289 if (test_thread_flag(TIF_32BIT)) {
290 regs->tpc &= 0xffffffff;
291 regs->tnpc &= 0xffffffff;
293 info.si_signo = SIGSEGV;
295 info.si_code = SEGV_MAPERR;
296 info.si_addr = (void __user *) addr;
298 force_sig_info(SIGSEGV, &info, current);
301 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
303 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
304 0, 0x8, SIGTRAP) == NOTIFY_STOP)
307 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
308 sun4v_data_access_exception(regs, addr, type_ctx);
312 /* This is really pathetic... */
313 extern volatile int pci_poke_in_progress;
314 extern volatile int pci_poke_cpu;
315 extern volatile int pci_poke_faulted;
318 /* When access exceptions happen, we must do this. */
319 static void spitfire_clean_and_reenable_l1_caches(void)
323 if (tlb_type != spitfire)
327 for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
328 spitfire_put_icache_tag(va, 0x0);
329 spitfire_put_dcache_tag(va, 0x0);
332 /* Re-enable in LSU. */
333 __asm__ __volatile__("flush %%g6\n\t"
335 "stxa %0, [%%g0] %1\n\t"
338 : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
339 LSU_CONTROL_IM | LSU_CONTROL_DM),
340 "i" (ASI_LSU_CONTROL)
344 static void spitfire_enable_estate_errors(void)
346 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
349 : "r" (ESTATE_ERR_ALL),
350 "i" (ASI_ESTATE_ERROR_EN));
353 static char ecc_syndrome_table[] = {
354 0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
355 0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
356 0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
357 0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
358 0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
359 0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
360 0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
361 0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
362 0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
363 0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
364 0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
365 0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
366 0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
367 0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
368 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
369 0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
370 0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
371 0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
372 0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
373 0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
374 0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
375 0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
376 0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
377 0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
378 0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
379 0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
380 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
381 0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
382 0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
383 0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
384 0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
385 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
386 };
388 static char *syndrome_unknown = "<Unknown>";
390 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
392 unsigned short scode;
393 char memmod_str[64], *p;
396 scode = ecc_syndrome_table[udbl & 0xff];
397 if (prom_getunumber(scode, afar,
398 memmod_str, sizeof(memmod_str)) == -1)
399 p = syndrome_unknown;
402 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
403 "Memory Module \"%s\"\n",
404 smp_processor_id(), scode, p);
408 scode = ecc_syndrome_table[udbh & 0xff];
409 if (prom_getunumber(scode, afar,
410 memmod_str, sizeof(memmod_str)) == -1)
411 p = syndrome_unknown;
414 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
415 "Memory Module \"%s\"\n",
416 smp_processor_id(), scode, p);
421 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
424 printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
425 "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
426 smp_processor_id(), afsr, afar, udbl, udbh, tl1);
428 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
430 /* We always log it, even if someone is listening for this
433 notify_die(DIE_TRAP, "Correctable ECC Error", regs,
434 0, TRAP_TYPE_CEE, SIGTRAP);
436 /* The Correctable ECC Error trap does not disable I/D caches. So
437 * we only have to restore the ESTATE Error Enable register.
439 spitfire_enable_estate_errors();
442 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
446 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
447 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
448 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
450 /* XXX add more human friendly logging of the error status
451 * XXX as is implemented for cheetah
454 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
456 /* We always log it, even if someone is listening for this
459 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
462 if (regs->tstate & TSTATE_PRIV) {
464 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
465 die_if_kernel("UE", regs);
468 /* XXX need more intelligent processing here, such as is implemented
469 * XXX for cheetah errors, in fact if the E-cache still holds the
470 * XXX line with bad parity this will loop
473 spitfire_clean_and_reenable_l1_caches();
474 spitfire_enable_estate_errors();
476 if (test_thread_flag(TIF_32BIT)) {
477 regs->tpc &= 0xffffffff;
478 regs->tnpc &= 0xffffffff;
480 info.si_signo = SIGBUS;
482 info.si_code = BUS_OBJERR;
483 info.si_addr = (void *)0;
485 force_sig_info(SIGBUS, &info, current);
488 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
490 unsigned long afsr, tt, udbh, udbl;
493 afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
494 tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
495 tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
496 udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
497 udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
500 if (tt == TRAP_TYPE_DAE &&
501 pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
502 spitfire_clean_and_reenable_l1_caches();
503 spitfire_enable_estate_errors();
505 pci_poke_faulted = 1;
506 regs->tnpc = regs->tpc + 4;
511 if (afsr & SFAFSR_UE)
512 spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
514 if (tt == TRAP_TYPE_CEE) {
515 /* Handle the case where we took a CEE trap, but ACK'd
516 * only the UE state in the UDB error registers.
518 if (afsr & SFAFSR_UE) {
519 if (udbh & UDBE_CE) {
520 __asm__ __volatile__(
521 "stxa %0, [%1] %2\n\t"
524 : "r" (udbh & UDBE_CE),
525 "r" (0x0), "i" (ASI_UDB_ERROR_W));
527 if (udbl & UDBE_CE) {
528 __asm__ __volatile__(
529 "stxa %0, [%1] %2\n\t"
532 : "r" (udbl & UDBE_CE),
533 "r" (0x18), "i" (ASI_UDB_ERROR_W));
537 spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
541 int cheetah_pcache_forced_on;
543 void cheetah_enable_pcache(void)
547 printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
550 __asm__ __volatile__("ldxa [%%g0] %1, %0"
551 : "=r" (dcr)
552 : "i" (ASI_DCU_CONTROL_REG));
553 dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
554 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
557 : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
560 /* Cheetah error trap handling. */
561 static unsigned long ecache_flush_physbase;
562 static unsigned long ecache_flush_linesize;
563 static unsigned long ecache_flush_size;
565 /* WARNING: The error trap handlers in assembly know the precise
566 * layout of the following structure.
568 * C-level handlers below use this information to log the error
569 * and then determine how to recover (if possible).
571 struct cheetah_err_info {
572 /*0x00*/u64 afsr;
573 /*0x08*/u64 afar;
576 /*0x10*/u64 dcache_data[4]; /* The actual data */
577 /*0x30*/u64 dcache_index; /* D-cache index */
578 /*0x38*/u64 dcache_tag; /* D-cache tag/valid */
579 /*0x40*/u64 dcache_utag; /* D-cache microtag */
580 /*0x48*/u64 dcache_stag; /* D-cache snooptag */
583 /*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
584 /*0x90*/u64 icache_index; /* I-cache index */
585 /*0x98*/u64 icache_tag; /* I-cache phys tag */
586 /*0xa0*/u64 icache_utag; /* I-cache microtag */
587 /*0xa8*/u64 icache_stag; /* I-cache snooptag */
588 /*0xb0*/u64 icache_upper; /* I-cache upper-tag */
589 /*0xb8*/u64 icache_lower; /* I-cache lower-tag */
592 /*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
593 /*0xe0*/u64 ecache_index; /* E-cache index */
594 /*0xe8*/u64 ecache_tag; /* E-cache tag/state */
596 /*0xf0*/u64 __pad[32 - 30];
597 };
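/* 30 u64s of payload end at offset 0xf0; the pad rounds the record up
 * to 32 u64s (a 0x100-byte entry), keeping the fixed layout that the
 * assembly handlers mentioned above rely on.
 */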
598 #define CHAFSR_INVALID ((u64)-1L)
600 /* This table is ordered in priority of errors and matches the
601 * AFAR overwrite policy as well.
604 struct afsr_error_table {
605 unsigned long mask;
606 const char *name;
607 };
609 static const char CHAFSR_PERR_msg[] =
610 "System interface protocol error";
611 static const char CHAFSR_IERR_msg[] =
612 "Internal processor error";
613 static const char CHAFSR_ISAP_msg[] =
614 "System request parity error on incoming addresss";
615 static const char CHAFSR_UCU_msg[] =
616 "Uncorrectable E-cache ECC error for ifetch/data";
617 static const char CHAFSR_UCC_msg[] =
618 "SW Correctable E-cache ECC error for ifetch/data";
619 static const char CHAFSR_UE_msg[] =
620 "Uncorrectable system bus data ECC error for read";
621 static const char CHAFSR_EDU_msg[] =
622 "Uncorrectable E-cache ECC error for stmerge/blkld";
623 static const char CHAFSR_EMU_msg[] =
624 "Uncorrectable system bus MTAG error";
625 static const char CHAFSR_WDU_msg[] =
626 "Uncorrectable E-cache ECC error for writeback";
627 static const char CHAFSR_CPU_msg[] =
628 "Uncorrectable ECC error for copyout";
629 static const char CHAFSR_CE_msg[] =
630 "HW corrected system bus data ECC error for read";
631 static const char CHAFSR_EDC_msg[] =
632 "HW corrected E-cache ECC error for stmerge/blkld";
633 static const char CHAFSR_EMC_msg[] =
634 "HW corrected system bus MTAG ECC error";
635 static const char CHAFSR_WDC_msg[] =
636 "HW corrected E-cache ECC error for writeback";
637 static const char CHAFSR_CPC_msg[] =
638 "HW corrected ECC error for copyout";
639 static const char CHAFSR_TO_msg[] =
640 "Unmapped error from system bus";
641 static const char CHAFSR_BERR_msg[] =
642 "Bus error response from system bus";
643 static const char CHAFSR_IVC_msg[] =
644 "HW corrected system bus data ECC error for ivec read";
645 static const char CHAFSR_IVU_msg[] =
646 "Uncorrectable system bus data ECC error for ivec read";
647 static struct afsr_error_table __cheetah_error_table[] = {
648 { CHAFSR_PERR, CHAFSR_PERR_msg },
649 { CHAFSR_IERR, CHAFSR_IERR_msg },
650 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
651 { CHAFSR_UCU, CHAFSR_UCU_msg },
652 { CHAFSR_UCC, CHAFSR_UCC_msg },
653 { CHAFSR_UE, CHAFSR_UE_msg },
654 { CHAFSR_EDU, CHAFSR_EDU_msg },
655 { CHAFSR_EMU, CHAFSR_EMU_msg },
656 { CHAFSR_WDU, CHAFSR_WDU_msg },
657 { CHAFSR_CPU, CHAFSR_CPU_msg },
658 { CHAFSR_CE, CHAFSR_CE_msg },
659 { CHAFSR_EDC, CHAFSR_EDC_msg },
660 { CHAFSR_EMC, CHAFSR_EMC_msg },
661 { CHAFSR_WDC, CHAFSR_WDC_msg },
662 { CHAFSR_CPC, CHAFSR_CPC_msg },
663 { CHAFSR_TO, CHAFSR_TO_msg },
664 { CHAFSR_BERR, CHAFSR_BERR_msg },
665 /* These two do not update the AFAR. */
666 { CHAFSR_IVC, CHAFSR_IVC_msg },
667 { CHAFSR_IVU, CHAFSR_IVU_msg },
668 { 0, NULL },
669 };
670 static const char CHPAFSR_DTO_msg[] =
671 "System bus unmapped error for prefetch/storequeue-read";
672 static const char CHPAFSR_DBERR_msg[] =
673 "System bus error for prefetch/storequeue-read";
674 static const char CHPAFSR_THCE_msg[] =
675 "Hardware corrected E-cache Tag ECC error";
676 static const char CHPAFSR_TSCE_msg[] =
677 "SW handled correctable E-cache Tag ECC error";
678 static const char CHPAFSR_TUE_msg[] =
679 "Uncorrectable E-cache Tag ECC error";
680 static const char CHPAFSR_DUE_msg[] =
681 "System bus uncorrectable data ECC error due to prefetch/store-fill";
682 static struct afsr_error_table __cheetah_plus_error_table[] = {
683 { CHAFSR_PERR, CHAFSR_PERR_msg },
684 { CHAFSR_IERR, CHAFSR_IERR_msg },
685 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
686 { CHAFSR_UCU, CHAFSR_UCU_msg },
687 { CHAFSR_UCC, CHAFSR_UCC_msg },
688 { CHAFSR_UE, CHAFSR_UE_msg },
689 { CHAFSR_EDU, CHAFSR_EDU_msg },
690 { CHAFSR_EMU, CHAFSR_EMU_msg },
691 { CHAFSR_WDU, CHAFSR_WDU_msg },
692 { CHAFSR_CPU, CHAFSR_CPU_msg },
693 { CHAFSR_CE, CHAFSR_CE_msg },
694 { CHAFSR_EDC, CHAFSR_EDC_msg },
695 { CHAFSR_EMC, CHAFSR_EMC_msg },
696 { CHAFSR_WDC, CHAFSR_WDC_msg },
697 { CHAFSR_CPC, CHAFSR_CPC_msg },
698 { CHAFSR_TO, CHAFSR_TO_msg },
699 { CHAFSR_BERR, CHAFSR_BERR_msg },
700 { CHPAFSR_DTO, CHPAFSR_DTO_msg },
701 { CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
702 { CHPAFSR_THCE, CHPAFSR_THCE_msg },
703 { CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
704 { CHPAFSR_TUE, CHPAFSR_TUE_msg },
705 { CHPAFSR_DUE, CHPAFSR_DUE_msg },
706 /* These two do not update the AFAR. */
707 { CHAFSR_IVC, CHAFSR_IVC_msg },
708 { CHAFSR_IVU, CHAFSR_IVU_msg },
709 { 0, NULL },
710 };
711 static const char JPAFSR_JETO_msg[] =
712 "System interface protocol error, hw timeout caused";
713 static const char JPAFSR_SCE_msg[] =
714 "Parity error on system snoop results";
715 static const char JPAFSR_JEIC_msg[] =
716 "System interface protocol error, illegal command detected";
717 static const char JPAFSR_JEIT_msg[] =
718 "System interface protocol error, illegal ADTYPE detected";
719 static const char JPAFSR_OM_msg[] =
720 "Out of range memory error has occurred";
721 static const char JPAFSR_ETP_msg[] =
722 "Parity error on L2 cache tag SRAM";
723 static const char JPAFSR_UMS_msg[] =
724 "Error due to unsupported store";
725 static const char JPAFSR_RUE_msg[] =
726 "Uncorrectable ECC error from remote cache/memory";
727 static const char JPAFSR_RCE_msg[] =
728 "Correctable ECC error from remote cache/memory";
729 static const char JPAFSR_BP_msg[] =
730 "JBUS parity error on returned read data";
731 static const char JPAFSR_WBP_msg[] =
732 "JBUS parity error on data for writeback or block store";
733 static const char JPAFSR_FRC_msg[] =
734 "Foreign read to DRAM incurring correctable ECC error";
735 static const char JPAFSR_FRU_msg[] =
736 "Foreign read to DRAM incurring uncorrectable ECC error";
737 static struct afsr_error_table __jalapeno_error_table[] = {
738 { JPAFSR_JETO, JPAFSR_JETO_msg },
739 { JPAFSR_SCE, JPAFSR_SCE_msg },
740 { JPAFSR_JEIC, JPAFSR_JEIC_msg },
741 { JPAFSR_JEIT, JPAFSR_JEIT_msg },
742 { CHAFSR_PERR, CHAFSR_PERR_msg },
743 { CHAFSR_IERR, CHAFSR_IERR_msg },
744 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
745 { CHAFSR_UCU, CHAFSR_UCU_msg },
746 { CHAFSR_UCC, CHAFSR_UCC_msg },
747 { CHAFSR_UE, CHAFSR_UE_msg },
748 { CHAFSR_EDU, CHAFSR_EDU_msg },
749 { JPAFSR_OM, JPAFSR_OM_msg },
750 { CHAFSR_WDU, CHAFSR_WDU_msg },
751 { CHAFSR_CPU, CHAFSR_CPU_msg },
752 { CHAFSR_CE, CHAFSR_CE_msg },
753 { CHAFSR_EDC, CHAFSR_EDC_msg },
754 { JPAFSR_ETP, JPAFSR_ETP_msg },
755 { CHAFSR_WDC, CHAFSR_WDC_msg },
756 { CHAFSR_CPC, CHAFSR_CPC_msg },
757 { CHAFSR_TO, CHAFSR_TO_msg },
758 { CHAFSR_BERR, CHAFSR_BERR_msg },
759 { JPAFSR_UMS, JPAFSR_UMS_msg },
760 { JPAFSR_RUE, JPAFSR_RUE_msg },
761 { JPAFSR_RCE, JPAFSR_RCE_msg },
762 { JPAFSR_BP, JPAFSR_BP_msg },
763 { JPAFSR_WBP, JPAFSR_WBP_msg },
764 { JPAFSR_FRC, JPAFSR_FRC_msg },
765 { JPAFSR_FRU, JPAFSR_FRU_msg },
766 /* This one does not update the AFAR. */
767 { CHAFSR_IVU, CHAFSR_IVU_msg },
768 { 0, NULL },
769 };
770 static struct afsr_error_table *cheetah_error_table;
771 static unsigned long cheetah_afsr_errors;
773 /* This is allocated at boot time based upon the largest hardware
774 * cpu ID in the system. We allocate two entries per cpu, one for
775 * TL==0 logging and one for TL >= 1 logging.
777 struct cheetah_err_info *cheetah_error_log;
779 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
781 struct cheetah_err_info *p;
782 int cpu = smp_processor_id();
784 if (!cheetah_error_log)
785 return NULL;
787 p = cheetah_error_log + (cpu * 2);
788 if ((afsr & CHAFSR_TL1) != 0UL)
789 p++;
791 return p;
792 }
794 extern unsigned int tl0_icpe[], tl1_icpe[];
795 extern unsigned int tl0_dcpe[], tl1_dcpe[];
796 extern unsigned int tl0_fecc[], tl1_fecc[];
797 extern unsigned int tl0_cee[], tl1_cee[];
798 extern unsigned int tl0_iae[], tl1_iae[];
799 extern unsigned int tl0_dae[], tl1_dae[];
800 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
801 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
802 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
803 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
804 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
806 void __init cheetah_ecache_flush_init(void)
808 unsigned long largest_size, smallest_linesize, order, ver;
809 int node, i, instance;
811 /* Scan all cpu device tree nodes, note two values:
812 * 1) largest E-cache size
813 * 2) smallest E-cache line size
816 smallest_linesize = ~0UL;
819 while (!cpu_find_by_instance(instance, &node, NULL)) {
822 val = prom_getintdefault(node, "ecache-size",
824 if (val > largest_size)
826 val = prom_getintdefault(node, "ecache-line-size", 64);
827 if (val < smallest_linesize)
828 smallest_linesize = val;
832 if (largest_size == 0UL || smallest_linesize == ~0UL) {
833 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
838 ecache_flush_size = (2 * largest_size);
839 ecache_flush_linesize = smallest_linesize;
841 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
843 if (ecache_flush_physbase == ~0UL) {
844 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
845 "contiguous physical memory.\n",
850 /* Now allocate error trap reporting scoreboard. */
851 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
852 for (order = 0; order < MAX_ORDER; order++) {
853 if ((PAGE_SIZE << order) >= node)
856 cheetah_error_log = (struct cheetah_err_info *)
857 __get_free_pages(GFP_KERNEL, order);
858 if (!cheetah_error_log) {
859 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
860 "error logging scoreboard (%d bytes).\n", node);
863 memset(cheetah_error_log, 0, PAGE_SIZE << order);
865 /* Mark all AFSRs as invalid so that the trap handler will
866 * log new information there.
868 for (i = 0; i < 2 * NR_CPUS; i++)
869 cheetah_error_log[i].afsr = CHAFSR_INVALID;
871 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
872 if ((ver >> 32) == __JALAPENO_ID ||
873 (ver >> 32) == __SERRANO_ID) {
874 cheetah_error_table = &__jalapeno_error_table[0];
875 cheetah_afsr_errors = JPAFSR_ERRORS;
876 } else if ((ver >> 32) == 0x003e0015) {
877 cheetah_error_table = &__cheetah_plus_error_table[0];
878 cheetah_afsr_errors = CHPAFSR_ERRORS;
879 } else {
880 cheetah_error_table = &__cheetah_error_table[0];
881 cheetah_afsr_errors = CHAFSR_ERRORS;
882 }
884 /* Now patch trap tables. */
885 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
886 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
887 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
888 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
889 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
890 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
891 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
892 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
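/* Each memcpy above moves (8 * 4) bytes: a sparc64 trap-table entry is
 * eight 4-byte instructions, so each copy replaces one full entry with
 * the corresponding error vector.
 */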
893 if (tlb_type == cheetah_plus) {
894 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
895 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
896 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
897 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
902 static void cheetah_flush_ecache(void)
904 unsigned long flush_base = ecache_flush_physbase;
905 unsigned long flush_linesize = ecache_flush_linesize;
906 unsigned long flush_size = ecache_flush_size;
908 __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
909 " bne,pt %%xcc, 1b\n\t"
910 " ldxa [%2 + %0] %3, %%g0\n\t"
912 : "0" (flush_size), "r" (flush_base),
913 "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
916 static void cheetah_flush_ecache_line(unsigned long physaddr)
920 physaddr &= ~(8UL - 1UL);
921 physaddr = (ecache_flush_physbase +
922 (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
923 alias = physaddr + (ecache_flush_size >> 1UL);
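/* physaddr now points at the target line's image in the lower half of
 * the flush span; alias is the same index half a span higher. For a
 * direct-mapped E-cache, touching both below forces the victim line out.
 */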
924 __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
925 "ldxa [%1] %2, %%g0\n\t"
928 : "r" (physaddr), "r" (alias),
929 "i" (ASI_PHYS_USE_EC));
932 /* Unfortunately, the diagnostic access to the I-cache tags we need to
933 * use to clear the thing interferes with I-cache coherency transactions.
935 * So we must only flush the I-cache when it is disabled.
937 static void __cheetah_flush_icache(void)
939 unsigned int icache_size, icache_line_size;
942 icache_size = local_cpu_data().icache_size;
943 icache_line_size = local_cpu_data().icache_line_size;
945 /* Clear the valid bits in all the tags. */
946 for (addr = 0; addr < icache_size; addr += icache_line_size) {
947 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
950 : "r" (addr | (2 << 3)),
955 static void cheetah_flush_icache(void)
957 unsigned long dcu_save;
959 /* Save current DCU, disable I-cache. */
960 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
961 "or %0, %2, %%g1\n\t"
962 "stxa %%g1, [%%g0] %1\n\t"
965 : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
968 __cheetah_flush_icache();
970 /* Restore DCU register */
971 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
974 : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
977 static void cheetah_flush_dcache(void)
979 unsigned int dcache_size, dcache_line_size;
982 dcache_size = local_cpu_data().dcache_size;
983 dcache_line_size = local_cpu_data().dcache_line_size;
985 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
986 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
989 : "r" (addr), "i" (ASI_DCACHE_TAG));
993 /* In order to make the even parity correct we must do two things.
994 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
995 * Next, we clear out all 32-bytes of data for that line. Data of
996 * all-zero + tag parity value of zero == correct parity.
998 static void cheetah_plus_zap_dcache_parity(void)
1000 unsigned int dcache_size, dcache_line_size;
1003 dcache_size = local_cpu_data().dcache_size;
1004 dcache_line_size = local_cpu_data().dcache_line_size;
1006 for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1007 unsigned long tag = (addr >> 14);
1010 __asm__ __volatile__("membar #Sync\n\t"
1011 "stxa %0, [%1] %2\n\t"
1014 : "r" (tag), "r" (addr),
1015 "i" (ASI_DCACHE_UTAG));
1016 for (line = addr; line < addr + dcache_line_size; line += 8)
1017 __asm__ __volatile__("membar #Sync\n\t"
1018 "stxa %%g0, [%0] %1\n\t"
1022 "i" (ASI_DCACHE_DATA));
1026 /* Conversion tables used to frob Cheetah AFSR syndrome values into
1027 * something palatable to the memory controller driver get_unumber
1051 static unsigned char cheetah_ecc_syntab[] = {
1052 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
1053 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1054 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1055 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1056 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1057 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1058 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1059 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1060 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1061 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1062 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1063 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1064 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1065 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1066 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1067 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1068 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1069 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1070 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1071 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1072 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1073 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1074 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1075 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1076 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1077 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1078 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1079 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1080 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1081 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1082 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1083 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1084 };
1085 static unsigned char cheetah_mtag_syntab[] = {
1096 /* Return the highest priority error condition mentioned. */
1097 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1099 unsigned long tmp = 0;
1102 for (i = 0; cheetah_error_table[i].mask; i++) {
1103 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1104 break;
1105 }
1107 return tmp;
1108 }
1109 static const char *cheetah_get_string(unsigned long bit)
1113 for (i = 0; cheetah_error_table[i].mask; i++) {
1114 if ((bit & cheetah_error_table[i].mask) != 0UL)
1115 return cheetah_error_table[i].name;
1116 }
1117 return "???";
1118 }
1120 extern int chmc_getunumber(int, unsigned long, char *, int);
1122 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1123 unsigned long afsr, unsigned long afar, int recoverable)
1125 unsigned long hipri;
1128 printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1129 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1130 afsr, afar,
1131 (afsr & CHAFSR_TL1) ? 1 : 0);
1132 printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1133 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1134 regs->tpc, regs->tnpc, regs->tstate);
1135 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
1136 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1137 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1138 (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1139 (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1140 (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1141 hipri = cheetah_get_hipri(afsr);
1142 printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1143 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1144 hipri, cheetah_get_string(hipri));
1146 /* Try to get unumber if relevant. */
1147 #define ESYND_ERRORS (CHAFSR_IVC | CHAFSR_IVU | \
1148 CHAFSR_CPC | CHAFSR_CPU | \
1149 CHAFSR_UE | CHAFSR_CE | \
1150 CHAFSR_EDC | CHAFSR_EDU | \
1151 CHAFSR_UCC | CHAFSR_UCU | \
1152 CHAFSR_WDU | CHAFSR_WDC)
1153 #define MSYND_ERRORS (CHAFSR_EMC | CHAFSR_EMU)
1154 if (afsr & ESYND_ERRORS) {
1158 syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1159 syndrome = cheetah_ecc_syntab[syndrome];
1160 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1162 printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1163 (recoverable ? KERN_WARNING : KERN_CRIT),
1164 smp_processor_id(), unum);
1165 } else if (afsr & MSYND_ERRORS) {
1169 syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1170 syndrome = cheetah_mtag_syntab[syndrome];
1171 ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
1173 printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1174 (recoverable ? KERN_WARNING : KERN_CRIT),
1175 smp_processor_id(), unum);
1178 /* Now dump the cache snapshots. */
1179 printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
1180 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1181 (int) info->dcache_index,
1182 info->dcache_tag,
1183 info->dcache_utag,
1184 info->dcache_stag);
1185 printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1186 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1187 info->dcache_data[0],
1188 info->dcache_data[1],
1189 info->dcache_data[2],
1190 info->dcache_data[3]);
1191 printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
1192 "u[%016lx] l[%016lx]\n",
1193 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1194 (int) info->icache_index,
1195 info->icache_tag,
1196 info->icache_utag,
1197 info->icache_stag,
1198 info->icache_upper,
1199 info->icache_lower);
1200 printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
1201 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1202 info->icache_data[0],
1203 info->icache_data[1],
1204 info->icache_data[2],
1205 info->icache_data[3]);
1206 printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
1207 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1208 info->icache_data[4],
1209 info->icache_data[5],
1210 info->icache_data[6],
1211 info->icache_data[7]);
1212 printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
1213 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1214 (int) info->ecache_index, info->ecache_tag);
1215 printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
1216 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1217 info->ecache_data[0],
1218 info->ecache_data[1],
1219 info->ecache_data[2],
1220 info->ecache_data[3]);
1222 afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1223 while (afsr != 0UL) {
1224 unsigned long bit = cheetah_get_hipri(afsr);
1226 printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1227 (recoverable ? KERN_WARNING : KERN_CRIT),
1228 bit, cheetah_get_string(bit));
1230 afsr &= ~bit;
1231 }
1233 if (!recoverable)
1234 printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1237 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1239 unsigned long afsr, afar;
1242 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1245 if ((afsr & cheetah_afsr_errors) != 0) {
1247 __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1255 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1257 : : "r" (afsr), "i" (ASI_AFSR));
1262 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1264 struct cheetah_err_info local_snapshot, *p;
1268 cheetah_flush_ecache();
1270 p = cheetah_get_error_log(afsr);
1272 prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1274 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1275 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1279 /* Grab snapshot of logged error. */
1280 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1282 /* If the current trap snapshot does not match what the
1283 * trap handler passed along into our args, big trouble.
1284 * In such a case, mark the local copy as invalid.
1286 * Else, it matches and we mark the afsr in the non-local
1287 * copy as invalid so we may log new error traps there.
1289 if (p->afsr != afsr || p->afar != afar)
1290 local_snapshot.afsr = CHAFSR_INVALID;
1292 p->afsr = CHAFSR_INVALID;
1294 cheetah_flush_icache();
1295 cheetah_flush_dcache();
1297 /* Re-enable I-cache/D-cache */
1298 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1299 "or %%g1, %1, %%g1\n\t"
1300 "stxa %%g1, [%%g0] %0\n\t"
1303 : "i" (ASI_DCU_CONTROL_REG),
1304 "i" (DCU_DC | DCU_IC)
1307 /* Re-enable error reporting */
1308 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1309 "or %%g1, %1, %%g1\n\t"
1310 "stxa %%g1, [%%g0] %0\n\t"
1313 : "i" (ASI_ESTATE_ERROR_EN),
1314 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1317 /* Decide if we can continue after handling this trap and
1318 * logging the error.
1319 */
1320 recoverable = 1;
1321 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1322 recoverable = 0;
1324 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1325 * error was logged while we had error reporting traps disabled.
1327 if (cheetah_recheck_errors(&local_snapshot)) {
1328 unsigned long new_afsr = local_snapshot.afsr;
1330 /* If we got a new asynchronous error, die... */
1331 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1332 CHAFSR_WDU | CHAFSR_CPU |
1333 CHAFSR_IVU | CHAFSR_UE |
1334 CHAFSR_BERR | CHAFSR_TO))
1335 recoverable = 0;
1336 }
1339 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1341 if (!recoverable)
1342 panic("Irrecoverable Fast-ECC error trap.\n");
1344 /* Flush E-cache to kick the error trap handlers out. */
1345 cheetah_flush_ecache();
1348 /* Try to fix a correctable error by pushing the line out from
1349 * the E-cache. Recheck error reporting registers to see if the
1350 * problem is intermittent.
1352 static int cheetah_fix_ce(unsigned long physaddr)
1354 unsigned long orig_estate;
1355 unsigned long alias1, alias2;
1358 /* Make sure correctable error traps are disabled. */
1359 __asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
1360 "andn %0, %1, %%g1\n\t"
1361 "stxa %%g1, [%%g0] %2\n\t"
1363 : "=&r" (orig_estate)
1364 : "i" (ESTATE_ERROR_CEEN),
1365 "i" (ASI_ESTATE_ERROR_EN)
1368 /* We calculate alias addresses that will force the
1369 * cache line in question out of the E-cache. Then
1370 * we bring it back in with an atomic instruction so
1371 * that we get it in some modified/exclusive state,
1372 * then we displace it again to try and get proper ECC
1373 * pushed back into the system.
1375 physaddr &= ~(8UL - 1UL);
1376 alias1 = (ecache_flush_physbase +
1377 (physaddr & ((ecache_flush_size >> 1) - 1)));
1378 alias2 = alias1 + (ecache_flush_size >> 1);
1379 __asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
1380 "ldxa [%1] %3, %%g0\n\t"
1381 "casxa [%2] %3, %%g0, %%g0\n\t"
1382 "membar #StoreLoad | #StoreStore\n\t"
1383 "ldxa [%0] %3, %%g0\n\t"
1384 "ldxa [%1] %3, %%g0\n\t"
1387 : "r" (alias1), "r" (alias2),
1388 "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1390 /* Did that trigger another error? */
1391 if (cheetah_recheck_errors(NULL)) {
1392 /* Try one more time. */
1393 __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1395 : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1396 if (cheetah_recheck_errors(NULL))
1401 /* No new error, intermittent problem. */
1405 /* Restore error enables. */
1406 __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1408 : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1413 /* Return non-zero if PADDR is a valid physical memory address. */
1414 static int cheetah_check_main_memory(unsigned long paddr)
1416 unsigned long vaddr = PAGE_OFFSET + paddr;
1418 if (vaddr > (unsigned long) high_memory)
1421 return kern_addr_valid(vaddr);
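/* PAGE_OFFSET + paddr is the address of the physical page in the
 * kernel's linear mapping; the high_memory bound above plus
 * kern_addr_valid() together reject addresses with no backing RAM.
 */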
1424 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1426 struct cheetah_err_info local_snapshot, *p;
1427 int recoverable, is_memory;
1429 p = cheetah_get_error_log(afsr);
1431 prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1433 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1434 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1438 /* Grab snapshot of logged error. */
1439 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1441 /* If the current trap snapshot does not match what the
1442 * trap handler passed along into our args, big trouble.
1443 * In such a case, mark the local copy as invalid.
1445 * Else, it matches and we mark the afsr in the non-local
1446 * copy as invalid so we may log new error traps there.
1448 if (p->afsr != afsr || p->afar != afar)
1449 local_snapshot.afsr = CHAFSR_INVALID;
1451 p->afsr = CHAFSR_INVALID;
1453 is_memory = cheetah_check_main_memory(afar);
1455 if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1456 /* XXX Might want to log the results of this operation
1457 * XXX somewhere... -DaveM
1459 cheetah_fix_ce(afar);
1463 int flush_all, flush_line;
1465 flush_all = flush_line = 0;
1466 if ((afsr & CHAFSR_EDC) != 0UL) {
1467 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1471 } else if ((afsr & CHAFSR_CPC) != 0UL) {
1472 if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1478 /* Trap handler only disabled I-cache, flush it. */
1479 cheetah_flush_icache();
1481 /* Re-enable I-cache */
1482 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1483 "or %%g1, %1, %%g1\n\t"
1484 "stxa %%g1, [%%g0] %0\n\t"
1487 : "i" (ASI_DCU_CONTROL_REG),
1492 cheetah_flush_ecache();
1493 else if (flush_line)
1494 cheetah_flush_ecache_line(afar);
1497 /* Re-enable error reporting */
1498 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1499 "or %%g1, %1, %%g1\n\t"
1500 "stxa %%g1, [%%g0] %0\n\t"
1503 : "i" (ASI_ESTATE_ERROR_EN),
1504 "i" (ESTATE_ERROR_CEEN)
1507 /* Decide if we can continue after handling this trap and
1508 * logging the error.
1509 */
1510 recoverable = 1;
1511 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1512 recoverable = 0;
1514 /* Re-check AFSR/AFAR */
1515 (void) cheetah_recheck_errors(&local_snapshot);
1518 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1520 if (!recoverable)
1521 panic("Irrecoverable Correctable-ECC error trap.\n");
1524 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1526 struct cheetah_err_info local_snapshot, *p;
1527 int recoverable, is_memory;
1530 /* Check for the special PCI poke sequence. */
1531 if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1532 cheetah_flush_icache();
1533 cheetah_flush_dcache();
1535 /* Re-enable I-cache/D-cache */
1536 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1537 "or %%g1, %1, %%g1\n\t"
1538 "stxa %%g1, [%%g0] %0\n\t"
1541 : "i" (ASI_DCU_CONTROL_REG),
1542 "i" (DCU_DC | DCU_IC)
1545 /* Re-enable error reporting */
1546 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1547 "or %%g1, %1, %%g1\n\t"
1548 "stxa %%g1, [%%g0] %0\n\t"
1551 : "i" (ASI_ESTATE_ERROR_EN),
1552 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1555 (void) cheetah_recheck_errors(NULL);
1557 pci_poke_faulted = 1;
1559 regs->tnpc = regs->tpc + 4;
1564 p = cheetah_get_error_log(afsr);
1566 prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1568 prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1569 smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1573 /* Grab snapshot of logged error. */
1574 memcpy(&local_snapshot, p, sizeof(local_snapshot));
1576 /* If the current trap snapshot does not match what the
1577 * trap handler passed along into our args, big trouble.
1578 * In such a case, mark the local copy as invalid.
1580 * Else, it matches and we mark the afsr in the non-local
1581 * copy as invalid so we may log new error traps there.
1583 if (p->afsr != afsr || p->afar != afar)
1584 local_snapshot.afsr = CHAFSR_INVALID;
1586 p->afsr = CHAFSR_INVALID;
1588 is_memory = cheetah_check_main_memory(afar);
1591 int flush_all, flush_line;
1593 flush_all = flush_line = 0;
1594 if ((afsr & CHAFSR_EDU) != 0UL) {
1595 if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1599 } else if ((afsr & CHAFSR_BERR) != 0UL) {
1600 if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1606 cheetah_flush_icache();
1607 cheetah_flush_dcache();
1609 /* Re-enable I/D caches */
1610 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1611 "or %%g1, %1, %%g1\n\t"
1612 "stxa %%g1, [%%g0] %0\n\t"
1615 : "i" (ASI_DCU_CONTROL_REG),
1616 "i" (DCU_IC | DCU_DC)
1620 cheetah_flush_ecache();
1621 else if (flush_line)
1622 cheetah_flush_ecache_line(afar);
1625 /* Re-enable error reporting */
1626 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1627 "or %%g1, %1, %%g1\n\t"
1628 "stxa %%g1, [%%g0] %0\n\t"
1631 : "i" (ASI_ESTATE_ERROR_EN),
1632 "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1635 /* Decide if we can continue after handling this trap and
1636 * logging the error.
1637 */
1638 recoverable = 1;
1639 if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1640 recoverable = 0;
1642 /* Re-check AFSR/AFAR. What we are looking for here is whether a new
1643 * error was logged while we had error reporting traps disabled.
1645 if (cheetah_recheck_errors(&local_snapshot)) {
1646 unsigned long new_afsr = local_snapshot.afsr;
1648 /* If we got a new asynchronous error, die... */
1649 if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1650 CHAFSR_WDU | CHAFSR_CPU |
1651 CHAFSR_IVU | CHAFSR_UE |
1652 CHAFSR_BERR | CHAFSR_TO))
1653 recoverable = 0;
1654 }
1657 cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1659 /* "Recoverable" here means we try to yank the page from ever
1660 * being newly used again. This depends upon a few things:
1661 * 1) Must be main memory, and AFAR must be valid.
1662 * 2) If we trapped from user, OK.
1663 * 3) Else, if we trapped from kernel we must find exception
1664 * table entry (ie. we have to have been accessing user
1667 * If AFAR is not in main memory, or we trapped from kernel
1668 * and cannot find an exception table entry, it is unacceptable
1669 * to try and continue.
1671 if (recoverable && is_memory) {
1672 if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1673 /* OK, usermode access. */
1676 const struct exception_table_entry *entry;
1678 entry = search_exception_tables(regs->tpc);
1680 /* OK, kernel access to userspace. */
1684 /* BAD, privileged state is corrupted. */
1689 if (pfn_valid(afar >> PAGE_SHIFT))
1690 get_page(pfn_to_page(afar >> PAGE_SHIFT));
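/* Pinning the page with get_page() leaves its reference count elevated
 * forever, so the frame containing the bad memory is never handed back
 * to the page allocator; this is the "yank the page" policy described
 * in the comment above.
 */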
1694 /* Only perform fixup if we still have a
1695 * recoverable condition.
1698 regs->tpc = entry->fixup;
1699 regs->tnpc = regs->tpc + 4;
1707 if (!recoverable)
1708 panic("Irrecoverable deferred error trap.\n");
1711 /* Handle a D/I cache parity error trap. TYPE is encoded as:
1713 * Bit0: 0=dcache,1=icache
1714 * Bit1: 0=recoverable,1=unrecoverable
1716 * The hardware has disabled both the I-cache and D-cache in
1717 * the %dcr register.
1719 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1722 __cheetah_flush_icache();
1724 cheetah_plus_zap_dcache_parity();
1725 cheetah_flush_dcache();
1727 /* Re-enable I-cache/D-cache */
1728 __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1729 "or %%g1, %1, %%g1\n\t"
1730 "stxa %%g1, [%%g0] %0\n\t"
1733 : "i" (ASI_DCU_CONTROL_REG),
1734 "i" (DCU_DC | DCU_IC)
1737 if (type & 0x2) {
1738 printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1739 smp_processor_id(),
1740 (type & 0x1) ? 'I' : 'D',
1741 regs->tpc);
1742 panic("Irrecoverable Cheetah+ parity error.");
1743 }
1745 printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1746 smp_processor_id(),
1747 (type & 0x1) ? 'I' : 'D',
1748 regs->tpc);
1749 }
1751 struct sun4v_error_entry {
1752 u64 err_handle;
1753 u64 err_stick;
1755 u32 err_type;
1756 #define SUN4V_ERR_TYPE_UNDEFINED 0
1757 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
1758 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2
1759 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
1760 #define SUN4V_ERR_TYPE_WARNING_RES 4
1762 u32 err_attrs;
1763 #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
1764 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002
1765 #define SUN4V_ERR_ATTRS_PIO 0x00000004
1766 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
1767 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
1768 #define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
1769 #define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
1770 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000
1772 u64 err_raddr;
1773 u32 err_size;
1774 u16 err_cpu;
1775 u16 err_pad;
1776 };
1778 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1779 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1781 static const char *sun4v_err_type_to_str(u32 type)
1784 case SUN4V_ERR_TYPE_UNDEFINED:
1786 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1787 return "uncorrected resumable";
1788 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1789 return "precise nonresumable";
1790 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1791 return "deferred nonresumable";
1792 case SUN4V_ERR_TYPE_WARNING_RES:
1793 return "warning resumable";
1799 static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1803 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1804 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1806 ent->err_handle, ent->err_stick,
1808 sun4v_err_type_to_str(ent->err_type));
1809 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1812 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1814 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1816 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1818 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1819 "integer-regs" : ""),
1820 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1822 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1824 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1826 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1827 "queue-full" : ""));
1828 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1830 ent->err_raddr, ent->err_size, ent->err_cpu);
1832 if ((cnt = atomic_read(ocnt)) != 0) {
1833 atomic_set(ocnt, 0);
1835 printk("%s: Queue overflowed %d times.\n",
1840 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1841 * Log the event and clear the first word of the entry.
1843 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1845 struct sun4v_error_entry *ent, local_copy;
1846 struct trap_per_cpu *tb;
1847 unsigned long paddr;
1852 tb = &trap_block[cpu];
1853 paddr = tb->resum_kernel_buf_pa + offset;
1856 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1858 /* We have a local copy now, so release the entry. */
1859 ent->err_handle = 0;
1864 sun4v_log_error(&local_copy, cpu,
1865 KERN_ERR "RESUMABLE ERROR",
1866 &sun4v_resum_oflow_cnt);
1869 /* If we try to printk() we'll probably make matters worse, by trying
1870 * to retake locks this cpu already holds or causing more errors. So
1871 * just bump a counter, and we'll report these counter bumps above.
1873 void sun4v_resum_overflow(struct pt_regs *regs)
1875 atomic_inc(&sun4v_resum_oflow_cnt);
1878 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1879 * Log the event, clear the first word of the entry, and die.
1881 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1883 struct sun4v_error_entry *ent, local_copy;
1884 struct trap_per_cpu *tb;
1885 unsigned long paddr;
1890 tb = &trap_block[cpu];
1891 paddr = tb->nonresum_kernel_buf_pa + offset;
1894 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1896 /* We have a local copy now, so release the entry. */
1897 ent->err_handle = 0;
1903 /* Check for the special PCI poke sequence. */
1904 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1905 pci_poke_faulted = 1;
1907 regs->tnpc = regs->tpc + 4;
1912 sun4v_log_error(&local_copy, cpu,
1913 KERN_EMERG "NON-RESUMABLE ERROR",
1914 &sun4v_nonresum_oflow_cnt);
1916 panic("Non-resumable error.");
1919 /* If we try to printk() we'll probably make matters worse, by trying
1920 * to retake locks this cpu already holds or causing more errors. So
1921 * just bump a counter, and we'll report these counter bumps above.
1923 void sun4v_nonresum_overflow(struct pt_regs *regs)
1925 /* XXX Actually, even this may not make much sense. Perhaps
1926 * XXX we should just pull the plug and panic directly from here?
1928 atomic_inc(&sun4v_nonresum_oflow_cnt);
1931 unsigned long sun4v_err_itlb_vaddr;
1932 unsigned long sun4v_err_itlb_ctx;
1933 unsigned long sun4v_err_itlb_pte;
1934 unsigned long sun4v_err_itlb_error;
1936 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1939 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1941 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1943 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1944 "pte[%lx] error[%lx]\n",
1945 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1946 sun4v_err_itlb_pte, sun4v_err_itlb_error);
1951 unsigned long sun4v_err_dtlb_vaddr;
1952 unsigned long sun4v_err_dtlb_ctx;
1953 unsigned long sun4v_err_dtlb_pte;
1954 unsigned long sun4v_err_dtlb_error;
1956 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1959 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1961 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1963 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1964 "pte[%lx] error[%lx]\n",
1965 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1966 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
1971 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1973 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1974 err, op);
1975 }
1977 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1979 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
1980 err, op);
1981 }
1983 void do_fpe_common(struct pt_regs *regs)
1985 if (regs->tstate & TSTATE_PRIV) {
1986 regs->tpc = regs->tnpc;
1987 regs->tnpc += 4;
1988 } else {
1989 unsigned long fsr = current_thread_info()->xfsr[0];
1992 if (test_thread_flag(TIF_32BIT)) {
1993 regs->tpc &= 0xffffffff;
1994 regs->tnpc &= 0xffffffff;
1996 info.si_signo = SIGFPE;
1998 info.si_addr = (void __user *)regs->tpc;
2000 info.si_code = __SI_FAULT;
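/* Per the SPARC V9 FSR layout, bits 16:14 hold ftt (the floating-point
 * trap type); the test below checks for ftt == 1, IEEE_754_exception,
 * and then decodes the cexc field: nvc=0x10, ofc=0x08, ufc=0x04,
 * dzc=0x02, nxc=0x01.
 */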
2001 if ((fsr & 0x1c000) == (1 << 14)) {
2002 if (fsr & 0x10)
2003 info.si_code = FPE_FLTINV;
2004 else if (fsr & 0x08)
2005 info.si_code = FPE_FLTOVF;
2006 else if (fsr & 0x04)
2007 info.si_code = FPE_FLTUND;
2008 else if (fsr & 0x02)
2009 info.si_code = FPE_FLTDIV;
2010 else if (fsr & 0x01)
2011 info.si_code = FPE_FLTRES;
2012 }
2013 force_sig_info(SIGFPE, &info, current);
2017 void do_fpieee(struct pt_regs *regs)
2019 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2020 0, 0x24, SIGFPE) == NOTIFY_STOP)
2023 do_fpe_common(regs);
2026 extern int do_mathemu(struct pt_regs *, struct fpustate *);
2028 void do_fpother(struct pt_regs *regs)
2030 struct fpustate *f = FPUSTATE;
2031 int ret = 0;
2033 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2034 0, 0x25, SIGFPE) == NOTIFY_STOP)
2037 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2038 case (2 << 14): /* unfinished_FPop */
2039 case (3 << 14): /* unimplemented_FPop */
2040 ret = do_mathemu(regs, f);
2041 break;
2042 }
2043 if (ret)
2044 return;
2045 do_fpe_common(regs);
2048 void do_tof(struct pt_regs *regs)
2052 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2053 0, 0x26, SIGEMT) == NOTIFY_STOP)
2056 if (regs->tstate & TSTATE_PRIV)
2057 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2058 if (test_thread_flag(TIF_32BIT)) {
2059 regs->tpc &= 0xffffffff;
2060 regs->tnpc &= 0xffffffff;
2062 info.si_signo = SIGEMT;
2064 info.si_code = EMT_TAGOVF;
2065 info.si_addr = (void __user *)regs->tpc;
2067 force_sig_info(SIGEMT, &info, current);
void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}
void instruction_dump (unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;
	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}
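/* This prints the three instructions before and five after the trap
 * PC, with the faulting word (i == 0) bracketed.  Roughly, with
 * illustrative values only:
 *
 *	Instruction DUMP:  82102000  92100011 <91d02010> 01000000 ...
 */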
static void user_instruction_dump (unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;
	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? ' ' : '<', buf[i], i == 3 ? ' ' : '>');
	printk("\n");
}
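/* Unlike the kernel-side dump above, user text has to be fetched with
 * copy_from_user() since the page may be missing or unmapped; on any
 * fault we print nothing at all rather than recurse into another trap.
 */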
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp, thread_base, ksp;
	void *tp = task_stack_page(tsk);
	struct reg_window *rw;
	int count = 0;

	ksp = (unsigned long) _ksp;

	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	do {
		/* Bogus frame pointer? */
		if (fp < (thread_base + sizeof(struct thread_info)) ||
		    fp >= (thread_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *)fp;
		pc = rw->ins[7];
		printk(" [%016lx] ", pc);
		print_symbol("%s\n", pc);
		fp = rw->ins[6] + STACK_BIAS;
	} while (++count < 16);
#ifndef CONFIG_KALLSYMS
	printk("\n");
#endif
}
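/* Each kernel stack frame holds a struct reg_window at %fp + STACK_BIAS;
 * ins[7] is the saved %i7 (return address) and ins[6] the saved frame
 * pointer, so the walk above simply chases saved %fp values until it
 * leaves the task's stack pages or hits the 16-frame cap.
 */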
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);
static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;
	return (struct reg_window *) (fp + STACK_BIAS);
}
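/* STACK_BIAS is the 2047-byte bias the 64-bit SPARC V9 ABI applies to
 * %sp and %fp; every raw frame-pointer value read out of a register
 * window must be de-biased like this before it is dereferenced.
 */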
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		} else if (tlb_type == hypervisor) {
			extern int vis_emul(struct pt_regs *, unsigned int);

			if (!vis_emul(regs, insn))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
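/* Of the emulations above: POPC traps as illegal on the UltraSPARC
 * chips that do not implement it in hardware, the quad-precision
 * LDQF/STQF forms are likewise unimplemented, and on sun4v some VIS
 * instructions need software help (hence the vis_emul() hook).
 */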
extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;
	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;
	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
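/* This services the "get PSR" software trap used by 32-bit compat
 * code: tstate_to_psr() folds the 64-bit TSTATE down into a SPARC32
 * PSR image returned in %i0, and tpc/tnpc are advanced so the trap
 * instruction is not re-executed.
 */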
struct trap_per_cpu trap_block[NR_CPUS];
/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();
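	/* The *_are_bolixed_dave() functions are deliberately never
	 * defined anywhere: when all the offset checks hold, the calls
	 * are compile-time dead code and vanish; if any check fails,
	 * the call survives and the kernel fails to link, turning a
	 * runtime bug into a build-time error.  A minimal sketch of
	 * the same trick (names hypothetical):
	 *
	 *	extern void foo_offset_is_wrong(void);	// never defined
	 *	if (FOO_BAR != offsetof(struct foo, bar))
	 *		foo_offset_is_wrong();	// optimized away when equal
	 */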
	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
		trap_per_cpu_offsets_are_bolixed_dave();
	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();
	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}