/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>	/* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/kdebug.h>

#include <linux/kmod.h>
struct notifier_block *sparc64die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);

int register_die_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&die_notifier_lock, flags);
	err = notifier_chain_register(&sparc64die_chain, nb);
	spin_unlock_irqrestore(&die_notifier_lock, flags);
	return err;
}
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
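/* A sketch of the logged record, reconstructed from the trapstack[]
 * accesses in dump_tl1_traplog() below; the exact field order is
 * inferred from that code, not taken verbatim from this listing.
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};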
static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i;

	printk("TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
	       p->tl);
	for (i = 0; i < 4; i++) {
		printk("TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
	}
}
void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}
#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
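/* In the ACK sequence above, offset 0x0 in ASI_UDB_ERROR_W addresses
 * the high UDB's error register and offset 0x18 the low UDB's,
 * matching the udbh and udbl values written to each.
 */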
int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];
};
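/* Note that __pad[32 - 30] rounds the 30 u64 fields above (offsets
 * 0x00-0xe8) up to 32, making each record exactly 0x100 bytes;
 * presumably this lets the assembly handlers index the per-cpu log
 * with a simple shift rather than a multiply.
 */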
#define CHAFSR_INVALID		((u64)-1L)

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};
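/* cheetah_get_hipri() further down depends on this ordering: it scans
 * the active table front to back and returns the first AFSR bit that
 * matches, i.e. the highest-priority error present.  The { 0, NULL }
 * entry terminates each table's scan loop.
 */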
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO, JPAFSR_JETO_msg },
	{ JPAFSR_SCE, JPAFSR_SCE_msg },
	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ JPAFSR_OM, JPAFSR_OM_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ JPAFSR_ETP, JPAFSR_ETP_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ JPAFSR_UMS, JPAFSR_UMS_msg },
	{ JPAFSR_RUE, JPAFSR_RUE_msg },
	{ JPAFSR_RCE, JPAFSR_RCE_msg },
	{ JPAFSR_BP, JPAFSR_BP_msg },
	{ JPAFSR_WBP, JPAFSR_WBP_msg },
	{ JPAFSR_FRC, JPAFSR_FRC_msg },
	{ JPAFSR_FRU, JPAFSR_FRU_msg },
	/* This one does not update the AFAR. */
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};

static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;
/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;
static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}
extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
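/* The tl0_xxx and tl1_xxx symbols above are entries in the trap table
 * itself; cheetah_ecache_flush_init() below overwrites each one with
 * the corresponding cheetah vector.  The (8 * 4) byte copies in that
 * function reflect a trap table entry being eight 4-byte instructions.
 */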
void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int node, i, instance;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	instance = 0;
	while (!cpu_find_by_instance(instance, &node, NULL)) {
		unsigned long val;

		val = prom_getintdefault(node, "ecache-size",
					 (2 * 1024 * 1024));
		if (val > largest_size)
			largest_size = val;
		val = prom_getintdefault(node, "ecache-line-size", 64);
		if (val < smallest_linesize)
			smallest_linesize = val;
		instance++;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
}
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     "   membar	#Sync"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
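/* The symbolic codes used in the table below (NONE, C0-C8, M, M2, M3,
 * M4) are defined in a portion of the file not shown here.  From their
 * placement, C0-C8 appear to name the nine ECC check bits, bare
 * numbers the single failing data bit, and M/M2/M3/M4 multi-bit
 * syndromes with no single correctable bit.
 */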
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	/* ... MTAG syndrome entries elided in this listing ... */
};
/* Return the highest priority error condition mentioned. */
static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}
static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE | CHAFSR_CE | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Writing the sticky error bits back to the AFSR clears them. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else {
		cheetah_plus_zap_dcache_parity();
		cheetah_flush_dcache();
	}

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
/* The field names and types below are reconstructed from the printk
 * formats in sun4v_log_error() further down (%lx for the u64 fields,
 * %08x for err_type/err_attrs, %u for err_size/err_cpu); err_pad is
 * an assumed trailing pad to keep the record 8-byte aligned.
 */
struct sun4v_error_entry {
	u64		err_handle;
	u64		err_stick;

	u32		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;
	u32		err_size;
	u16		err_cpu;
	u16		err_pad;
};
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
static const char *sun4v_err_type_to_str(u32 type)
{
	switch (type) {
	case SUN4V_ERR_TYPE_UNDEFINED:
		return "undefined";
	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
		return "uncorrected resumable";
	case SUN4V_ERR_TYPE_PRECISE_NONRES:
		return "precise nonresumable";
	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
		return "deferred nonresumable";
	case SUN4V_ERR_TYPE_WARNING_RES:
		return "warning resumable";
	default:
		return "unknown";
	}
}
static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	sun4v_log_error(&local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
}
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors.  So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}
/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}

	sun4v_log_error(&local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors.  So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}
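/* A note on the %fsr decoding above: bits 14-16 hold the floating-point
 * trap type (ftt), and (1 << 14) == ftt 1 == IEEE_754_exception.  The
 * low five bits then form the current-exception field (cexc): nxc=0x01,
 * dzc=0x02, ufc=0x04, ofc=0x08, nvc=0x10, matching the si_code choices.
 */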
void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}
extern int do_mathemu(struct pt_regs *, struct fpustate *);

void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}
void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}
void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}
void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}
static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? ' ' : '<', buf[i], i == 3 ? ' ' : '>');
	printk("\n");
}
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long pc, fp, thread_base, ksp;
	void *tp = task_stack_page(tsk);
	struct reg_window *rw;
	int count = 0;

	ksp = (unsigned long) _ksp;

	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	do {
		/* Bogus frame pointer? */
		if (fp < (thread_base + sizeof(struct thread_info)) ||
		    fp >= (thread_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *)fp;
		pc = rw->ins[7];
		printk(" [%016lx] ", pc);
		print_symbol("%s\n", pc);
		fp = rw->ins[6] + STACK_BIAS;
	} while (++count < 16);
#ifndef CONFIG_KALLSYMS
	printk("\n");
#endif
}
void dump_stack(void)
{
	unsigned long *ksp;

	/* Grab our own frame pointer so we can trace the current stack. */
	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}

EXPORT_SYMBOL(dump_stack);
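/* A saved register window is only trustworthy if it lies inside the
 * task's own kernel stack (addresses below PAGE_OFFSET are tolerated
 * only for init_task) and is 8-byte aligned; anything else terminates
 * the backtrace in die_if_kernel() below.
 */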
static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}
static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
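/* Not every "illegal" instruction really is one: POPC traps here on
 * chips that lack it in hardware, and quad-width FP loads/stores
 * likewise arrive here for software emulation.  Only when emulation
 * fails (or the opcode matches nothing) does the task get a SIGILL.
 */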
extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
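/* Unaligned accesses from kernel mode are fixed up in place by
 * kernel_unaligned_trap(), which decodes the instruction and emulates
 * the access; user mode simply gets a SIGBUS carrying the bad address.
 */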
extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);

void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
/* Hand back a v8-style PSR synthesized from the v9 TSTATE, then step
 * past the trapping instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}
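/* One trap_per_cpu slot per possible cpu.  The low-level trap entry
 * assembly reaches into these at fixed byte offsets, which is what the
 * TRAP_PER_CPU_* sanity checks in trap_init() below are guarding.
 */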
struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void init_cur_cpu_trap(void)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = current_thread_info();
	p->pgd_paddr = 0;
}
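/* Neither of these functions is ever defined anywhere.  If one of the
 * offset checks below trips, the surviving call turns into a link
 * error, converting a silent offset mismatch into a build failure.
 */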
extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);

/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();
	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)))
		trap_per_cpu_offsets_are_bolixed_dave();
	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}