/*
 *  arch/s390/kernel/traps.c
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/kdebug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);
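
/*
 * Dispatch table for program checks: the low-level program check
 * handler in entry.S uses the interruption code to index this table
 * and calls the registered handler. Unassigned codes fall back to
 * default_trap_handler (see trap_init below).
 */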
typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;
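
/*
 * Register 15 is the stack pointer on s390; "la %0,0(15)" simply copies
 * its current value, so stack_pointer evaluates to the caller's stack
 * pointer at the point of use.
 */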
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

ATOMIC_NOTIFIER_HEAD(s390die_chain);

int register_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&s390die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&s390die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
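
/*
 * Layout note: each stack frame starts with a back chain pointer to the
 * caller's frame, and gprs[8] of the register save area holds the saved
 * r14, i.e. the return address printed for that frame.
 */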
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task_stack_page(task),
                             (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
        if (!task)
                task = current;
        debug_show_held_locks(task);
}
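
/*
 * Dump the raw stack contents (up to kstack_depth_to_print words,
 * stopping at the stack page boundary) and then print the call trace.
 */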
void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long * __r15 asm ("15");
        unsigned long *stack;
        int i;

        if (!sp)
                stack = task ? (unsigned long *) task->thread.ksp : __r15;
        else
                stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n       ");
                printk("%p ", (void *)*stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
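
/*
 * Print the PSW, the general purpose registers, the access registers
 * and the first bytes of the instruction stream at the PSW address,
 * prefixed with "User" or "Krnl" depending on the problem state bit.
 */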
void show_registers(struct pt_regs *regs)
{
        mm_segment_t old_fs;
        char *mode;
        int i;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk("           " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk("           " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk("           " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

        /* FIXME: this isn't needed any more but it changes the ksymoops
         * input. To remove or not to remove ... */
        save_access_regs(regs->acrs);
        printk("%s ACRS: %08x %08x %08x %08x\n", mode,
               regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
        printk("           %08x %08x %08x %08x\n",
               regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);

        /*
         * Print the first 20 byte of the instruction stream at the
         * time of the fault.
         */
        old_fs = get_fs();
        if (regs->psw.mask & PSW_MASK_PSTATE)
                set_fs(USER_DS);
        else
                set_fs(KERNEL_DS);
        printk("%s Code: ", mode);
        for (i = 0; i < 20; i++) {
                unsigned char c;
                if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
                        printk(" Bad PSW.");
                        break;
                }
                printk("%02x ", c);
        }
        set_fs(old_fs);
        printk("\n");
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
        struct pt_regs *regs;

        regs = task_pt_regs(task);
        buffer += sprintf(buffer, "task: %p, ksp: %p\n",
                          task, (void *)task->thread.ksp);
        buffer += sprintf(buffer, "User PSW : %p %p\n",
                          (void *) regs->psw.mask, (void *)regs->psw.addr);

        buffer += sprintf(buffer, "User GPRS: " FOURLONG,
                          regs->gprs[0], regs->gprs[1],
                          regs->gprs[2], regs->gprs[3]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[4], regs->gprs[5],
                          regs->gprs[6], regs->gprs[7]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[8], regs->gprs[9],
                          regs->gprs[10], regs->gprs[11]);
        buffer += sprintf(buffer, "           " FOURLONG,
                          regs->gprs[12], regs->gprs[13],
                          regs->gprs[14], regs->gprs[15]);
        buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
                          task->thread.acrs[0], task->thread.acrs[1],
                          task->thread.acrs[2], task->thread.acrs[3]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[4], task->thread.acrs[5],
                          task->thread.acrs[6], task->thread.acrs[7]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[8], task->thread.acrs[9],
                          task->thread.acrs[10], task->thread.acrs[11]);
        buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
                          task->thread.acrs[12], task->thread.acrs[13],
                          task->thread.acrs[14], task->thread.acrs[15]);
        return buffer;
}
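
/*
 * die() is the common end point for fatal kernel exceptions: it
 * serializes oops output via die_lock, prints the banner and the
 * register state, and panics if we are in interrupt context or if
 * panic_on_oops is set.
 */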
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
        show_regs(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        do_exit(SIGSEGV);
}

static inline void
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
        if (!sysctl_userprocess_debug)
                return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
        printk("User process fault: interruption code 0x%lX\n",
               interruption_code);
        show_regs(regs);
#endif
}
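
/*
 * Common trap handling: run the die notifier chain first; for faults in
 * user mode deliver the signal to the current task, for faults in kernel
 * mode try the exception tables and die() if there is no fixup.
 */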
static void __kprobes inline do_trap(long interruption_code, int signr,
                                     char *str, struct pt_regs *regs,
                                     siginfo_t *info)
{
        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (notify_die(DIE_TRAP, str, regs, interruption_code,
                       interruption_code, signr) == NOTIFY_STOP)
                return;

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                struct task_struct *tsk = current;

                tsk->thread.trap_no = interruption_code & 0xffff;
                force_sig_info(signr, info, tsk);
                report_user_fault(interruption_code, regs);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else
                        die(str, regs, interruption_code);
        }
}
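
/*
 * On a program check the PSW address points past the instruction that
 * caused it; subtracting the instruction length code stored in the
 * lowcore (pgm_ilc) yields the address of the faulting instruction.
 */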
static inline void __user *get_check_address(struct pt_regs *regs)
{
        return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}
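
/*
 * PER single-step event: give kprobes a chance to consume it via the
 * die notifier chain, otherwise raise SIGTRAP for ptraced tasks.
 */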
void __kprobes do_single_step(struct pt_regs *regs)
{
        if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
                       SIGTRAP) == NOTIFY_STOP){
                return;
        }
        if ((current->ptrace & PT_PTRACED) != 0)
                force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                local_irq_enable();
                report_user_fault(interruption_code, regs);
                do_exit(SIGSEGV);
        } else
                die("Unknown program exception", regs, interruption_code);
}
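
/*
 * DO_ERROR_INFO generates the simple program check handlers below:
 * each one fills in a siginfo with the given signal, si_code and fault
 * address and hands it to do_trap.
 */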
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
              ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
              FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
              FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
              FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
              FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
              FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
              FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
              FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
              ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
              ILL_ILLOPN, get_check_address(regs))
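
/*
 * Translate the data exception code (DXC) held in the FPC register
 * into the matching SIGFPE si_code before delivering the signal.
 */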
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
           int fpc, long interruption_code)
{
        siginfo_t si;

        si.si_signo = SIGFPE;
        si.si_errno = 0;
        si.si_addr = location;
        si.si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si.si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si.si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si.si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si.si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si.si_code = FPE_FLTRES;
        }
        current->thread.ieee_instruction_pointer = (addr_t) location;
        do_trap(interruption_code, SIGFPE,
                "floating point exception", regs, &si);
}
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 __user *location;
        int signal = 0;

        location = get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
                        return;
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace & PT_PTRACED)
                                force_sig(SIGTRAP, current);
                        else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        if (get_user(*((__u32 *) (opcode+2)),
                                     (__u32 __user *)(location+1)))
                                return;
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        if (get_user(*((__u16 *) (opcode+2)), location+1))
                                return;
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else {
                /*
                 * If we get an illegal op in kernel mode, send it through the
                 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
                 */
                if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
                               3, SIGTRAP) != NOTIFY_STOP)
                        signal = SIGILL;
        }

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal == SIGSEGV) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void __user *) location;
                do_trap(interruption_code, signal,
                        "user address fault", regs, &info);
        } else
#endif
        if (signal) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPC;
                info.si_addr = (void __user *) location;
                do_trap(interruption_code, signal,
                        "illegal operation", regs, &info);
        }
}
#ifdef CONFIG_MATHEMU
static void
specification_exception(struct pt_regs * regs, long interruption_code)
{
        __u8 opcode[6];
        __u16 __user *location = NULL;
        int signal = 0;

        location = (__u16 __user *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "specification exception", regs, &info);
        }
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
              ILL_ILLOPN, get_check_address(regs));
#endif
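
/*
 * Data exceptions carry a data exception code (DXC) in the FPC register.
 * IEEE exceptions become SIGFPE via do_fp_trap; with CONFIG_MATHEMU the
 * affected floating point instructions may instead be emulated for user
 * space.
 */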
static void data_exception(struct pt_regs * regs, long interruption_code)
{
        __u16 __user *location;
        int signal = 0;

        location = get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (MACHINE_HAS_IEEE)
                asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[8];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 __user *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "data exception", regs, &info);
        }
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
        siginfo_t info;

        /* Set user psw back to home space mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                regs->psw.mask |= PSW_ASC_HOME;

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_PRVOPC;
        info.si_addr = get_check_address(regs);
        do_trap(int_code, SIGILL, "space switch event", regs, &info);
}
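
/*
 * Reached from the kernel stack overflow check in entry.S
 * (CONFIG_CHECK_STACK); there is nothing to recover, so report and
 * panic.
 */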
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
        bust_spinlocks(1);
        printk("Kernel stack overflow.\n");
        show_regs(regs);
        bust_spinlocks(0);
        panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
        pgm_check_table[0x38] = &do_dat_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        pgm_check_table[0x40] = &do_monitor_call;
}