/*
 *  linux/arch/x86_64/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}
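
/*
 * The preempt_* variants above bracket handlers that run on an IST
 * stack (do_int3, do_debug, do_stack_segment below): preemption is
 * disabled before interrupts are re-enabled, so the handler cannot be
 * scheduled away while it is still running on a per-CPU exception
 * stack.
 */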
int kstack_depth_to_print = 12;
#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
				  &modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
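
/*
 * Example of the naming above: with DEBUG_STKSZ == 2 * EXCEPTION_STKSZ,
 * the lower half of the debug stack is reported as "#DB[1]" - the
 * ids[j][4] assignment patches the '?' placeholder of "#DB[?]" in place.
 */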
#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	void *t = (void *)tinfo;
	return p > t && p < t + THREAD_SIZE - 3;
}
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack,
		struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;

	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.rsp;
	}

	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		/* Use unlocked access here because except for NMIs \
		   we should be already protected against module unloads */ \
		if (__kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			ops->address(data, addr); \
		} \
	} while (0)

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, we follow the link from each stack to the next.
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			HANDLE_STACK (stack < estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	tinfo = task_thread_info(tsk);
	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
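
/*
 * Sketch of a custom dump_trace() consumer (hypothetical names, for
 * illustration only): dump_trace() drives whatever callbacks the caller
 * passes in, so a client that only wants to count kernel-text addresses
 * could look like the code below.  The real in-tree consumer in this
 * file is print_trace_ops, right after this sketch.
 */
static void count_warning(void *data, char *msg) { }
static void count_warning_symbol(void *data, char *msg, unsigned long sym) { }
static int count_stack(void *data, char *name) { return 0; }
static void count_address(void *data, unsigned long addr)
{
	(*(unsigned long *)data)++;	/* one more kernel-text hit */
}

static struct stacktrace_ops count_ops = {
	.warning	= count_warning,
	.warning_symbol	= count_warning_symbol,
	.stack		= count_stack,
	.address	= count_address,
};
/* usage: unsigned long n = 0; dump_trace(NULL, NULL, NULL, &count_ops, &n); */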
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr)
{
	printk_address(addr);
}

static struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
	printk("\n");
}
static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long * rsp)
{
	_show_stack(tsk, NULL, rsp);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(NULL, NULL, &dummy);
}
EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;
	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long*)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}
int is_valid_bugaddr(unsigned long rip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
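
/*
 * 0x0b0f is the two-byte ud2 instruction (0x0f 0x0b) read as a
 * little-endian u16.  BUG() plants ud2 at the bug site, so a #UD whose
 * faulting bytes match means we hit a genuine BUG() entry.
 */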
#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif
static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
}
void __kprobes __die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}
void die(const char * str, struct pt_regs * regs, long err)
{
	unsigned long flags = oops_begin();

	if (!user_mode(regs))
		report_bug(regs->rip);

	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}
void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, so let's at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGSEGV);
}
static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (user_mode(regs)) {
		/*
		 * We want error_code and trap_no set for userspace
		 * faults and kernelspace faults which result in
		 * die(), but not kernelspace faults which are fixed
		 * up.  die() gives the process no chance to handle
		 * the signal and notice the kernel fault information,
		 * so that won't result in polluting the information
		 * about previously queued, but not yet delivered,
		 * faults.  See also do_general_protection below.
		 */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = trapnr;

		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid, str,
			       regs->rip, regs->rsp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup)
			regs->rip = fixup->fixup;
		else {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_no = trapnr;
			die(str, regs, error_code);
		}
		return;
	}
}
#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}
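
/*
 * For example, DO_ERROR(4, SIGSEGV, "overflow", overflow) below expands
 * (roughly) to:
 *
 *	asmlinkage void do_overflow(struct pt_regs *regs, long error_code)
 *	{
 *		if (notify_die(DIE_TRAP, "overflow", regs, error_code,
 *			       4, SIGSEGV) == NOTIFY_STOP)
 *			return;
 *		conditional_sti(regs);
 *		do_trap(4, SIGSEGV, "overflow", regs, error_code, NULL);
 *	}
 */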
DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;

		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
			       tsk->comm, tsk->pid,
			       regs->rip, regs->rsp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}

		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = 13;
		if (notify_die(DIE_GPF, "general protection fault", regs,
				error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}
static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}
static __kprobes void
io_check_error(unsigned char reason, struct pt_regs * regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}
static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}
/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
								== NOTIFY_STOP)
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */
	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}
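
/*
 * The NMI 'reason' bits above come from the ISA System Control Port B
 * (port 0x61, read by get_nmi_reason() on PC hardware): bit 7 (0x80)
 * reports a memory/SERR parity error and bit 6 (0x40) an IOCHK error.
 * An NMI with neither bit set is treated as an IPI, a watchdog tick,
 * or an unknown NMI in the first branch.
 */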
/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}
/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->rsp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->eflags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs * regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7) {
			goto clear_dr7;
		}
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
		/*
		 * Was the TF flag set by a debugger? If so, clear it now,
		 * so that register information is correct.
		 */
		if (tsk->ptrace & PT_DTRACE) {
			regs->eflags &= ~TF_MASK;
			tsk->ptrace &= ~PT_DTRACE;
		}
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	preempt_conditional_cli(regs);
}
static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status.  0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit.  We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception.
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
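	/*
	 * Worked example: if the program clears ZM (bit 2) in the x87
	 * control word and then divides by zero, the FPU sets ZE (bit 2)
	 * in the status word, so swd & ~cwd & 0x3f == 0x004 and we
	 * deliver FPE_FLTDIV below.
	 */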
	switch (swd & ~cwd & 0x3f) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}
asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct * task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register.  Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
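	/*
	 * Worked example: the power-on MXCSR value 0x1f80 masks everything,
	 * so no #XF is raised.  If ZM (bit 9) is cleared and a zero divide
	 * then sets ZE (bit 2), (mxcsr & 0x1f80) >> 7 == 0x3b and
	 * ~0x3b & 0x004 == 0x004, selecting FPE_FLTDIV below.
	 */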
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
	me->fpu_counter++;
}
void __init trap_init(void)
{
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow);	/* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}
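
/*
 * Note on the gate types above: set_intr_gate() installs a DPL-0 gate,
 * so the vector can only be raised by hardware or kernel code, while
 * set_system_gate() uses DPL 3 so userspace can reach it with an
 * explicit instruction (int3, into, and the IA32 syscall vector).  The
 * *_ist variants additionally make the CPU switch to the named IST
 * stack on entry.
 */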
static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);
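
/*
 * Usage example: booting with "oops=panic kstack=64" on the kernel
 * command line panics the machine on any oops and makes _show_stack()
 * print 64 stack words instead of the default 12.
 */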