2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame, this is
18 * only done for syscall tracing, signals or fork/exec et al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/config.h>
32 #include <linux/linkage.h>
33 #include <asm/segment.h>
35 #include <asm/cache.h>
36 #include <asm/errno.h>
37 #include <asm/dwarf2.h>
38 #include <asm/calling.h>
39 #include <asm/asm-offsets.h>
41 #include <asm/unistd.h>
42 #include <asm/thread_info.h>
43 #include <asm/hw_irq.h>
/* Without kernel preemption there is no preemption check on return to
   kernel mode: retint_kernel degenerates to a plain register restore. */
48 #ifndef CONFIG_PREEMPT
49 #define retint_kernel retint_restore_args
53 * C code is not supposed to know about undefined top of stack. Every time
54 * a C function with a pt_regs argument is called from the SYSCALL based
55 * fast path FIXUP_TOP_OF_STACK is needed.
56 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
60 /* %rsp:at FRAMEEND */
/*
 * FIXUP_TOP_OF_STACK: fill in the frame slots (SS, CS, EFLAGS and the saved
 * user RSP) that the SYSCALL fast path leaves undefined, so that C code
 * taking a pt_regs pointer sees a complete frame.  \tmp is a scratch
 * register and is clobbered.  SYSCALL stashed the user eflags in r11,
 * hence the copy from the R11 slot below.
 * NOTE(review): this listing elides some original lines (.endm etc.).
 */
61 .macro FIXUP_TOP_OF_STACK tmp
62 movq %gs:pda_oldrsp,\tmp /* user rsp was saved in the PDA on entry */
64 movq $__USER_DS,SS(%rsp)
65 movq $__USER_CS,CS(%rsp)
67 movq R11(%rsp),\tmp /* get eflags */
68 movq \tmp,EFLAGS(%rsp)
/*
 * RESTORE_TOP_OF_STACK: inverse of FIXUP_TOP_OF_STACK -- propagate a
 * (possibly ptrace-modified) saved RSP back to the PDA and the eflags
 * back into the R11 slot for SYSRET.  \offset adjusts for frames biased
 * by ARGOFFSET.  \tmp is clobbered.
 */
71 .macro RESTORE_TOP_OF_STACK tmp,offset=0
72 movq RSP-\offset(%rsp),\tmp
73 movq \tmp,%gs:pda_oldrsp
74 movq EFLAGS-\offset(%rsp),\tmp
75 movq \tmp,R11-\offset(%rsp)
/*
 * FAKE_STACK_FRAME: synthesize an interrupt-style hardware frame
 * (ss, rsp, eflags, cs, rip, then orig_rax) so a kernel thread can
 * "return" to \child_rip through the normal exit paths.
 * The eflags value 1<<9 sets only IF (interrupts enabled).
 */
78 .macro FAKE_STACK_FRAME child_rip
79 /* push in order ss, rsp, eflags, cs, rip */
82 CFI_ADJUST_CFA_OFFSET 8
83 /*CFI_REL_OFFSET ss,0*/
85 CFI_ADJUST_CFA_OFFSET 8
87 pushq $(1<<9) /* eflags - interrupts on */
88 CFI_ADJUST_CFA_OFFSET 8
89 /*CFI_REL_OFFSET rflags,0*/
90 pushq $__KERNEL_CS /* cs */
91 CFI_ADJUST_CFA_OFFSET 8
92 /*CFI_REL_OFFSET cs,0*/
93 pushq \child_rip /* rip */
94 CFI_ADJUST_CFA_OFFSET 8
96 pushq %rax /* orig rax */
97 CFI_ADJUST_CFA_OFFSET 8
/* UNFAKE_STACK_FRAME: drop the six fake qwords pushed above (6*8 bytes) */
100 .macro UNFAKE_STACK_FRAME
102 CFI_ADJUST_CFA_OFFSET -(6*8)
/*
 * CFI_DEFAULT_STACK: describe a full pt_regs frame to the DWARF unwinder.
 * CFA sits at SS+8 and every saved register is annotated at its canonical
 * frame slot.  The cs/ss/rflags annotations are deliberately commented out,
 * matching the rest of this file.
 */
105 .macro CFI_DEFAULT_STACK start=1
110 CFI_DEF_CFA_OFFSET SS+8
112 CFI_REL_OFFSET r15,R15
113 CFI_REL_OFFSET r14,R14
114 CFI_REL_OFFSET r13,R13
115 CFI_REL_OFFSET r12,R12
116 CFI_REL_OFFSET rbp,RBP
117 CFI_REL_OFFSET rbx,RBX
118 CFI_REL_OFFSET r11,R11
119 CFI_REL_OFFSET r10,R10
122 CFI_REL_OFFSET rax,RAX
123 CFI_REL_OFFSET rcx,RCX
124 CFI_REL_OFFSET rdx,RDX
125 CFI_REL_OFFSET rsi,RSI
126 CFI_REL_OFFSET rdi,RDI
127 CFI_REL_OFFSET rip,RIP
128 /*CFI_REL_OFFSET cs,CS*/
129 /*CFI_REL_OFFSET rflags,EFLAGS*/
130 CFI_REL_OFFSET rsp,RSP
131 /*CFI_REL_OFFSET ss,SS*/
134 * A newly forked process directly context switches into this.
/* rcx = current thread_info; decide how the new task returns to user mode */
140 GET_THREAD_INFO(%rcx)
141 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
/* CPL bits of the saved CS are 0 => parent was kernel_thread(): IRET path */
145 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
146 je int_ret_from_sys_call
/* 32-bit (IA32 emulation) tasks must not return via SYSRET either */
147 testl $_TIF_IA32,threadinfo_flags(%rcx)
148 jnz int_ret_from_sys_call
149 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
150 jmp ret_from_sys_call
/* tracing path: report syscall exit to ptrace, then reload thread_info */
153 call syscall_trace_leave
154 GET_THREAD_INFO(%rcx)
159 * System call entry. Up to 6 arguments in registers are supported.
161 * SYSCALL does not save anything on the stack and does not change the
167 * rax system call number
169 * rcx return address for syscall/sysret, C arg3
172 * r10 arg3 (--> moved to rcx for C)
175 * r11 eflags for syscall/sysret, temporary for C
176 * r12-r15,rbp,rbx saved by C code, not touched.
178 * Interrupts are off on entry.
179 * Only called from user space.
181 * XXX if we had a free scratch register we could save the RSP into the stack frame
182 * and report it properly in ps. Unfortunately we haven't.
184 * When user can change the frames always force IRET. That is because
185 * it deals with uncanonical addresses better. SYSRET has trouble
186 * with them due to bugs in both AMD and Intel CPUs.
/*
 * SYSCALL entry: interrupts are off and we are still on the user stack;
 * the hardware saved nothing, so user rsp must be stashed by hand.
 */
193 /*CFI_REGISTER rflags,r11*/
/* save user rsp in the PDA and switch to this CPU's kernel stack */
195 movq %rsp,%gs:pda_oldrsp
196 movq %gs:pda_kernelstack,%rsp
/* rax = syscall number, rcx = user return address (set by SYSCALL) */
199 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
200 movq %rcx,RIP-ARGOFFSET(%rsp)
201 CFI_REL_OFFSET rip,RIP-ARGOFFSET
202 GET_THREAD_INFO(%rcx)
/* any trace/audit/seccomp flag forces the slow (tracesys) path */
203 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
/* range-check the syscall number before indexing the table */
206 cmpq $__NR_syscall_max,%rax
209 call *sys_call_table(,%rax,8) # XXX: rip relative
210 movq %rax,RAX-ARGOFFSET(%rsp)
212 * Syscall return path ending with SYSRET (fast path)
213 * Has incomplete stack frame and undefined top of stack.
215 .globl ret_from_sys_call
/* edi = mask of work flags that force leaving the fast path */
217 movl $_TIF_ALLWORK_MASK,%edi
220 GET_THREAD_INFO(%rcx)
222 movl threadinfo_flags(%rcx),%edx
/* no work pending: reload user rip into rcx and user rsp, then SYSRET */
226 movq RIP-ARGOFFSET(%rsp),%rcx
228 RESTORE_ARGS 0,-ARG_SKIP,1
229 /*CFI_REGISTER rflags,r11*/
230 movq %gs:pda_oldrsp,%rsp
234 /* Handle reschedules */
235 /* edx: work, edi: workmask */
238 bt $TIF_NEED_RESCHED,%edx
242 CFI_ADJUST_CFA_OFFSET 8
245 CFI_ADJUST_CFA_OFFSET -8
248 /* Handle a signal */
251 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
254 /* Really a signal */
255 /* edx: work flags (arg3) */
/* deliver via ptregscall_common so the top of stack gets fixed up first */
256 leaq do_notify_resume(%rip),%rax
257 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
258 xorl %esi,%esi # oldset -> arg2
259 call ptregscall_common
260 1: movl $_TIF_NEED_RESCHED,%edi
261 /* Use IRET because user could have changed frame. This
262 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
/* bad syscall number: report -ENOSYS through the normal return path */
267 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
268 jmp ret_from_sys_call
270 /* Do syscall tracing */
/* slow path: build a full frame and bracket the syscall with ptrace hooks */
274 movq $-ENOSYS,RAX(%rsp)
275 FIXUP_TOP_OF_STACK %rdi
277 call syscall_trace_enter
278 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
280 cmpq $__NR_syscall_max,%rax
/* syscall convention passes arg4 in r10; C convention wants it in rcx */
282 movq %r10,%rcx /* fixup for C */
283 call *sys_call_table(,%rax,8)
284 movq %rax,RAX-ARGOFFSET(%rsp)
287 call syscall_trace_leave
288 RESTORE_TOP_OF_STACK %rbx
290 /* Use IRET because user could have changed frame */
291 jmp int_ret_from_sys_call
295 * Syscall return path ending with IRET.
296 * Has correct top of stack, but partial stack frame.
298 ENTRY(int_ret_from_sys_call)
/* describe the partial frame to the unwinder (cs/ss/rflags stay disabled) */
300 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
301 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
302 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
303 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
304 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
305 CFI_REL_OFFSET rip,RIP-ARGOFFSET
306 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
307 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
308 CFI_REL_OFFSET rax,RAX-ARGOFFSET
309 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
310 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
311 CFI_REL_OFFSET r8,R8-ARGOFFSET
312 CFI_REL_OFFSET r9,R9-ARGOFFSET
313 CFI_REL_OFFSET r10,R10-ARGOFFSET
314 CFI_REL_OFFSET r11,R11-ARGOFFSET
/* returning to kernel mode (CPL 0 in saved CS)? skip all user-mode work */
316 testl $3,CS-ARGOFFSET(%rsp)
317 je retint_restore_args
318 movl $_TIF_ALLWORK_MASK,%edi
319 /* edi: mask to check */
321 GET_THREAD_INFO(%rcx)
322 movl threadinfo_flags(%rcx),%edx
/* leaving the syscall: clear the compat-syscall status bit */
325 andl $~TS_COMPAT,threadinfo_status(%rcx)
328 /* Either reschedule or signal or syscall exit tracking needed. */
329 /* First do a reschedule test. */
330 /* edx: work, edi: workmask */
332 bt $TIF_NEED_RESCHED,%edx
336 CFI_ADJUST_CFA_OFFSET 8
339 CFI_ADJUST_CFA_OFFSET -8
343 /* handle signals and tracing -- both require a full stack frame */
347 /* Check for syscall exit trace */
348 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
351 CFI_ADJUST_CFA_OFFSET 8
352 leaq 8(%rsp),%rdi # &ptregs -> arg1
353 call syscall_trace_leave
355 CFI_ADJUST_CFA_OFFSET -8
/* exit-trace flags are now handled; drop them from the work mask */
356 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
361 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
363 movq %rsp,%rdi # &ptregs -> arg1
364 xorl %esi,%esi # oldset -> arg2
365 call do_notify_resume
366 1: movl $_TIF_NEED_RESCHED,%edi
374 * Certain special system calls that need to save a complete full stack frame.
/*
 * PTREGSCALL: stub generator for syscalls whose C handler takes a
 * struct pt_regs * as a trailing argument.  \arg names the register that
 * carries that pointer in the C calling convention for this syscall.
 */
377 .macro PTREGSCALL label,func,arg
380 leaq \func(%rip),%rax
381 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
382 jmp ptregscall_common
387 PTREGSCALL stub_clone, sys_clone, %r8
388 PTREGSCALL stub_fork, sys_fork, %rdi
389 PTREGSCALL stub_vfork, sys_vfork, %rdi
390 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
391 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
392 PTREGSCALL stub_iopl, sys_iopl, %rsi
/*
 * Common tail for the PTREGSCALL stubs: pop the return address (CFA -8),
 * complete the frame with FIXUP_TOP_OF_STACK, call the handler left in
 * %rax, undo the fixup and push the return address back (CFA +8).
 */
394 ENTRY(ptregscall_common)
396 CFI_ADJUST_CFA_OFFSET -8
397 CFI_REGISTER rip, r11
400 CFI_REGISTER rip, r15
401 FIXUP_TOP_OF_STACK %r11
403 RESTORE_TOP_OF_STACK %r11
405 CFI_REGISTER rip, r11
408 CFI_ADJUST_CFA_OFFSET 8
409 CFI_REL_OFFSET rip, 0
/* NOTE(review): this appears to be the stub_execve path (label elided in
   this listing); it must return via IRET to install the new user state */
416 CFI_ADJUST_CFA_OFFSET -8
417 CFI_REGISTER rip, r11
419 FIXUP_TOP_OF_STACK %r11
421 RESTORE_TOP_OF_STACK %r11
424 jmp int_ret_from_sys_call
428 * sigreturn is special because it needs to restore all registers on return.
429 * This cannot be done with SYSRET, so use the IRET return path instead.
431 ENTRY(stub_rt_sigreturn)
434 CFI_ADJUST_CFA_OFFSET -8
437 FIXUP_TOP_OF_STACK %r11
438 call sys_rt_sigreturn
/* propagate the handler's return value into the frame, then IRET out */
439 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
441 jmp int_ret_from_sys_call
445 * initial frame state for interrupts and exceptions
/* _frame \ref: unwinder state for a bare hardware frame whose saved RIP
   sits at offset RIP-\ref from the current stack pointer */
449 CFI_DEF_CFA rsp,SS+8-\ref
450 /*CFI_REL_OFFSET ss,SS-\ref*/
451 CFI_REL_OFFSET rsp,RSP-\ref
452 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
453 /*CFI_REL_OFFSET cs,CS-\ref*/
454 CFI_REL_OFFSET rip,RIP-\ref
457 /* initial frame state for interrupts (and exceptions without error code) */
458 #define INTR_FRAME _frame RIP
459 /* initial frame state for exceptions with error code (and interrupts with
460 vector already pushed) */
461 #define XCPT_FRAME _frame ORIG_RAX
464 * Interrupt entry/exit.
466 * Interrupt entry points save only callee clobbered registers in fast path.
468 * Entry runs with interrupts off.
471 /* 0(%rsp): interrupt number */
472 .macro interrupt func
474 #ifdef CONFIG_DEBUG_INFO
478 * Setup a stack frame pointer. This allows gdb to trace
479 * back to the original stack.
482 CFI_DEF_CFA_REGISTER rbp
485 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
/* switch to the per-CPU irq stack, but only on the outermost interrupt
   (incl sets ZF when the count reaches zero, driving the cmove below) */
490 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
491 movq %gs:pda_irqstackptr,%rax
492 cmoveq %rax,%rsp /*todo This needs CFI annotation! */
493 pushq %rdi # save old stack
494 #ifndef CONFIG_DEBUG_INFO
495 CFI_ADJUST_CFA_OFFSET 8
500 ENTRY(common_interrupt)
503 /* 0(%rsp): oldrsp-ARGOFFSET */
/* interrupt exit: leave the irq stack and decide user vs. kernel return */
506 #ifndef CONFIG_DEBUG_INFO
507 CFI_ADJUST_CFA_OFFSET -8
510 decl %gs:pda_irqcount
511 #ifdef CONFIG_DEBUG_INFO
513 CFI_DEF_CFA_REGISTER rsp
515 leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
/* CPL bits of the saved CS tell us whether we interrupted user space */
517 GET_THREAD_INFO(%rcx)
518 testl $3,CS-ARGOFFSET(%rsp)
521 /* Interrupt came from user space */
523 * Has a correct top of stack, but a partial stack frame
524 * %rcx: thread info. Interrupts off.
526 retint_with_reschedule:
527 movl $_TIF_WORK_MASK,%edi
529 movl threadinfo_flags(%rcx),%edx
/* iret itself can fault on a bad user frame; route it through bad_iret */
541 .section __ex_table,"a"
542 .quad iret_label,bad_iret
545 /* force a signal here? this matches i386 behaviour */
546 /* running with kernel gs */
548 movq $11,%rdi /* SIGSEGV */
553 /* edi: workmask, edx: work */
556 bt $TIF_NEED_RESCHED,%edx
560 CFI_ADJUST_CFA_OFFSET 8
563 CFI_ADJUST_CFA_OFFSET -8
564 GET_THREAD_INFO(%rcx)
569 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
/* signal delivery: mark "not in a syscall" (-1) before do_notify_resume */
573 movq $-1,ORIG_RAX(%rsp)
574 xorl %esi,%esi # oldset
575 movq %rsp,%rdi # &pt_regs
576 call do_notify_resume
579 movl $_TIF_NEED_RESCHED,%edi
580 GET_THREAD_INFO(%rcx)
583 #ifdef CONFIG_PREEMPT
584 /* Returning to kernel space. Check if we need preemption */
585 /* rcx: threadinfo. interrupts off. */
588 cmpl $0,threadinfo_preempt_count(%rcx)
589 jnz retint_restore_args
590 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
591 jnc retint_restore_args
/* only preempt if the interrupted context had IF (EFLAGS bit 9) set */
592 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
593 jnc retint_restore_args
594 call preempt_schedule_irq
/*
 * apicinterrupt: shared stub body for APIC-sourced interrupts -- pushes
 * the vector (push itself elided in this listing) and dispatches to \func.
 */
602 .macro apicinterrupt num,func
605 CFI_ADJUST_CFA_OFFSET 8
611 ENTRY(thermal_interrupt)
612 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
614 ENTRY(threshold_interrupt)
615 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
618 ENTRY(reschedule_interrupt)
619 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
/* one invalidate entry per TLB-shootdown vector */
621 .macro INVALIDATE_ENTRY num
622 ENTRY(invalidate_interrupt\num)
623 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
635 ENTRY(call_function_interrupt)
636 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
639 #ifdef CONFIG_X86_LOCAL_APIC
640 ENTRY(apic_timer_interrupt)
641 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
643 ENTRY(error_interrupt)
644 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
646 ENTRY(spurious_interrupt)
647 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
651 * Exception entry points.
/* zeroentry path: exceptions without a CPU-pushed error code push 0 so the
   frame layout matches the errorentry case */
655 pushq $0 /* push error code/oldrax */
656 CFI_ADJUST_CFA_OFFSET 8
657 pushq %rax /* push real oldrax to the rdi slot */
658 CFI_ADJUST_CFA_OFFSET 8
/* errorentry: CPU already pushed the error code, only rax needs saving */
664 .macro errorentry sym
667 CFI_ADJUST_CFA_OFFSET 8
673 /* error code is on the stack already */
674 /* handle NMI like exceptions that can happen everywhere */
675 .macro paranoidentry sym, ist=0
/* can't trust the saved CPL here: read MSR_GS_BASE to decide whether
   swapgs is needed */
679 movl $MSR_GS_BASE,%ecx
687 movq %gs:pda_data_offset, %rbp
690 movq ORIG_RAX(%rsp),%rsi
691 movq $-1,ORIG_RAX(%rsp)
/* for IST vectors: shift the TSS IST slot by one exception-stack size so a
   nested fault of the same kind gets a fresh stack, restore afterwards */
693 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
697 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
703 * Exception entry point. This expects an error code/orig_rax on the stack
704 * and the exception handler in %rax.
708 /* rdi slot contains rax, oldrax contains error code */
/* hand-rolled register save: reserve 14 qwords, annotate each slot as it
   is filled; rax is recovered from the rdi slot and parked in slot 10 */
711 CFI_ADJUST_CFA_OFFSET (14*8)
713 CFI_REL_OFFSET rsi,RSI
714 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
716 CFI_REL_OFFSET rdx,RDX
718 CFI_REL_OFFSET rcx,RCX
719 movq %rsi,10*8(%rsp) /* store rax */
720 CFI_REL_OFFSET rax,RAX
726 CFI_REL_OFFSET r10,R10
728 CFI_REL_OFFSET r11,R11
730 CFI_REL_OFFSET rbx,RBX
732 CFI_REL_OFFSET rbp,RBP
734 CFI_REL_OFFSET r12,R12
736 CFI_REL_OFFSET r13,R13
738 CFI_REL_OFFSET r14,R14
740 CFI_REL_OFFSET r15,R15
749 movq ORIG_RAX(%rsp),%rsi /* get error code */
750 movq $-1,ORIG_RAX(%rsp)
752 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
/* exception exit: recheck work flags before returning toward user mode */
757 GET_THREAD_INFO(%rcx)
760 movl threadinfo_flags(%rcx),%edx
761 movl $_TIF_WORK_MASK,%edi
771 /* There are two places in the kernel that can potentially fault with
772 usergs. Handle them here. The exception handlers after
773 iret run with kernel gs again, so don't set the user space flag.
774 * B stepping K8s sometimes report a truncated RIP for IRET
775 exceptions returning to compat mode. Check for these here too. */
/* detect the two known kernel fault sites that run with user gs (iret and
   the gs reload below) by comparing against the saved RIP */
776 leaq iret_label(%rip),%rbp
779 movl %ebp,%ebp /* zero extend */
782 cmpq $gs_change,RIP(%rsp)
786 /* Reload gs selector with exception handling */
787 /* edi: new selector */
791 CFI_ADJUST_CFA_OFFSET 8
/* the gs load can #GP on a bad selector; fixed up via __ex_table below */
796 2: mfence /* workaround */
799 CFI_ADJUST_CFA_OFFSET -8
803 .section __ex_table,"a"
805 .quad gs_change,bad_gs
808 /* running with kernelgs */
810 swapgs /* switch back to user gs */
817 * Create a kernel thread.
819 * C extern interface:
820 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
822 * asm input arguments:
823 * rdi: fn, rsi: arg, rdx: flags
/* build a fake interrupt frame so the child "returns" to child_rip */
827 FAKE_STACK_FRAME $child_rip
830 # rdi: flags, rsi: usp, rdx: will be &pt_regs
/* OR the mandatory kernel-thread clone flags on top of the caller's */
832 orq kernel_thread_flags(%rip),%rdi
845 * It isn't worth to check for reschedule here,
846 * so internally to the x86_64 port you can rely on kernel_thread()
847 * not to reschedule the child before returning, this avoids the need
848 * of hacks for example to fork off the per-CPU idle tasks.
849 * [Hopefully no generic code relies on the reschedule -AK]
859 * Here we are in the child and the registers are set as they were
860 * at kernel_thread() invocation in the parent.
870 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
872 * C extern interface:
873 * extern long execve(char *name, char **argv, char **envp)
875 * asm input arguments:
876 * rdi: name, rsi: argv, rdx: envp
878 * We want to fallback into:
879 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
881 * do_sys_execve asm fallback arguments:
882 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
892 je int_ret_from_sys_call
/* simple exception entries: zeroentry = no CPU error code (0 pushed),
   errorentry = CPU pushed an error code */
898 KPROBE_ENTRY(page_fault)
899 errorentry do_page_fault
902 ENTRY(coprocessor_error)
903 zeroentry do_coprocessor_error
905 ENTRY(simd_coprocessor_error)
906 zeroentry do_simd_coprocessor_error
908 ENTRY(device_not_available)
909 zeroentry math_state_restore
911 /* runs on exception stack */
915 CFI_ADJUST_CFA_OFFSET 8
916 paranoidentry do_debug, DEBUG_STACK
921 /* runs on exception stack */
925 CFI_ADJUST_CFA_OFFSET 8
928 * "Paranoid" exit path from exception stack.
929 * Paranoid because this is used by NMIs and cannot take
930 * any kernel state for granted.
931 * We don't do kernel preemption checks here, because only
932 * NMI should be common and it does not enable IRQs and
933 * cannot get reschedule ticks.
935 /* ebx: no swapgs flag */
937 testl %ebx,%ebx /* swapgs needed? */
940 jnz paranoid_userspace
/* user-mode work loop: work bits kept in ebx; scheduling and signal
   delivery run on a switched stack */
947 GET_THREAD_INFO(%rcx)
948 movl threadinfo_flags(%rcx),%ebx
949 andl $_TIF_WORK_MASK,%ebx
951 movq %rsp,%rdi /* &pt_regs */
953 movq %rax,%rsp /* switch stack for scheduling */
954 testl $_TIF_NEED_RESCHED,%ebx
955 jnz paranoid_schedule
956 movl %ebx,%edx /* arg3: thread flags */
958 xorl %esi,%esi /* arg2: oldset */
959 movq %rsp,%rdi /* arg1: &pt_regs */
960 call do_notify_resume
962 jmp paranoid_userspace
967 jmp paranoid_userspace
/* int3 runs on the debug IST stack, same as do_debug above */
974 CFI_ADJUST_CFA_OFFSET 8
975 paranoidentry do_int3, DEBUG_STACK
981 zeroentry do_overflow
987 zeroentry do_invalid_op
989 ENTRY(coprocessor_segment_overrun)
990 zeroentry do_coprocessor_segment_overrun
993 zeroentry do_reserved
995 /* runs on exception stack */
998 paranoidentry do_double_fault
1003 errorentry do_invalid_TSS
1005 ENTRY(segment_not_present)
1006 errorentry do_segment_not_present
1008 /* runs on exception stack */
1009 ENTRY(stack_segment)
1011 paranoidentry do_stack_segment
1015 KPROBE_ENTRY(general_protection)
1016 errorentry do_general_protection
1019 ENTRY(alignment_check)
1020 errorentry do_alignment_check
1023 zeroentry do_divide_error
1025 ENTRY(spurious_interrupt_bug)
1026 zeroentry do_spurious_interrupt_bug
1028 #ifdef CONFIG_X86_MCE
1029 /* runs on exception stack */
1030 ENTRY(machine_check)
1033 CFI_ADJUST_CFA_OFFSET 8
1034 paranoidentry do_machine_check
/* helper that runs its callee on the per-CPU irq stack, bumping
   pda_irqcount around the call (NOTE(review): label elided in this
   listing; presumably call_softirq -- confirm against full source) */
1041 movq %gs:pda_irqstackptr,%rax
1043 CFI_DEF_CFA_REGISTER rdx
1044 incl %gs:pda_irqcount
1047 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1050 CFI_DEF_CFA_REGISTER rsp
1051 decl %gs:pda_irqcount