 * linux/arch/x86_64/entry.S
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 * entry.S contains the system-call and fault low-level handling routines.
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
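 *   (the architecture-defined frame is, from higher to lower addresses:
 *   SS, RSP, EFLAGS, CS, RIP)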
 * - schedule it carefully for the final hardware.

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * change.
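 * (Both macros only touch the iret-frame slots that SYSCALL does not save:
 * the user RSP is kept in %gs:pda_oldrsp and the user RFLAGS in the R11
 * slot of the partial frame.)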
	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	CFI_ADJUST_CFA_OFFSET 8
	pushq $(1<<9)	/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
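	/*
	 * The pushes above build an iret-style frame (plus an orig_rax slot)
	 * out of thin air, so that code entered through this fake frame can
	 * leave through the normal syscall/interrupt return paths.
	 */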
	.macro UNFAKE_STACK_FRAME
	CFI_ADJUST_CFA_OFFSET -(6*8)

	.macro CFI_DEFAULT_STACK start=1
	CFI_DEF_CFA_OFFSET SS+8
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
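	/*
	 * Note: the CFI_* macros above only emit dwarf2 unwind annotations
	 * for a full pt_regs frame; they generate no code.
	 */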
 * A newly forked process directly context switches into this.

	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call

	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * rax  system call number
 * rcx  return address for syscall/sysret, C arg3
 * r10  arg3 (--> moved to rcx for C)
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
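 *
 * Illustrative only (not part of this file): with the register convention
 * above, a user-space write(1, buf, len) reaches this entry point roughly as
 *
 *	movq	$1,%rax			# __NR_write
 *	movq	$1,%rdi			# arg0: fd
 *	leaq	buf(%rip),%rsi		# arg1: buffer (hypothetical symbol)
 *	movq	$len,%rdx		# arg2: count (hypothetical)
 *	syscall				# result comes back in %rax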
	/*CFI_REGISTER rflags,r11*/
	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	cmpq $__NR_syscall_max,%rax
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
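	/*
	 * %rax indexes the 8-byte-per-entry sys_call_table; the handler's
	 * return value is written back into the RAX slot of the frame so the
	 * return paths below hand it to user space.
	 */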
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.

	.globl ret_from_sys_call
	movl $_TIF_ALLWORK_MASK,%edi
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp,%rsp
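	/*
	 * Fast return: %rcx holds the user RIP and %r11 the user RFLAGS
	 * (reloaded from the R11 slot by RESTORE_ARGS above); %rsp is switched
	 * back to the saved user stack for the SYSRET that ends this path.
	 */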
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
	bt $TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8

	/* Handle a signal */
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	cmpq $__NR_syscall_max,%rax
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	call syscall_trace_leave
	RESTORE_TOP_OF_STACK %rbx
	jmp ret_from_sys_call
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.

ENTRY(int_ret_from_sys_call)
	CFI_DEF_CFA rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	CFI_REL_OFFSET rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET rax,RAX-ARGOFFSET
	CFI_REL_OFFSET rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET r8,R8-ARGOFFSET
	CFI_REL_OFFSET r9,R9-ARGOFFSET
	CFI_REL_OFFSET r10,R10-ARGOFFSET
	CFI_REL_OFFSET r11,R11-ARGOFFSET
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl $~TS_COMPAT,threadinfo_status(%rcx)

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
	bt $TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8

	/* handle signals and tracing -- both require a full stack frame */
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi

	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
 * Certain special system calls need to save a complete full stack frame.

	.macro PTREGSCALL label,func,arg
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
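	/*
	 * Each stub loads the address of the C handler into %rax and passes a
	 * pointer to the pt_regs frame in the argument register named above,
	 * then lets ptregscall_common build the full frame and do the call.
	 */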
ENTRY(ptregscall_common)
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	RESTORE_TOP_OF_STACK %r11
	CFI_REGISTER rip, r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0

	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	GET_THREAD_INFO(%rcx)
	bt $TIF_IA32,threadinfo_flags(%rcx)
	RESTORE_TOP_OF_STACK %r11
	CFI_REGISTER rip, r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	jmp int_ret_from_sys_call

 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.

ENTRY(stub_rt_sigreturn)
	CFI_ADJUST_CFA_OFFSET -8
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	jmp int_ret_from_sys_call
 * initial frame state for interrupts and exceptions

	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.

	/* 0(%rsp): interrupt number */
	.macro interrupt func
#ifdef CONFIG_DEBUG_INFO
 * Set up a stack frame pointer. This allows gdb to trace
 * back to the original stack.
	CFI_DEF_CFA_REGISTER rbp
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
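	/*
	 * Switch to the per-CPU interrupt stack unless we are already on it:
	 * the cmove below takes effect only when the incl has just brought
	 * pda_irqcount to zero, i.e. for the outermost interrupt level.  The
	 * old stack pointer is pushed so the exit path can switch back.
	 */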
1:	incl %gs:pda_irqcount	# RED-PEN should check preempt count
	movq %gs:pda_irqstackptr,%rax
	cmoveq %rax,%rsp	/* todo: This needs CFI annotation! */
	pushq %rdi		# save old stack
#ifndef CONFIG_DEBUG_INFO
	CFI_ADJUST_CFA_OFFSET 8

ENTRY(common_interrupt)
	/* 0(%rsp): oldrsp-ARGOFFSET */
#ifndef CONFIG_DEBUG_INFO
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO
	CFI_DEF_CFA_REGISTER rsp
	leaq ARGOFFSET(%rdi),%rsp	/* todo: This needs CFI annotation! */
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)

	/* Interrupt came from user space */
 * Has a correct top of stack, but a partial stack frame
 * %rcx: thread info. Interrupts off.
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
	movl threadinfo_flags(%rcx),%edx

	.section __ex_table,"a"
	.quad iret_label,bad_iret

	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
	movq $11,%rdi	/* SIGSEGV */

	/* edi: workmask, edx: work */
	bt $TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)

	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
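	/*
	 * preempt_schedule_irq is entered with interrupts disabled; it
	 * re-enables them while scheduling and returns with them disabled
	 * again.
	 */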
	.macro apicinterrupt num,func
	CFI_ADJUST_CFA_OFFSET 8

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt

#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt

 * Exception entry points.

	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8

	.macro errorentry sym
	CFI_ADJUST_CFA_OFFSET 8

	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen anywhere */
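	/*
	 * paranoidentry reads the GS base MSR to decide whether a swapgs is
	 * still needed, and for handlers with an IST stack it temporarily
	 * lowers the IST pointer in the TSS so that a nested exception of the
	 * same kind gets a fresh stack instead of overwriting this one.
	 */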
	.macro paranoidentry sym, ist=0
	movl $MSR_GS_BASE,%ecx
	movq %gs:pda_data_offset, %rbp
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)

 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.

	/* rdi slot contains rax, oldrax contains error code */
	CFI_ADJUST_CFA_OFFSET (14*8)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r15,R15
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)

	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi

	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	movl %ebp,%ebp	/* zero extend */
	cmpq $gs_change,RIP(%rsp)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
	CFI_ADJUST_CFA_OFFSET 8
2:	mfence	/* workaround */
	CFI_ADJUST_CFA_OFFSET -8

	.section __ex_table,"a"
	.quad gs_change,bad_gs
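	/*
	 * The __ex_table entry above sends a fault on the gs segment load at
	 * gs_change to the bad_gs fixup below instead of oopsing.
	 */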
	/* running with kernelgs */
	swapgs	/* switch back to user gs */

 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
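 *
 * Illustrative only (not from this file): a typical in-kernel caller would do
 *	kernel_thread(my_thread_fn, my_arg, CLONE_FS | CLONE_FILES);
 * where my_thread_fn and my_arg are hypothetical placeholders; the new thread
 * starts at child_rip below and runs fn(arg).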
	FAKE_STACK_FRAME $child_rip

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	orq kernel_thread_flags(%rip),%rdi

 * It isn't worth checking for a reschedule here, so internally to the x86_64
 * port you can rely on kernel_thread() not rescheduling the child before
 * returning; this avoids the need for hacks, for example to fork off the
 * per-CPU idle tasks.
 * [Hopefully no generic code relies on the reschedule -AK]

 * Here we are in the child and the registers are set as they were
 * at kernel_thread() invocation in the parent.
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
	je int_ret_from_sys_call

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error

ENTRY(device_not_available)
	zeroentry math_state_restore

	/* runs on exception stack */
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK

	/* runs on exception stack */
	CFI_ADJUST_CFA_OFFSET 8

 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.

	/* ebx: no swapgs flag */
	testl %ebx,%ebx		/* swapgs needed? */
	jnz paranoid_userspace
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	movq %rsp,%rdi		/* &pt_regs */
	movq %rax,%rsp		/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx		/* arg3: thread flags */
	xorl %esi,%esi		/* arg2: oldset */
	movq %rsp,%rdi		/* arg1: &pt_regs */
	call do_notify_resume
	jmp paranoid_userspace
	jmp paranoid_userspace
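	/*
	 * The loop above handles its work on the normal process stack
	 * (switched to above, "switch stack for scheduling") until no work
	 * bits remain, because scheduling and signal delivery must not run on
	 * the per-CPU exception stack.
	 */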
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK

	zeroentry do_overflow

	zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun

	zeroentry do_reserved

	/* runs on exception stack */
	paranoidentry do_double_fault

	errorentry do_invalid_TSS

ENTRY(segment_not_present)
	errorentry do_segment_not_present

	/* runs on exception stack */
ENTRY(stack_segment)
	paranoidentry do_stack_segment

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection

ENTRY(alignment_check)
	errorentry do_alignment_check

	zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check

	movq %gs:pda_irqstackptr,%rax
	CFI_DEF_CFA_REGISTER rdx
	incl %gs:pda_irqcount
	/*todo CFI_DEF_CFA_EXPRESSION ...*/
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount