/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
 *
 * TODO:
 * - schedule it carefully for the final hardware.
 */
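/*
 * A rough picture of the frame (editor's sketch derived from the pt_regs
 * offsets used below; asm-offsets is authoritative), highest address first:
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- "top of stack" (hardware iret frame)
 *	ORIG_RAX
 *	RDI, RSI, RDX, RCX, RAX,
 *	R8, R9, R10, R11		<- partial stack frame ends here
 *	RBX, RBP, R12-R15		<- only present in a full stack frame
 */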
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * changes.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)		/* store the user RSP stashed in the PDA */
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	R11(%rsp),\tmp		/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm
	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
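/*
 * Typical pairing (sketch): a stub that calls into C with a pt_regs
 * argument brackets the call roughly as
 *	FIXUP_TOP_OF_STACK %r11
 *	call *%rax			# C function taking struct pt_regs
 *	RESTORE_TOP_OF_STACK %r11
 * which is exactly what ptregscall_common does below.
 */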
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax,%eax
	pushq	%rax			/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax			/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)			/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS		/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip		/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax			/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm
	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
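/*
 * Worked example (hypothetical values): a write(2) issued from user space
 * arrives here as
 *	rax = __NR_write, rdi = fd, rsi = buf, rdx = count,
 *	rcx = user return address (saved by SYSCALL), r11 = user eflags.
 * r10 carries what would be rcx in the C ABI, hence the r10->rcx fixup below.
 */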
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,0
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	sti
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx			/* arg3 moved into the C slot */
	call *sys_call_table(,%rax,8)	# XXX:	rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has an incomplete stack frame and undefined top of stack.
 */
	.globl ret_from_sys_call
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	cli
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
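/*
 * Note: SYSRET reloads RIP from %rcx and RFLAGS from %r11, which is why
 * the fast path above only needs to refill those two registers (plus the
 * saved user %rsp from the PDA) before executing it.
 */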
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	sti
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	cli
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja  1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
	jmp int_ret_from_sys_call
	CFI_ENDPROC
/*
 * Syscall return path ending with IRET.
 * Has a correct top of stack, but a partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
	CFI_STARTPROC	simple
	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET	r8,R8-ARGOFFSET
	CFI_REL_OFFSET	r9,R9-ARGOFFSET
	CFI_REL_OFFSET	r10,R10-ARGOFFSET
	CFI_REL_OFFSET	r11,R11-ARGOFFSET
	cli
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,threadinfo_status(%rcx)
	jmp  retint_swapgs
	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	cli
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	sti
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	cli
	jmp int_restore_rest

int_signal:
	testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	cli
	jmp int_with_check
	CFI_ENDPROC
/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
	.endm
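/*
 * For instance, "PTREGSCALL stub_fork, sys_fork, %rdi" expands to a
 * stub_fork that loads the address of sys_fork into %rax, points the
 * first C argument at the pt_regs frame, and tail-jumps to
 * ptregscall_common below.
 */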
	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
#ifdef CONFIG_DEBUG_INFO
	SAVE_ALL
	movq %rsp,%rdi
	/*
	 * Set up a stack frame pointer. This allows gdb to trace
	 * back to the original stack.
	 */
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
#else
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
#endif
	testl $3,CS(%rdi)
	je 1f
	swapgs
1:	incl	%gs:pda_irqcount	# RED-PEN should check preempt count
	movq %gs:pda_irqstackptr,%rax
	cmoveq %rax,%rsp		/*todo This needs CFI annotation! */
	pushq %rdi			# save old stack
#ifndef CONFIG_DEBUG_INFO
	CFI_ADJUST_CFA_OFFSET	8
#endif
	call \func
	.endm
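/*
 * Note on the stack switch above: pda_irqcount is -1 while no interrupt
 * is being handled (an assumption based on how the PDA is initialized),
 * so incl sets ZF only for the outermost interrupt, and cmoveq then moves
 * %rsp onto the per-CPU irq stack exactly once; nested interrupts keep
 * running on the stack they arrived on.
 */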
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	popq %rdi
#ifndef CONFIG_DEBUG_INFO
	CFI_ADJUST_CFA_OFFSET	-8
#endif
	cli
	decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO
	movq RBP(%rdi),%rbp
	CFI_DEF_CFA_REGISTER	rsp
#endif
	leaq ARGOFFSET(%rdi),%rsp	/*todo This needs CFI annotation! */
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame.
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  retint_careful
retint_swapgs:
	swapgs
retint_restore_args:
	cli
	RESTORE_ARGS 0,8,0
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	sti
	jmp do_exit
	.previous
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	sti
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	cli
	jmp retint_check

retint_signal:
	testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	jz    retint_swapgs
	sti
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	cli
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check
#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption. */
	/* rcx: threadinfo. Interrupts off. */
retint_kernel:
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt   $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif
	CFI_ENDPROC
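/*
 * The three guards above: preemption must not be disabled
 * (preempt_count == 0), a reschedule must actually be pending
 * (TIF_NEED_RESCHED), and the interrupted context must have had
 * interrupts enabled (IF, bit 9 of the saved EFLAGS); otherwise the
 * interrupt returns without preempting.
 */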
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $\num-256
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm
ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
#endif

#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
#endif
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	cli
	.endm
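/*
 * Two notes on the macro above. The rdmsr of MSR_GS_BASE decides whether
 * swapgs is needed: a kernel GS base is a negative (canonical-high)
 * address, so a clear sign bit in %edx means we interrupted with the user
 * GS base loaded and must swap. The subq/addq pair moves this vector's
 * IST slot down by EXCEPTION_STKSZ while \sym runs, so a recursive
 * exception on the same vector gets a fresh stack instead of clobbering
 * the frame still in use.
 */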
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	cli
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl threadinfo_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	swapgs
	RESTORE_ARGS 0,8,0
	jmp iret_label
	CFI_ENDPROC
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
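/*
 * The "zero extend" compare above re-checks RIP against the low 32 bits
 * of iret_label: on the B-stepping K8 erratum described above, the
 * reported RIP is truncated to 32 bits, so a faulting iret to compat
 * mode would otherwise fail to match.
 */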
	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	cli
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs		/* switch back to user gs */
	movl $0,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
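/*
 * Usage sketch (hypothetical caller, from C; worker_fn is a made-up
 * example function of type int (*)(void *)):
 *	kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES);
 * returns the new thread's pid in the parent, like fork().
 */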
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC

child_rip:
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	xorl %edi, %edi
	call do_exit
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
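/*
 * On success, sys_execve has replaced the register state in the fake
 * pt_regs frame with the new program's initial state, so the stub must
 * leave through the IRET path (int_ret_from_sys_call) to restore all of
 * it; the plain ret is only taken on failure.
 */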
ENTRY(execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error

ENTRY(device_not_available)
	zeroentry math_state_restore
	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	jmp paranoid_exit
	CFI_ENDPROC

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi
923 * "Paranoid" exit path from exception stack.
924 * Paranoid because this is used by NMIs and cannot take
925 * any kernel state for granted.
926 * We don't do kernel preemption checks here, because only
927 * NMI should be common and it does not enable IRQs and
928 * cannot get reschedule ticks.
930 /* ebx: no swapgs flag */
932 testl %ebx,%ebx /* swapgs needed? */
935 jnz paranoid_userspace
942 GET_THREAD_INFO(%rcx)
943 movl threadinfo_flags(%rcx),%ebx
944 andl $_TIF_WORK_MASK,%ebx
946 movq %rsp,%rdi /* &pt_regs */
948 movq %rax,%rsp /* switch stack for scheduling */
949 testl $_TIF_NEED_RESCHED,%ebx
950 jnz paranoid_schedule
951 movl %ebx,%edx /* arg3: thread flags */
953 xorl %esi,%esi /* arg2: oldset */
954 movq %rsp,%rdi /* arg1: &pt_regs */
955 call do_notify_resume
957 jmp paranoid_userspace
962 jmp paranoid_userspace
KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit
	CFI_ENDPROC
ENTRY(overflow)
	zeroentry do_overflow

ENTRY(invalid_op)
	zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun

ENTRY(reserved)
	zeroentry do_reserved

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit
	CFI_ENDPROC

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS

ENTRY(segment_not_present)
	errorentry do_segment_not_present

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection

ENTRY(alignment_check)
	errorentry do_alignment_check

ENTRY(divide_error)
	zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit
	CFI_ENDPROC
#endif
ENTRY(call_softirq)
	CFI_STARTPROC
	movq %gs:pda_irqstackptr,%rax
	movq %rsp,%rdx
	CFI_DEF_CFA_REGISTER	rdx
	incl %gs:pda_irqcount
	cmove %rax,%rsp
	pushq %rdx
	/*todo CFI_DEF_CFA_EXPRESSION ...*/
	call __do_softirq
	popq %rdx
	CFI_DEF_CFA_REGISTER	rsp
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC