2 * linux/arch/x86_64/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
12 * entry.S contains the system-call and fault low-level handling routines.
14 * NOTE: This code handles signal-recognition, which happens every time
15 * after an interrupt and after each system call.
17 * Normal syscalls and interrupts don't save a full stack frame; this is
18 * only done for syscall tracing, signals or fork/exec et al.
20 * A note on terminology:
21 * - top of stack: Architecture defined interrupt frame from SS to RIP
22 * at the top of the kernel process stack.
23 * - partial stack frame: partially saved registers up to R11.
24 * - full stack frame: Like partial stack frame, but all registers saved.
27 * - schedule it carefully for the final hardware.
31 #include <linux/config.h>
32 #include <linux/linkage.h>
33 #include <asm/segment.h>
35 #include <asm/cache.h>
36 #include <asm/errno.h>
37 #include <asm/dwarf2.h>
38 #include <asm/calling.h>
39 #include <asm/asm-offsets.h>
41 #include <asm/unistd.h>
42 #include <asm/thread_info.h>
43 #include <asm/hw_irq.h>
48 #ifndef CONFIG_PREEMPT
49 #define retint_kernel retint_restore_args
53 * C code is not supposed to know about undefined top of stack. Every time
54 * a C function with a pt_regs argument is called from the SYSCALL-based
55 * fast path, FIXUP_TOP_OF_STACK is needed.
56 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
60 /* %rsp:at FRAMEEND */
61 .macro FIXUP_TOP_OF_STACK tmp
62 movq %gs:pda_oldrsp,\tmp
64 movq $__USER_DS,SS(%rsp)
65 movq $__USER_CS,CS(%rsp)
67 movq R11(%rsp),\tmp /* get eflags */
68 movq \tmp,EFLAGS(%rsp)
71 .macro RESTORE_TOP_OF_STACK tmp,offset=0
72 movq RSP-\offset(%rsp),\tmp
73 movq \tmp,%gs:pda_oldrsp
74 movq EFLAGS-\offset(%rsp),\tmp
75 movq \tmp,R11-\offset(%rsp)
78 .macro FAKE_STACK_FRAME child_rip
79 /* push in order ss, rsp, eflags, cs, rip */
82 CFI_ADJUST_CFA_OFFSET 8
83 /*CFI_REL_OFFSET ss,0*/
85 CFI_ADJUST_CFA_OFFSET 8
87 pushq $(1<<9) /* eflags - interrupts on */
88 CFI_ADJUST_CFA_OFFSET 8
89 /*CFI_REL_OFFSET rflags,0*/
90 pushq $__KERNEL_CS /* cs */
91 CFI_ADJUST_CFA_OFFSET 8
92 /*CFI_REL_OFFSET cs,0*/
93 pushq \child_rip /* rip */
94 CFI_ADJUST_CFA_OFFSET 8
96 pushq %rax /* orig rax */
97 CFI_ADJUST_CFA_OFFSET 8
100 .macro UNFAKE_STACK_FRAME
102 CFI_ADJUST_CFA_OFFSET -(6*8)
105 .macro CFI_DEFAULT_STACK start=1
110 CFI_DEF_CFA_OFFSET SS+8
112 CFI_REL_OFFSET r15,R15
113 CFI_REL_OFFSET r14,R14
114 CFI_REL_OFFSET r13,R13
115 CFI_REL_OFFSET r12,R12
116 CFI_REL_OFFSET rbp,RBP
117 CFI_REL_OFFSET rbx,RBX
118 CFI_REL_OFFSET r11,R11
119 CFI_REL_OFFSET r10,R10
122 CFI_REL_OFFSET rax,RAX
123 CFI_REL_OFFSET rcx,RCX
124 CFI_REL_OFFSET rdx,RDX
125 CFI_REL_OFFSET rsi,RSI
126 CFI_REL_OFFSET rdi,RDI
127 CFI_REL_OFFSET rip,RIP
128 /*CFI_REL_OFFSET cs,CS*/
129 /*CFI_REL_OFFSET rflags,EFLAGS*/
130 CFI_REL_OFFSET rsp,RSP
131 /*CFI_REL_OFFSET ss,SS*/
134 * A newly forked process directly context switches into this.
140 GET_THREAD_INFO(%rcx)
141 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
145 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
146 je int_ret_from_sys_call
147 testl $_TIF_IA32,threadinfo_flags(%rcx)
148 jnz int_ret_from_sys_call
149 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
150 jmp ret_from_sys_call
153 call syscall_trace_leave
154 GET_THREAD_INFO(%rcx)
160 * System call entry. Up to 6 arguments in registers are supported.
162 * SYSCALL does not save anything on the stack and does not change the
168 * rax system call number
170 * rcx return address for syscall/sysret, C arg3
173 * r10 arg3 (--> moved to rcx for C)
176 * r11 eflags for syscall/sysret, temporary for C
177 * r12-r15,rbp,rbx saved by C code, not touched.
179 * Interrupts are off on entry.
180 * Only called from user space.
182 * XXX if we had a free scratch register we could save the RSP into the stack frame
183 * and report it properly in ps. Unfortunately we don't have one.
185 * When the user can change the frame, always force IRET. That is because
186 * IRET deals with non-canonical addresses better. SYSRET has trouble
187 * with them due to bugs in both AMD and Intel CPUs.
192 CFI_DEF_CFA rsp,PDA_STACKOFFSET
194 /*CFI_REGISTER rflags,r11*/
196 movq %rsp,%gs:pda_oldrsp
197 movq %gs:pda_kernelstack,%rsp
200 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
201 movq %rcx,RIP-ARGOFFSET(%rsp)
202 CFI_REL_OFFSET rip,RIP-ARGOFFSET
203 GET_THREAD_INFO(%rcx)
204 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
207 cmpq $__NR_syscall_max,%rax
210 call *sys_call_table(,%rax,8) # XXX: rip relative
211 movq %rax,RAX-ARGOFFSET(%rsp)
213 * Syscall return path ending with SYSRET (fast path)
214 * Has incomplete stack frame and undefined top of stack.
216 .globl ret_from_sys_call
218 movl $_TIF_ALLWORK_MASK,%edi
221 GET_THREAD_INFO(%rcx)
223 movl threadinfo_flags(%rcx),%edx
227 movq RIP-ARGOFFSET(%rsp),%rcx
229 RESTORE_ARGS 0,-ARG_SKIP,1
230 /*CFI_REGISTER rflags,r11*/
231 movq %gs:pda_oldrsp,%rsp
235 /* Handle reschedules */
236 /* edx: work, edi: workmask */
239 bt $TIF_NEED_RESCHED,%edx
243 CFI_ADJUST_CFA_OFFSET 8
246 CFI_ADJUST_CFA_OFFSET -8
249 /* Handle a signal */
252 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
255 /* Really a signal */
256 /* edx: work flags (arg3) */
257 leaq do_notify_resume(%rip),%rax
258 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
259 xorl %esi,%esi # oldset -> arg2
260 call ptregscall_common
261 1: movl $_TIF_NEED_RESCHED,%edi
262 /* Use IRET because user could have changed frame. This
263 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
268 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
269 jmp ret_from_sys_call
271 /* Do syscall tracing */
275 movq $-ENOSYS,RAX(%rsp)
276 FIXUP_TOP_OF_STACK %rdi
278 call syscall_trace_enter
279 LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
281 cmpq $__NR_syscall_max,%rax
283 movq %r10,%rcx /* fixup for C */
284 call *sys_call_table(,%rax,8)
285 1: movq %rax,RAX-ARGOFFSET(%rsp)
286 /* Use IRET because user could have changed frame */
287 jmp int_ret_from_sys_call
292 * Syscall return path ending with IRET.
293 * Has correct top of stack, but partial stack frame.
295 ENTRY(int_ret_from_sys_call)
297 CFI_DEF_CFA rsp,SS+8-ARGOFFSET
298 /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
299 CFI_REL_OFFSET rsp,RSP-ARGOFFSET
300 /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
301 /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
302 CFI_REL_OFFSET rip,RIP-ARGOFFSET
303 CFI_REL_OFFSET rdx,RDX-ARGOFFSET
304 CFI_REL_OFFSET rcx,RCX-ARGOFFSET
305 CFI_REL_OFFSET rax,RAX-ARGOFFSET
306 CFI_REL_OFFSET rdi,RDI-ARGOFFSET
307 CFI_REL_OFFSET rsi,RSI-ARGOFFSET
308 CFI_REL_OFFSET r8,R8-ARGOFFSET
309 CFI_REL_OFFSET r9,R9-ARGOFFSET
310 CFI_REL_OFFSET r10,R10-ARGOFFSET
311 CFI_REL_OFFSET r11,R11-ARGOFFSET
313 testl $3,CS-ARGOFFSET(%rsp)
314 je retint_restore_args
315 movl $_TIF_ALLWORK_MASK,%edi
316 /* edi: mask to check */
318 GET_THREAD_INFO(%rcx)
319 movl threadinfo_flags(%rcx),%edx
322 andl $~TS_COMPAT,threadinfo_status(%rcx)
325 /* Either reschedule or signal or syscall exit tracking needed. */
326 /* First do a reschedule test. */
327 /* edx: work, edi: workmask */
329 bt $TIF_NEED_RESCHED,%edx
333 CFI_ADJUST_CFA_OFFSET 8
336 CFI_ADJUST_CFA_OFFSET -8
340 /* handle signals and tracing -- both require a full stack frame */
344 /* Check for syscall exit trace */
345 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
348 CFI_ADJUST_CFA_OFFSET 8
349 leaq 8(%rsp),%rdi # &ptregs -> arg1
350 call syscall_trace_leave
352 CFI_ADJUST_CFA_OFFSET -8
353 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
358 testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
360 movq %rsp,%rdi # &ptregs -> arg1
361 xorl %esi,%esi # oldset -> arg2
362 call do_notify_resume
363 1: movl $_TIF_NEED_RESCHED,%edi
369 END(int_ret_from_sys_call)
372 * Certain special system calls that need to save a complete full stack frame.
375 .macro PTREGSCALL label,func,arg
378 leaq \func(%rip),%rax
379 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
380 jmp ptregscall_common
386 PTREGSCALL stub_clone, sys_clone, %r8
387 PTREGSCALL stub_fork, sys_fork, %rdi
388 PTREGSCALL stub_vfork, sys_vfork, %rdi
389 PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
390 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
391 PTREGSCALL stub_iopl, sys_iopl, %rsi
393 ENTRY(ptregscall_common)
395 CFI_ADJUST_CFA_OFFSET -8
396 CFI_REGISTER rip, r11
399 CFI_REGISTER rip, r15
400 FIXUP_TOP_OF_STACK %r11
402 RESTORE_TOP_OF_STACK %r11
404 CFI_REGISTER rip, r11
407 CFI_ADJUST_CFA_OFFSET 8
408 CFI_REL_OFFSET rip, 0
411 END(ptregscall_common)
416 CFI_ADJUST_CFA_OFFSET -8
417 CFI_REGISTER rip, r11
419 FIXUP_TOP_OF_STACK %r11
421 RESTORE_TOP_OF_STACK %r11
424 jmp int_ret_from_sys_call
429 * sigreturn is special because it needs to restore all registers on return.
430 * This cannot be done with SYSRET, so use the IRET return path instead.
432 ENTRY(stub_rt_sigreturn)
435 CFI_ADJUST_CFA_OFFSET -8
438 FIXUP_TOP_OF_STACK %r11
439 call sys_rt_sigreturn
440 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
442 jmp int_ret_from_sys_call
444 END(stub_rt_sigreturn)
447 * initial frame state for interrupts and exceptions
451 CFI_DEF_CFA rsp,SS+8-\ref
452 /*CFI_REL_OFFSET ss,SS-\ref*/
453 CFI_REL_OFFSET rsp,RSP-\ref
454 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
455 /*CFI_REL_OFFSET cs,CS-\ref*/
456 CFI_REL_OFFSET rip,RIP-\ref
459 /* initial frame state for interrupts (and exceptions without error code) */
460 #define INTR_FRAME _frame RIP
461 /* initial frame state for exceptions with error code (and interrupts with
462 vector already pushed) */
463 #define XCPT_FRAME _frame ORIG_RAX
466 * Interrupt entry/exit.
468 * Interrupt entry points save only callee clobbered registers in fast path.
470 * Entry runs with interrupts off.
473 /* 0(%rsp): interrupt number */
474 .macro interrupt func
477 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
479 CFI_ADJUST_CFA_OFFSET 8
480 CFI_REL_OFFSET rbp, 0
482 CFI_DEF_CFA_REGISTER rbp
486 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
487 cmoveq %gs:pda_irqstackptr,%rsp
491 ENTRY(common_interrupt)
494 /* 0(%rsp): oldrsp-ARGOFFSET */
497 decl %gs:pda_irqcount
499 CFI_DEF_CFA_REGISTER rsp
500 CFI_ADJUST_CFA_OFFSET -8
502 GET_THREAD_INFO(%rcx)
503 testl $3,CS-ARGOFFSET(%rsp)
506 /* Interrupt came from user space */
508 * Has a correct top of stack, but a partial stack frame
509 * %rcx: thread info. Interrupts off.
511 retint_with_reschedule:
512 movl $_TIF_WORK_MASK,%edi
514 movl threadinfo_flags(%rcx),%edx
526 .section __ex_table,"a"
527 .quad iret_label,bad_iret
530 /* force a signal here? this matches i386 behaviour */
531 /* running with kernel gs */
533 movq $11,%rdi /* SIGSEGV */
538 /* edi: workmask, edx: work */
541 bt $TIF_NEED_RESCHED,%edx
545 CFI_ADJUST_CFA_OFFSET 8
548 CFI_ADJUST_CFA_OFFSET -8
549 GET_THREAD_INFO(%rcx)
554 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
558 movq $-1,ORIG_RAX(%rsp)
559 xorl %esi,%esi # oldset
560 movq %rsp,%rdi # &pt_regs
561 call do_notify_resume
564 movl $_TIF_NEED_RESCHED,%edi
565 GET_THREAD_INFO(%rcx)
568 #ifdef CONFIG_PREEMPT
569 /* Returning to kernel space. Check if we need preemption */
570 /* rcx: threadinfo. interrupts off. */
573 cmpl $0,threadinfo_preempt_count(%rcx)
574 jnz retint_restore_args
575 bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
576 jnc retint_restore_args
577 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
578 jnc retint_restore_args
579 call preempt_schedule_irq
584 END(common_interrupt)
589 .macro apicinterrupt num,func
592 CFI_ADJUST_CFA_OFFSET 8
598 ENTRY(thermal_interrupt)
599 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
600 END(thermal_interrupt)
602 ENTRY(threshold_interrupt)
603 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
604 END(threshold_interrupt)
607 ENTRY(reschedule_interrupt)
608 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
609 END(reschedule_interrupt)
611 .macro INVALIDATE_ENTRY num
612 ENTRY(invalidate_interrupt\num)
613 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
614 END(invalidate_interrupt\num)
626 ENTRY(call_function_interrupt)
627 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
628 END(call_function_interrupt)
631 #ifdef CONFIG_X86_LOCAL_APIC
632 ENTRY(apic_timer_interrupt)
633 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
634 END(apic_timer_interrupt)
636 ENTRY(error_interrupt)
637 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
640 ENTRY(spurious_interrupt)
641 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
642 END(spurious_interrupt)
646 * Exception entry points.
650 pushq $0 /* push error code/oldrax */
651 CFI_ADJUST_CFA_OFFSET 8
652 pushq %rax /* push real oldrax to the rdi slot */
653 CFI_ADJUST_CFA_OFFSET 8
659 .macro errorentry sym
662 CFI_ADJUST_CFA_OFFSET 8
668 /* error code is on the stack already */
669 /* handle NMI like exceptions that can happen everywhere */
670 .macro paranoidentry sym, ist=0
674 movl $MSR_GS_BASE,%ecx
682 movq %gs:pda_data_offset, %rbp
685 movq ORIG_RAX(%rsp),%rsi
686 movq $-1,ORIG_RAX(%rsp)
688 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
692 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
698 * Exception entry point. This expects an error code/orig_rax on the stack
699 * and the exception handler in %rax.
703 /* rdi slot contains rax, oldrax contains error code */
706 CFI_ADJUST_CFA_OFFSET (14*8)
708 CFI_REL_OFFSET rsi,RSI
709 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
711 CFI_REL_OFFSET rdx,RDX
713 CFI_REL_OFFSET rcx,RCX
714 movq %rsi,10*8(%rsp) /* store rax */
715 CFI_REL_OFFSET rax,RAX
721 CFI_REL_OFFSET r10,R10
723 CFI_REL_OFFSET r11,R11
725 CFI_REL_OFFSET rbx,RBX
727 CFI_REL_OFFSET rbp,RBP
729 CFI_REL_OFFSET r12,R12
731 CFI_REL_OFFSET r13,R13
733 CFI_REL_OFFSET r14,R14
735 CFI_REL_OFFSET r15,R15
744 movq ORIG_RAX(%rsp),%rsi /* get error code */
745 movq $-1,ORIG_RAX(%rsp)
747 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
752 GET_THREAD_INFO(%rcx)
755 movl threadinfo_flags(%rcx),%edx
756 movl $_TIF_WORK_MASK,%edi
766 /* There are two places in the kernel that can potentially fault with
767 usergs. Handle them here. The exception handlers after
768 iret run with kernel gs again, so don't set the user space flag.
769 B stepping K8s sometimes report an truncated RIP for IRET
770 exceptions returning to compat mode. Check for these here too. */
771 leaq iret_label(%rip),%rbp
774 movl %ebp,%ebp /* zero extend */
777 cmpq $gs_change,RIP(%rsp)
782 /* Reload gs selector with exception handling */
783 /* edi: new selector */
787 CFI_ADJUST_CFA_OFFSET 8
792 2: mfence /* workaround */
795 CFI_ADJUST_CFA_OFFSET -8
798 ENDPROC(load_gs_index)
800 .section __ex_table,"a"
802 .quad gs_change,bad_gs
805 /* running with kernelgs */
807 swapgs /* switch back to user gs */
814 * Create a kernel thread.
816 * C extern interface:
817 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
819 * asm input arguments:
820 * rdi: fn, rsi: arg, rdx: flags
824 FAKE_STACK_FRAME $child_rip
827 # rdi: flags, rsi: usp, rdx: will be &pt_regs
829 orq kernel_thread_flags(%rip),%rdi
842 * It isn't worth to check for reschedule here,
843 * so internally to the x86_64 port you can rely on kernel_thread()
844 * not to reschedule the child before returning, this avoids the need
845 * of hacks for example to fork off the per-CPU idle tasks.
846 * [Hopefully no generic code relies on the reschedule -AK]
852 ENDPROC(kernel_thread)
856 * Here we are in the child and the registers are set as they were
857 * at kernel_thread() invocation in the parent.
868 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
870 * C extern interface:
871 * extern long execve(char *name, char **argv, char **envp)
873 * asm input arguments:
874 * rdi: name, rsi: argv, rdx: envp
876 * We want to fallback into:
877 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
879 * do_sys_execve asm fallback arguments:
880 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
890 je int_ret_from_sys_call
897 KPROBE_ENTRY(page_fault)
898 errorentry do_page_fault
902 ENTRY(coprocessor_error)
903 zeroentry do_coprocessor_error
904 END(coprocessor_error)
906 ENTRY(simd_coprocessor_error)
907 zeroentry do_simd_coprocessor_error
908 END(simd_coprocessor_error)
910 ENTRY(device_not_available)
911 zeroentry math_state_restore
912 END(device_not_available)
914 /* runs on exception stack */
918 CFI_ADJUST_CFA_OFFSET 8
919 paranoidentry do_debug, DEBUG_STACK
925 /* runs on exception stack */
929 CFI_ADJUST_CFA_OFFSET 8
932 * "Paranoid" exit path from exception stack.
933 * Paranoid because this is used by NMIs and cannot take
934 * any kernel state for granted.
935 * We don't do kernel preemption checks here, because only
936 * NMI should be common and it does not enable IRQs and
937 * cannot get reschedule ticks.
939 /* ebx: no swapgs flag */
941 testl %ebx,%ebx /* swapgs needed? */
944 jnz paranoid_userspace
951 GET_THREAD_INFO(%rcx)
952 movl threadinfo_flags(%rcx),%ebx
953 andl $_TIF_WORK_MASK,%ebx
955 movq %rsp,%rdi /* &pt_regs */
957 movq %rax,%rsp /* switch stack for scheduling */
958 testl $_TIF_NEED_RESCHED,%ebx
959 jnz paranoid_schedule
960 movl %ebx,%edx /* arg3: thread flags */
962 xorl %esi,%esi /* arg2: oldset */
963 movq %rsp,%rdi /* arg1: &pt_regs */
964 call do_notify_resume
966 jmp paranoid_userspace
971 jmp paranoid_userspace
979 CFI_ADJUST_CFA_OFFSET 8
980 paranoidentry do_int3, DEBUG_STACK
987 zeroentry do_overflow
995 zeroentry do_invalid_op
998 ENTRY(coprocessor_segment_overrun)
999 zeroentry do_coprocessor_segment_overrun
1000 END(coprocessor_segment_overrun)
1003 zeroentry do_reserved
1006 /* runs on exception stack */
1009 paranoidentry do_double_fault
1015 errorentry do_invalid_TSS
1018 ENTRY(segment_not_present)
1019 errorentry do_segment_not_present
1020 END(segment_not_present)
1022 /* runs on exception stack */
1023 ENTRY(stack_segment)
1025 paranoidentry do_stack_segment
1030 KPROBE_ENTRY(general_protection)
1031 errorentry do_general_protection
1032 END(general_protection)
1035 ENTRY(alignment_check)
1036 errorentry do_alignment_check
1037 END(alignment_check)
1040 zeroentry do_divide_error
1043 ENTRY(spurious_interrupt_bug)
1044 zeroentry do_spurious_interrupt_bug
1045 END(spurious_interrupt_bug)
1047 #ifdef CONFIG_X86_MCE
1048 /* runs on exception stack */
1049 ENTRY(machine_check)
1052 CFI_ADJUST_CFA_OFFSET 8
1053 paranoidentry do_machine_check
1061 movq %gs:pda_irqstackptr,%rax
1063 CFI_DEF_CFA_REGISTER rdx
1064 incl %gs:pda_irqcount
1067 /*todo CFI_DEF_CFA_EXPRESSION ...*/
1070 CFI_DEF_CFA_REGISTER rsp
1071 decl %gs:pda_irqcount
1074 ENDPROC(call_softirq)
1076 #ifdef CONFIG_STACK_UNWIND
1077 ENTRY(arch_unwind_init_running)
1079 movq %r15, R15(%rdi)
1080 movq %r14, R14(%rdi)
1082 movq %r13, R13(%rdi)
1083 movq %r12, R12(%rdi)
1085 movq %rbp, RBP(%rdi)
1086 movq %rbx, RBX(%rdi)
1088 movq %rax, R11(%rdi)
1089 movq %rax, R10(%rdi)
1092 movq %rax, RAX(%rdi)
1093 movq %rax, RCX(%rdi)
1094 movq %rax, RDX(%rdi)
1095 movq %rax, RSI(%rdi)
1096 movq %rax, RDI(%rdi)
1097 movq %rax, ORIG_RAX(%rdi)
1098 movq %rcx, RIP(%rdi)
1100 movq $__KERNEL_CS, CS(%rdi)
1101 movq %rax, EFLAGS(%rdi)
1102 movq %rcx, RSP(%rdi)
1103 movq $__KERNEL_DS, SS(%rdi)
1106 ENDPROC(arch_unwind_init_running)