/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but with all registers saved.
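 *
 * For reference, a sketch of the save area these terms describe (an
 * assumption based on the asm-x86_64 struct pt_regs of this kernel
 * generation; the R15..SS symbols used throughout are asm-offsets into it):
 *
 *	struct pt_regs {
 *		unsigned long r15, r14, r13, r12, rbp, rbx;	full frame only
 *		unsigned long r11, r10, r9, r8, rax, rcx,
 *			      rdx, rsi, rdi, orig_rax;		partial frame saves these
 *		unsigned long rip, cs, eflags, rsp, ss;		hardware interrupt frame
 *	};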
 *
 * TODO:
 * - schedule it carefully for the final hardware.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	CFI_ADJUST_CFA_OFFSET	8
	pushq	$(1<<9)	/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	pushq	%rax	/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8

	.macro UNFAKE_STACK_FRAME
	CFI_ADJUST_CFA_OFFSET	-(6*8)

	.macro CFI_DEFAULT_STACK start=1
	CFI_DEF_CFA_OFFSET	SS+8
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/

/*
 * A newly forked process directly context switches into this.
 */
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	testl	$3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je	int_ret_from_sys_call
	testl	$_TIF_IA32,threadinfo_flags(%rcx)
	jnz	int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp	ret_from_sys_call

	call	syscall_trace_leave
	GET_THREAD_INFO(%rcx)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax	system call number
 * rcx	return address for syscall/sysret, C arg3
 * r10	arg3	(--> moved to rcx for C)
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * IRET deals with non-canonical addresses better; SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
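
/*
 * Illustrative only, not part of this file: a minimal user-space C sketch
 * of the register convention above (SYS_write comes from <sys/syscall.h>).
 * The SYSCALL instruction itself clobbers %rcx (return RIP) and %r11
 * (saved RFLAGS), which is why both are listed as clobbers:
 *
 *	#include <sys/syscall.h>
 *
 *	static long raw_write(int fd, const void *buf, unsigned long len)
 *	{
 *		long ret;
 *		asm volatile("syscall"
 *			     : "=a" (ret)
 *			     : "0" (SYS_write), "D" (fd), "S" (buf), "d" (len)
 *			     : "rcx", "r11", "memory");
 *		return ret;
 *	}
 */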
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	/*CFI_REGISTER	rflags,r11*/
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	cmpq	$__NR_syscall_max,%rax
	call	*sys_call_table(,%rax,8)	# XXX: rip relative
	movq	%rax,RAX-ARGOFFSET(%rsp)

/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
	.globl ret_from_sys_call
	movl	$_TIF_ALLWORK_MASK,%edi
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	/*
	 * sysretq will re-enable interrupts:
	 */
	movq	RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp

	/* Handle reschedules */
	/* edx: work, edi: workmask */
	bt	$TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET	8
	CFI_ADJUST_CFA_OFFSET	-8

	/* Handle a signal */
	testl	$(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	/* Really a signal */
	/* edx: work flags (arg3) */
	leaq	do_notify_resume(%rip),%rax
	leaq	-ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	ptregscall_common
1:	movl	$_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	movq	$-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp	ret_from_sys_call

	/* Do syscall tracing */
	movq	$-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	call	syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed them */
	cmpq	$__NR_syscall_max,%rax
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
1:	movq	%rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
	jmp	int_ret_from_sys_call
/*
 * Syscall return path ending with IRET.
 * Has a correct top of stack, but a partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
	CFI_DEF_CFA	rsp,SS+8-ARGOFFSET
	/*CFI_REL_OFFSET	ss,SS-ARGOFFSET*/
	CFI_REL_OFFSET	rsp,RSP-ARGOFFSET
	/*CFI_REL_OFFSET	rflags,EFLAGS-ARGOFFSET*/
	/*CFI_REL_OFFSET	cs,CS-ARGOFFSET*/
	CFI_REL_OFFSET	rip,RIP-ARGOFFSET
	CFI_REL_OFFSET	rdx,RDX-ARGOFFSET
	CFI_REL_OFFSET	rcx,RCX-ARGOFFSET
	CFI_REL_OFFSET	rax,RAX-ARGOFFSET
	CFI_REL_OFFSET	rdi,RDI-ARGOFFSET
	CFI_REL_OFFSET	rsi,RSI-ARGOFFSET
	CFI_REL_OFFSET	r8,R8-ARGOFFSET
	CFI_REL_OFFSET	r9,R9-ARGOFFSET
	CFI_REL_OFFSET	r10,R10-ARGOFFSET
	CFI_REL_OFFSET	r11,R11-ARGOFFSET
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_restore_args
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	andl	$~TS_COMPAT,threadinfo_status(%rcx)

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
	bt	$TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET	8
	CFI_ADJUST_CFA_OFFSET	-8

	/* handle signals and tracing -- both require a full stack frame */

	/* Check for syscall exit trace */
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	CFI_ADJUST_CFA_OFFSET	8
	leaq	8(%rsp),%rdi	# &ptregs -> arg1
	call	syscall_trace_leave
	CFI_ADJUST_CFA_OFFSET	-8
	andl	$~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi

	testl	$(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
	movq	%rsp,%rdi	# &ptregs -> arg1
	xorl	%esi,%esi	# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_NEED_RESCHED,%edi
END(int_ret_from_sys_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	CFI_REGISTER	rip, r15
	FIXUP_TOP_OF_STACK %r11
	RESTORE_TOP_OF_STACK %r11
	CFI_REGISTER	rip, r11
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip, 0
END(ptregscall_common)

	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	FIXUP_TOP_OF_STACK %r11
	RESTORE_TOP_OF_STACK %r11
	jmp	int_ret_from_sys_call

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_ADJUST_CFA_OFFSET	-8
	FIXUP_TOP_OF_STACK %r11
	call	sys_rt_sigreturn
	movq	%rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	jmp	int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_DEF_CFA	rsp,SS+8-\ref
	/*CFI_REL_OFFSET	ss,SS-\ref*/
	CFI_REL_OFFSET	rsp,RSP-\ref
	/*CFI_REL_OFFSET	rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET	cs,CS-\ref*/
	CFI_REL_OFFSET	rip,RIP-\ref

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */
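
/*
 * For orientation, a sketch (not verbatim from this tree) of the C side the
 * "interrupt" macro below calls into. The handler receives a pointer to the
 * partial frame as its first argument, e.g.
 *
 *	asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
 *
 * with the vector number recoverable from regs->orig_rax (assuming the stub
 * convention of this era, which pushes the vector negated, so ~regs->orig_rax
 * yields it).
 */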
/* 0(%rsp): interrupt number */
	.macro interrupt func
	leaq	-ARGOFFSET(%rsp),%rdi	# arg1 for handler
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rbp, 0
	CFI_DEF_CFA_REGISTER	rbp
1:	incl	%gs:pda_irqcount	# RED-PEN should check preempt count
	cmoveq	%gs:pda_irqstackptr,%rsp
	/*
	 * We entered an interrupt context - irqs are off:
	 */

ENTRY(common_interrupt)
	/* 0(%rsp): oldrsp-ARGOFFSET */
	decl	%gs:pda_irqcount
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	testl	$3,CS-ARGOFFSET(%rsp)

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
	movl	threadinfo_flags(%rcx),%edx
	/*
	 * The iretq could re-enable interrupts:
	 */
	/*
	 * The iretq could re-enable interrupts:
	 */
	.section __ex_table,"a"
	.quad	iret_label,bad_iret
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
	movq	$11,%rdi	/* SIGSEGV */

	/* edi: workmask, edx: work */
	bt	$TIF_NEED_RESCHED,%edx
	CFI_ADJUST_CFA_OFFSET	8
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)

	testl	$(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
	movq	$-1,ORIG_RAX(%rsp)
	xorl	%esi,%esi	# oldset
	movq	%rsp,%rdi	# &pt_regs
	call	do_notify_resume
	movl	$_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
	cmpl	$0,threadinfo_preempt_count(%rcx)
	jnz	retint_restore_args
	bt	$TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc	retint_restore_args
	bt	$9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc	retint_restore_args
	call	preempt_schedule_irq
#endif
END(common_interrupt)

	.macro apicinterrupt num,func
	CFI_ADJUST_CFA_OFFSET	8

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

#ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
#endif

/*
 * Exception entry points.
 */
	pushq	$0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET	8
	pushq	%rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET	8

	.macro errorentry sym
	CFI_ADJUST_CFA_OFFSET	8
	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	movl	$MSR_GS_BASE,%ecx
	movq	%gs:pda_data_offset, %rbp
	movq	ORIG_RAX(%rsp),%rsi
	movq	$-1,ORIG_RAX(%rsp)
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
751 * "Paranoid" exit path from exception stack.
752 * Paranoid because this is used by NMIs and cannot take
753 * any kernel state for granted.
754 * We don't do kernel preemption checks here, because only
755 * NMI should be common and it does not enable IRQs and
756 * cannot get reschedule ticks.
758 * "trace" is 0 for the NMI handler only, because irq-tracing
759 * is fundamentally NMI-unsafe. (we cannot change the soft and
760 * hard flags at once, atomically)
	.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
	testl	%ebx,%ebx	/* swapgs needed? */
	jnz	paranoid_restore\trace
	jnz	paranoid_userspace\trace
paranoid_swapgs\trace:
paranoid_restore\trace:
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%ebx
	andl	$_TIF_WORK_MASK,%ebx
	jz	paranoid_swapgs\trace
	movq	%rsp,%rdi	/* &pt_regs */
	movq	%rax,%rsp	/* switch stack for scheduling */
	testl	$_TIF_NEED_RESCHED,%ebx
	jnz	paranoid_schedule\trace
	movl	%ebx,%edx	/* arg3: thread flags */
	xorl	%esi,%esi	/* arg2: oldset */
	movq	%rsp,%rdi	/* arg1: &pt_regs */
	call	do_notify_resume
	jmp	paranoid_userspace\trace
paranoid_schedule\trace:
	jmp	paranoid_userspace\trace
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
	/* rdi slot contains rax, oldrax contains error code */
	CFI_ADJUST_CFA_OFFSET	(14*8)
	CFI_REL_OFFSET	rsi,RSI
	movq	14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rcx,RCX
	movq	%rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r15,R15
	movq	ORIG_RAX(%rsp),%rsi	/* get error code */
	movq	$-1,ORIG_RAX(%rsp)
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	movl	$_TIF_WORK_MASK,%edi
	/*
	 * The iret might restore flags:
	 */
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after iret run with
	   kernel gs again, so don't set the user space flag. B-stepping K8s
	   sometimes report a truncated RIP for IRET exceptions returning to
	   compat mode. Check for these here too. */
	leaq	iret_label(%rip),%rbp
	movl	%ebp,%ebp	/* zero extend */
	cmpq	$gs_change,RIP(%rsp)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
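/*
 * C-side view, for orientation (an assumption based on the declaration in
 * the kernel's asm headers of this era):
 *
 *	extern void load_gs_index(unsigned gs);
 */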
	CFI_ADJUST_CFA_OFFSET	8
2:	mfence		/* workaround */
	CFI_ADJUST_CFA_OFFSET	-8
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.quad	gs_change,bad_gs

	/* running with kernelgs */
	swapgs		/* switch back to user gs */
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
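
/*
 * Hypothetical usage sketch (my_daemon_fn is an invented name): a caller
 * spawning a kernel daemon that shares fs and files with its parent might do
 *
 *	long pid = kernel_thread(my_daemon_fn, NULL,
 *				 CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * where my_daemon_fn matches the int (*fn)(void *) signature above.
 */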
	FAKE_STACK_FRAME $child_rip

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	orq	kernel_thread_flags(%rip),%rdi
	/*
	 * It isn't worth checking for a reschedule here, so internally to the
	 * x86_64 port you can rely on kernel_thread() not rescheduling the
	 * child before returning; this avoids the need for hacks, for example
	 * to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
ENDPROC(kernel_thread)
/*
 * Here we are in the child and the registers are set as they were
 * at kernel_thread() invocation in the parent.
 */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
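
/*
 * Illustrative usage sketch (argv_init/envp_init are stand-in names for how
 * early init code typically calls this helper):
 *
 *	static char *argv_init[] = { "/sbin/init", NULL };
 *	static char *envp_init[] = { "HOME=/", "TERM=linux", NULL };
 *
 *	execve(argv_init[0], argv_init, envp_init);
 */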
	movq	%rax, RAX(%rsp)
	je	int_ret_from_sys_call

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_debug, DEBUG_STACK

	/* runs on exception stack */
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS

	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_int3, DEBUG_STACK

	zeroentry do_overflow

	zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	zeroentry do_reserved

	/* runs on exception stack */
	paranoidentry do_double_fault

	errorentry do_invalid_TSS

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	paranoidentry do_stack_segment

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

	zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_machine_check

	movq	%gs:pda_irqstackptr,%rax
	CFI_DEF_CFA_REGISTER	rdx
	incl	%gs:pda_irqcount
	/*todo CFI_DEF_CFA_EXPRESSION ...*/
	CFI_DEF_CFA_REGISTER	rsp
	decl	%gs:pda_irqcount
ENDPROC(call_softirq)
#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	movq	%r15, R15(%rdi)
	movq	%r14, R14(%rdi)
	movq	%r13, R13(%rdi)
	movq	%r12, R12(%rdi)
	movq	%rbp, RBP(%rdi)
	movq	%rbx, RBX(%rdi)
	movq	%rax, R11(%rdi)
	movq	%rax, R10(%rdi)
	movq	%rax, RAX(%rdi)
	movq	%rax, RCX(%rdi)
	movq	%rax, RDX(%rdi)
	movq	%rax, RSI(%rdi)
	movq	%rax, RDI(%rdi)
	movq	%rax, ORIG_RAX(%rdi)
	movq	%rcx, RIP(%rdi)
	movq	$__KERNEL_CS, CS(%rdi)
	movq	%rax, EFLAGS(%rdi)
	movq	%rcx, RSP(%rdi)
	movq	$__KERNEL_DS, SS(%rdi)
ENDPROC(arch_unwind_init_running)
#endif	/* CONFIG_STACK_UNWIND */