/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals, or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like the partial stack frame, but all registers saved.
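 *
 *   For orientation only, a sketch of the save area implied by these
 *   terms (the field names are the asm-offsets constants used below,
 *   highest address first):
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- "top of stack": hardware iret frame
 *	ORIG_RAX			<- error code or syscall number slot
 *	RDI, RSI, RDX, RCX, RAX,
 *	R8, R9, R10, R11		<- saved by SAVE_ARGS: partial frame
 *	RBX, RBP, R12, R13, R14, R15	<- added by SAVE_REST: full frame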
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
	.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)	/* contains return address, already 'returned' */
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq %rax /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 4
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -4
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
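
/*
 * For illustration only (not part of the kernel): a minimal user-space
 * sequence following the convention above. The number 39 is assumed to
 * be __NR_getpid from the x86_64 unistd table; rcx and r11 are
 * clobbered by the SYSCALL instruction itself, as described above:
 *
 *	movl	$39,%eax	# __NR_getpid
 *	syscall			# rcx <- rip, r11 <- eflags
 *	# on return, %rax holds the result (or -errno)
 */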
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	swapgs
	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx			# arg3 moves to the C calling convention slot
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_RESTORE_STATE
badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	movq $-ENOSYS,%rcx
	cmova %rcx,%rax
	ja  1f
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
1:	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,threadinfo_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
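
/*
 * For illustration: "PTREGSCALL stub_clone, sys_clone, %r8" below
 * expands to
 *
 *	stub_clone:
 *		leaq	sys_clone(%rip),%rax
 *		leaq	-ARGOFFSET+8(%rsp),%r8	# 8 for return address
 *		jmp	ptregscall_common
 */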

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
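
/*
 * Example, for orientation: divide_error below is a zeroentry, so the
 * CPU pushes no error code and its stub starts from INTR_FRAME and
 * pushes a 0 itself; page_fault is an errorentry, where the CPU has
 * already pushed an error code into the ORIG_RAX slot, so it starts
 * from XCPT_FRAME.
 */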

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rbp)
	je 1f
	swapgs
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame.
	 * %rcx: thread info. Interrupts are off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	swapgs
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
iret_label:
	iretq

	.section __ex_table,"a"
	.quad iret_label,bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq $11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	jmp do_exit
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption. */
	/* rcx: threadinfo. Interrupts are off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	swapgs
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
757 * "Paranoid" exit path from exception stack.
758 * Paranoid because this is used by NMIs and cannot take
759 * any kernel state for granted.
760 * We don't do kernel preemption checks here, because only
761 * NMI should be common and it does not enable IRQs and
762 * cannot get reschedule ticks.
764 * "trace" is 0 for the NMI handler only, because irq-tracing
765 * is fundamentally NMI-unsafe. (we cannot change the soft and
766 * hard flags at once, atomically)
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	swapgs
paranoid_restore\trace:
	RESTORE_ALL 8
	iretq
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	swapgs
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl  threadinfo_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq iret_label(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	movl %ebp,%ebp	/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	swapgs
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	swapgs
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	swapgs		/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
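
/*
 * For illustration only, a hypothetical C caller (not from this file);
 * the new thread starts in child_rip below, which calls fn(arg) and
 * then exits with its return value:
 *
 *	static int my_worker(void *arg);	// assumed example fn
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */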
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth it to check for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	xorl %edi, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
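
/*
 * For illustration only, a typical in-kernel caller (assumed, not from
 * this file):
 *
 *	kernel_execve("/sbin/init", argv_init, envp_init);
 *
 * On success this does not return to the caller but exits to user space
 * through int_ret_from_sys_call; a negative return value means the
 * exec failed.
 */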
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)