/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals, or fork/exec, etc.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
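 *
 * As an illustrative sketch (an editor's addition, not original text), the
 * hardware interrupt frame at the top of the kernel stack looks like this,
 * highest address first:
 *
 *	SS
 *	RSP
 *	EFLAGS
 *	CS
 *	RIP	<- %rsp after the hardware push
 *
 * A partial frame extends this downward with ORIG_RAX and RDI..R11;
 * a full frame continues down through RBX, RBP and R12..R15.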
 *
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - save/restore the registers that C functions
 *   modify. There are unfortunately lots of special cases where some
 *   registers are not touched. The macro is a big mess that should be
 *   cleaned up.
 * - SAVE_REST/RESTORE_REST - handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - define exception entry points.
 */
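/*
 * Illustrative only (an editor's sketch, not in the original file): the
 * save/restore macros above are typically paired around a call into C,
 * with "some_c_handler" standing in for a real handler:
 *
 *	SAVE_ARGS			# partial frame: caller-clobbered regs
 *	SAVE_REST			# extend to a full frame when needed
 *	movq %rsp,%rdi			# arg1: struct pt_regs *
 *	call some_c_handler
 *	RESTORE_REST
 *	RESTORE_ARGS
 */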
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#ifdef CONFIG_DYNAMIC_FTRACE
        subq $MCOUNT_INSN_SIZE, %rdi

        /* taken from glibc */
        movq 0x38(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi

#else /* ! CONFIG_DYNAMIC_FTRACE */
        cmpq $ftrace_stub, ftrace_trace_function

        /* taken from glibc */
        movq 0x38(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi

        call *ftrace_trace_function

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp)      /* interrupts off? */
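        /* bit 9 of EFLAGS is the IF flag; bt copies it into CF */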
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq R11(%rsp),\tmp     /* get eflags */
        movq \tmp,EFLAGS(%rsp)

.macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        pushq $__KERNEL_DS      /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        CFI_ADJUST_CFA_OFFSET 8
        pushq $(1<<9)           /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS      /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip        /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax              /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8

.macro UNFAKE_STACK_FRAME
        CFI_ADJUST_CFA_OFFSET -(6*8)
.macro CFI_DEFAULT_STACK start=1
        CFI_DEF_CFA_OFFSET SS+8
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
/*
 * A newly forked process directly context-switches into this.
 */
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8         /* push is 8 bytes in long mode */
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -8

        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)

        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,TI_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call

        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax	system call number
 * rcx	return address for syscall/sysret, C arg3
 * r10	arg3	(--> moved to rcx for C)
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx	saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't.
 *
 * When the user can change the frame, always force IRET: it deals with
 * non-canonical addresses better. SYSRET has trouble with them due to bugs
 * in both AMD and Intel CPUs.
 */
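/*
 * For illustration (an editor's sketch, not part of the original file): a
 * user-space getpid() call enters here roughly as
 *
 *	movl $__NR_getpid,%eax	# syscall number in rax
 *	syscall			# rcx <- return RIP, r11 <- RFLAGS
 *	# on return, rax holds the result; rcx/r11 were clobbered
 */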
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        /*CFI_REGISTER rflags,r11*/

        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
ENTRY(system_call_after_swapgs)

        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
        cmpq $__NR_syscall_max,%rax
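        /* sys_call_table holds 8-byte handler pointers, indexed by the syscall number */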
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
        /*
         * Syscall return path ending with SYSRET (fast path).
         * Has incomplete stack frame and undefined top of stack.
         */
        movl $_TIF_ALLWORK_MASK,%edi

        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        movl TI_flags(%rcx),%edx

        /*
         * sysretq will re-enable interrupts:
         */
        movq RIP-ARGOFFSET(%rsp),%rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq %gs:pda_oldrsp, %rsp

        /* Handle reschedules */
        /* edx: work, edi: workmask */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8

        /* Handle a signal */
        ENABLE_INTERRUPTS(CLBR_NONE)
        testl $_TIF_DO_NOTIFY_MASK,%edx

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_WORK_MASK,%edi
        /*
         * Use IRET because the user could have changed the frame. This works
         * because ptregscall_common has called FIXUP_TOP_OF_STACK.
         */
        DISABLE_INTERRUPTS(CLBR_NONE)

        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call
        /* Do syscall tracing */
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed them */
        cmpq $__NR_syscall_max,%rax
        ja int_ret_from_sys_call        /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx          /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because the user could have changed the frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
        .globl int_ret_from_sys_call
int_ret_from_sys_call:
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */

        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl $~TS_COMPAT,TI_status(%rcx)

        /* Either reschedule, signal, or syscall exit tracking is needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8
        DISABLE_INTERRUPTS(CLBR_NONE)

        /* Handle signals and tracing -- both require a full stack frame */
        ENABLE_INTERRUPTS(CLBR_NONE)
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi

        testl $_TIF_DO_NOTIFY_MASK,%edx
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
        DISABLE_INTERRUPTS(CLBR_NONE)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi
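/*
 * For illustration (an editor's note, not part of the original file): with
 * the macro above, "PTREGSCALL stub_clone, sys_clone, %r8" expands to
 * roughly:
 *
 *	leaq sys_clone(%rip),%rax	# C handler in rax
 *	leaq -ARGOFFSET+8(%rsp),%r8	# pt_regs pointer (8 for return address)
 *	jmp ptregscall_common
 *
 * i.e. the stub stashes the handler in %rax, passes a pt_regs pointer in the
 * requested argument register, and funnels into ptregscall_common.
 */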
ENTRY(ptregscall_common)
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        RESTORE_TOP_OF_STACK %r11
        CFI_REGISTER rip, r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
END(ptregscall_common)

        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        FIXUP_TOP_OF_STACK %r11
        RESTORE_TOP_OF_STACK %r11
        jmp int_ret_from_sys_call
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_ADJUST_CFA_OFFSET -8
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme: this could be done at the higher layer
        jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
        .macro _frame ref
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        CFI_DEF_CFA_REGISTER rbp
        /*
         * irqcount is used to check if a CPU is already on an interrupt
         * stack or not. While this is essentially redundant with
         * preempt_count it is a little cheaper to use a separate counter
         * in the PDA (short of moving irq_enter into assembly, which
         * would be too complicated).
         */
1:      incl %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
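        /* switch to the irq stack only on the first nesting level
           (the count just became zero) */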
        push %rbp                       # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
ENTRY(common_interrupt)

        /* 0(%rsp): oldrsp-ARGOFFSET */
        DISABLE_INTERRUPTS(CLBR_NONE)
        decl %gs:pda_irqcount
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8

        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame.
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
        movl TI_flags(%rcx),%edx

retint_swapgs:          /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)

retint_restore_args:    /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */

        .section __ex_table, "a"
        .quad irq_return, bad_iret

#ifdef CONFIG_PARAVIRT
        .section __ex_table,"a"
        .quad native_iret, bad_iret

        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swapgs back.
         */
        jmp general_protection
        /* edi: workmask, edx: work */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)

        testl $_TIF_DO_NOTIFY_MASK,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption. */
        /* rcx: threadinfo. Interrupts off. */
        cmpl $0,TI_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq

END(common_interrupt)
.macro apicinterrupt num,func
        CFI_ADJUST_CFA_OFFSET 8

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(irq_move_cleanup_interrupt)
        apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
        apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
.macro zeroentry sym
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8

.macro errorentry sym
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8

        /* error code is on the stack already */
        /* handle NMI-like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
        movl $MSR_GS_BASE,%ecx
        movq %gs:pda_data_offset, %rbp
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        DISABLE_INTERRUPTS(CLBR_NONE)
900 * "Paranoid" exit path from exception stack.
901 * Paranoid because this is used by NMIs and cannot take
902 * any kernel state for granted.
903 * We don't do kernel preemption checks here, because only
904 * NMI should be common and it does not enable IRQs and
905 * cannot get reschedule ticks.
907 * "trace" is 0 for the NMI handler only, because irq-tracing
908 * is fundamentally NMI-unsafe. (we cannot change the soft and
909 * hard flags at once, atomically)
911 .macro paranoidexit trace=1
912 /* ebx: no swapgs flag */
914 testl %ebx,%ebx /* swapgs needed? */
915 jnz paranoid_restore\trace
917 jnz paranoid_userspace\trace
918 paranoid_swapgs\trace:
923 paranoid_restore\trace:
926 paranoid_userspace\trace:
927 GET_THREAD_INFO(%rcx)
928 movl TI_flags(%rcx),%ebx
929 andl $_TIF_WORK_MASK,%ebx
930 jz paranoid_swapgs\trace
931 movq %rsp,%rdi /* &pt_regs */
933 movq %rax,%rsp /* switch stack for scheduling */
934 testl $_TIF_NEED_RESCHED,%ebx
935 jnz paranoid_schedule\trace
936 movl %ebx,%edx /* arg3: thread flags */
940 ENABLE_INTERRUPTS(CLBR_NONE)
941 xorl %esi,%esi /* arg2: oldset */
942 movq %rsp,%rdi /* arg1: &pt_regs */
943 call do_notify_resume
944 DISABLE_INTERRUPTS(CLBR_NONE)
948 jmp paranoid_userspace\trace
949 paranoid_schedule\trace:
953 ENABLE_INTERRUPTS(CLBR_ANY)
955 DISABLE_INTERRUPTS(CLBR_ANY)
959 jmp paranoid_userspace\trace
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        /* rdi slot contains rax, oldrax contains error code */
        CFI_ADJUST_CFA_OFFSET (14*8)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r15,R15
        je error_kernelspace
        CFI_REL_OFFSET rdi,RDI
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)

        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
        DISABLE_INTERRUPTS(CLBR_NONE)
        GET_THREAD_INFO(%rcx)
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi

        /*
         * There are two places in the kernel that can potentially fault with
         * usergs. Handle them here. The exception handlers after iret run
         * with kernel gs again, so don't set the user space flag. B-stepping
         * K8s sometimes report a truncated RIP for IRET exceptions returning
         * to compat mode. Check for these here too.
         */
        leaq irq_return(%rip),%rcx
        movl %ecx,%ecx          /* zero extend */
        cmpq $gs_change,RIP(%rsp)
KPROBE_END(error_entry)
        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(native_load_gs_index)
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
2:      mfence                  /* workaround */
        CFI_ADJUST_CFA_OFFSET -8
ENDPROC(native_load_gs_index)

        .section __ex_table,"a"
        .quad gs_change,bad_gs
        .section .fixup,"ax"
        /* running with kernelgs */
        SWAPGS                  /* switch back to user gs */
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
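/*
 * Illustrative usage from C (an editor's sketch; "my_thread_fn" is a
 * hypothetical function, and the flags mirror typical in-kernel callers):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		return 0;	// runs in kernel mode
 *	}
 *
 *	long pid = kernel_thread(my_thread_fn, NULL,
 *				 CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * A negative return value is an error; otherwise it is the new thread's PID.
 */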
ENTRY(kernel_thread)
        FAKE_STACK_FRAME $child_rip

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        orq kernel_thread_flags(%rip),%rdi

        /*
         * It isn't worth checking for a reschedule here, so internally to
         * the x86_64 port you can rely on kernel_thread() not rescheduling
         * the child before returning; this avoids the need for hacks, for
         * example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
ENDPROC(kernel_thread)
        pushq $0                # fake return address
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
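/*
 * Illustrative usage (an editor's sketch, not part of the original file):
 * early boot code starts user space through this entry point, along the
 * lines of:
 *
 *	static char *argv[] = { "/sbin/init", NULL };
 *	static char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *
 *	kernel_execve("/sbin/init", argv, envp);
 *
 * A zero result in %rax means success, in which case the code below jumps
 * to int_ret_from_sys_call and enters the new image via IRET instead of
 * returning to the caller.
 */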
ENTRY(kernel_execve)
        movq %rax, RAX(%rsp)
        je int_ret_from_sys_call
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK

        /* runs on exception stack */
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS

        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK

        zeroentry do_overflow

        zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

        /* runs on exception stack */
        paranoidentry do_double_fault

        errorentry do_invalid_TSS

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        paranoidentry do_stack_segment

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

        zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check

        /* Call softirq on the interrupt stack. Interrupts are off. */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
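        /* as in the interrupt macro above: switch to the irq stack only
           when the nesting count just became zero */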
        push %rbp               # backlink for old unwinder
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
ENDPROC(ignore_sysret)