/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
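/*
 * Editorial sketch of the frame terminology above -- an assumption
 * inferred from the CFI_DEFAULT_STACK offsets used later in this file,
 * not text from the original. Kernel stack layout after SAVE_REST,
 * highest address first:
 *
 *      SS, RSP, EFLAGS, CS, RIP        <- "top of stack": the iret frame
 *      ORIG_RAX                        <- error code / syscall number slot
 *      RDI, RSI, RDX, RCX, RAX, R8-R11 <- partial frame (SAVE_ARGS)
 *      RBX, RBP, R12-R15               <- added by SAVE_REST: full frame
 */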
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#ifdef CONFIG_DYNAMIC_FTRACE
        subq $MCOUNT_INSN_SIZE, %rdi

        /* taken from glibc */
        movq 0x38(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi
#else /* ! CONFIG_DYNAMIC_FTRACE */
        cmpq $ftrace_stub, ftrace_trace_function

        /* taken from glibc */
        movq 0x38(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi

        call *ftrace_trace_function
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp)      /* interrupts off? (EFLAGS bit 9 is IF) */
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq R11(%rsp),\tmp     /* get eflags */
        movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
.endm
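/*
 * Editorial usage sketch (an assumption, not original text): the pattern
 * for calling a pt_regs-taking C function from the SYSCALL fast path, as
 * ptregscall_common does further down. some_ptregs_func is hypothetical.
 *
 *      FIXUP_TOP_OF_STACK %r11         # make SS..RIP in the frame valid
 *      leaq -ARGOFFSET(%rsp),%rdi      # arg1: struct pt_regs *
 *      call some_ptregs_func           # may rewrite the frame via ptregs
 *      RESTORE_TOP_OF_STACK %r11       # propagate RSP/EFLAGS edits back
 */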
.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        pushq $__KERNEL_DS              /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        CFI_ADJUST_CFA_OFFSET 8
        pushq $(1<<9)                   /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS              /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip                /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax                      /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
        CFI_ADJUST_CFA_OFFSET -(6*8)
.endm
.macro CFI_DEFAULT_STACK start=1
        CFI_DEF_CFA_OFFSET SS+8
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
.endm
/*
 * A newly forked process directly context switches into this.
 */
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8         # a 64-bit push moves %rsp by 8
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -8

        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)

        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,TI_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call

        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx  saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack
 *      frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
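/*
 * Editorial sketch (an assumption, not original text): a minimal
 * user-space invocation following the convention above, using write(2);
 * msg and len are hypothetical symbols.
 *
 *      movq $1,%rax            # __NR_write
 *      movq $1,%rdi            # arg0: fd (stdout)
 *      leaq msg(%rip),%rsi     # arg1: buffer
 *      movq $len,%rdx          # arg2: count
 *      syscall                 # rcx <- RIP, r11 <- EFLAGS, rax <- result
 */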
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        /*CFI_REGISTER rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
ENTRY(system_call_after_swapgs)

        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
        cmpq $__NR_syscall_max,%rax
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        movl $_TIF_ALLWORK_MASK,%edi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        movl TI_flags(%rcx),%edx
        /*
         * sysretq will re-enable interrupts:
         */
        movq RIP-ARGOFFSET(%rsp),%rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq %gs:pda_oldrsp, %rsp
        /* Handle reschedules */
        /* edx: work, edi: workmask */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8

        /* Handle a signal */
        ENABLE_INTERRUPTS(CLBR_NONE)
        testl $_TIF_DO_NOTIFY_MASK,%edx

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_WORK_MASK,%edi
        /* Use IRET because the user could have changed the frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        DISABLE_INTERRUPTS(CLBR_NONE)

        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call
        /* Do syscall tracing */
        movq $-ENOSYS,RAX(%rsp)         /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_enter() returned
         * the value it wants us to use in the table lookup.
         */
        LOAD_ARGS ARGOFFSET, 1
        cmpq $__NR_syscall_max,%rax
        ja int_ret_from_sys_call        /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx                  /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because the user could have changed the frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
        .globl int_ret_from_sys_call
int_ret_from_sys_call:
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl $~TS_COMPAT,TI_status(%rcx)

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8
        DISABLE_INTERRUPTS(CLBR_NONE)

        /* handle signals and tracing -- both require a full stack frame */
        ENABLE_INTERRUPTS(CLBR_NONE)
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi               # &ptregs -> arg1
        call syscall_trace_leave
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi

        testl $_TIF_DO_NOTIFY_MASK,%edx
        movq %rsp,%rdi                  # &ptregs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
        DISABLE_INTERRUPTS(CLBR_NONE)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common
.endm

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        RESTORE_TOP_OF_STACK %r11
        CFI_REGISTER rip, r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
END(ptregscall_common)

        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        FIXUP_TOP_OF_STACK %r11
        RESTORE_TOP_OF_STACK %r11
        jmp int_ret_from_sys_call
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_ADJUST_CFA_OFFSET -8
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at the higher layer
        jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
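/*
 * Editorial usage sketch (an assumption, not original text): an entry
 * stub whose exception pushes no hardware error code starts from
 * INTR_FRAME and pushes a fake one, after which the layout matches
 * XCPT_FRAME -- the same move the zeroentry path makes above:
 *
 *      INTR_FRAME
 *      pushq $0                        # fake error code/oldrax
 *      CFI_ADJUST_CFA_OFFSET 8
 *      # frame now matches XCPT_FRAME
 */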
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        CFI_DEF_CFA_REGISTER rbp
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
            much work) */
1:      incl %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
ENTRY(common_interrupt)
        /* 0(%rsp): oldrsp-ARGOFFSET */
        DISABLE_INTERRUPTS(CLBR_NONE)
        decl %gs:pda_irqcount
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
        movl TI_flags(%rcx),%edx

retint_swapgs:          /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)

retint_restore_args:    /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */

        .section __ex_table, "a"
        .quad irq_return, bad_iret

#ifdef CONFIG_PARAVIRT
        .section __ex_table,"a"
        .quad native_iret, bad_iret
/*
 * The iret traps when the %cs or %ss being restored is bogus.
 * We've lost the original trap vector and error code.
 * #GPF is the most likely one to get for an invalid selector.
 * So pretend we completed the iret and took the #GPF in user mode.
 *
 * We are now running with the kernel GS after exception recovery.
 * But error_entry expects us to have user GS to match the user %cs,
 * so swap back.
 */
        jmp general_protection
        /* edi: workmask, edx: work */
        bt $TIF_NEED_RESCHED,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)

        testl $_TIF_DO_NOTIFY_MASK,%edx
        ENABLE_INTERRUPTS(CLBR_NONE)
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi                  # oldset
        movq %rsp,%rdi                  # &pt_regs
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
        cmpl $0,TI_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq

END(common_interrupt)
.macro apicinterrupt num,func
        CFI_ADJUST_CFA_OFFSET 8
.endm

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(call_function_single_interrupt)
        apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)

ENTRY(irq_move_cleanup_interrupt)
        apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
        apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
.macro zeroentry sym
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $0        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
.endm
.macro errorentry sym
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8
.endm

/* error code is on the stack already */
/* handle NMI-like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
        movl $MSR_GS_BASE,%ecx
        movq %gs:pda_data_offset, %rbp
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        DISABLE_INTERRUPTS(CLBR_NONE)
.endm
907 * "Paranoid" exit path from exception stack.
908 * Paranoid because this is used by NMIs and cannot take
909 * any kernel state for granted.
910 * We don't do kernel preemption checks here, because only
911 * NMI should be common and it does not enable IRQs and
912 * cannot get reschedule ticks.
914 * "trace" is 0 for the NMI handler only, because irq-tracing
915 * is fundamentally NMI-unsafe. (we cannot change the soft and
916 * hard flags at once, atomically)
918 .macro paranoidexit trace=1
919 /* ebx: no swapgs flag */
921 testl %ebx,%ebx /* swapgs needed? */
922 jnz paranoid_restore\trace
924 jnz paranoid_userspace\trace
925 paranoid_swapgs\trace:
930 paranoid_restore\trace:
933 paranoid_userspace\trace:
934 GET_THREAD_INFO(%rcx)
935 movl TI_flags(%rcx),%ebx
936 andl $_TIF_WORK_MASK,%ebx
937 jz paranoid_swapgs\trace
938 movq %rsp,%rdi /* &pt_regs */
940 movq %rax,%rsp /* switch stack for scheduling */
941 testl $_TIF_NEED_RESCHED,%ebx
942 jnz paranoid_schedule\trace
943 movl %ebx,%edx /* arg3: thread flags */
947 ENABLE_INTERRUPTS(CLBR_NONE)
948 xorl %esi,%esi /* arg2: oldset */
949 movq %rsp,%rdi /* arg1: &pt_regs */
950 call do_notify_resume
951 DISABLE_INTERRUPTS(CLBR_NONE)
955 jmp paranoid_userspace\trace
956 paranoid_schedule\trace:
960 ENABLE_INTERRUPTS(CLBR_ANY)
962 DISABLE_INTERRUPTS(CLBR_ANY)
966 jmp paranoid_userspace\trace
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        /* rdi slot contains rax, oldrax contains error code */
        CFI_ADJUST_CFA_OFFSET (14*8)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r15,R15
        je error_kernelspace
        CFI_REL_OFFSET rdi,RDI
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
        DISABLE_INTERRUPTS(CLBR_NONE)
        GET_THREAD_INFO(%rcx)
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq irq_return(%rip),%rcx
        movl %ecx,%ecx                  /* zero extend */
        cmpq $gs_change,RIP(%rsp)
KPROBE_END(error_entry)
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(native_load_gs_index)
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
2:      mfence                          /* workaround */
        CFI_ADJUST_CFA_OFFSET -8
ENDPROC(native_load_gs_index)

        .section __ex_table,"a"
        .quad gs_change,bad_gs
        .section .fixup,"ax"
        /* running with kernelgs */
        SWAPGS                          /* switch back to user gs */
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
        FAKE_STACK_FRAME $child_rip
        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        orq kernel_thread_flags(%rip),%rdi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not rescheduling the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
ENDPROC(kernel_thread)
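/*
 * Editorial usage sketch (an assumption, not original text): register
 * setup for a call honoring the interface above; my_thread_fn, my_arg
 * and the flags value are hypothetical.
 *
 *      leaq my_thread_fn(%rip),%rdi    # fn
 *      leaq my_arg(%rip),%rsi          # arg
 *      movq $0,%rdx                    # flags (CLONE_* bits)
 *      call kernel_thread              # new pid (or -errno) in %rax
 */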
        pushq $0                        # fake return address
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
        movq %rax, RAX(%rsp)
        je int_ret_from_sys_call
ENDPROC(kernel_execve)
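/*
 * Editorial usage sketch (an assumption, not original text): a caller
 * honoring the interface above; the path/argv/envp symbols are
 * hypothetical.
 *
 *      leaq init_path(%rip),%rdi       # name
 *      leaq init_argv(%rip),%rsi       # argv
 *      leaq init_envp(%rip),%rdx       # envp
 *      call kernel_execve              # error code in %rax on failure
 */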
KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)
        /* runs on exception stack */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK

        /* runs on exception stack */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS

        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK

        zeroentry do_overflow

        zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

        /* runs on exception stack */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        paranoidentry do_double_fault

        errorentry do_invalid_TSS

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        paranoidentry do_stack_segment

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

        zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        /* Call softirq on interrupt stack. Interrupts are off. */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
ENDPROC(ignore_sysret)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
        zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(xen_do_hypervisor_callback)       # do_hypervisor_callback(struct pt_regs *)
        /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
           will see the correct pointer to the pt_regs */
        movq %rdi, %rsp                 # we don't return, adjust the stack frame
11:     incl %gs:pda_irqcount
        CFI_DEF_CFA_REGISTER rbp
        cmovzq %gs:pda_irqstackptr,%rsp
        pushq %rbp                      # backlink for old unwinder
        call xen_evtchn_do_upcall
        CFI_DEF_CFA_REGISTER rsp
        decl %gs:pda_irqcount
END(xen_do_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.

ENTRY(xen_failsafe_callback)
framesz = (RIP-0x30)                    /* workaround buggy gas */
        CFI_REL_OFFSET rcx, 0
        CFI_REL_OFFSET r11, 8

        /* All segments match their saved values => Category 2 (Bad IRET). */
        CFI_ADJUST_CFA_OFFSET -0x30
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET 8
        CFI_ADJUST_CFA_OFFSET 8
        jmp general_protection

1:      /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
        CFI_ADJUST_CFA_OFFSET -0x30
        CFI_ADJUST_CFA_OFFSET 8
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */