/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens after every
 * interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
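/*
 * Illustrative sketch (not part of the build): each push/pop below is
 * paired with a CFA adjustment so the dwarf2 unwinder stays in sync,
 * e.g.:
 *
 *	pushq %rbx
 *	CFI_ADJUST_CFA_OFFSET 8
 *	CFI_REL_OFFSET rbx, 0
 *	...
 *	popq %rbx
 *	CFI_ADJUST_CFA_OFFSET -8
 *	CFI_RESTORE rbx
 */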
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
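/* For reference: with EM_X86_64 == 62, AUDIT_ARCH_X86_64 evaluates to
   0xc000003e, matching the value <linux/audit.h> defines for C code. */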
#ifdef CONFIG_DYNAMIC_FTRACE
	subq $MCOUNT_INSN_SIZE, %rdi

	/* taken from glibc */
	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi
#else /* ! CONFIG_DYNAMIC_FTRACE */
	cmpq $ftrace_stub, ftrace_trace_function

	/* taken from glibc */
	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */
	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
	.endm
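	/*
	 * Typical pairing, as used by the ptregs stubs further down
	 * (a sketch; sys_foo stands for any pt_regs-taking syscall):
	 *
	 *	FIXUP_TOP_OF_STACK %r11		# make the frame iret-able
	 *	call sys_foo			# C code may touch pt_regs
	 *	RESTORE_TOP_OF_STACK %r11	# sync syscall state back
	 */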
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	CFI_ADJUST_CFA_OFFSET 8
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro UNFAKE_STACK_FRAME
	CFI_ADJUST_CFA_OFFSET -(6*8)
	.endm

	.macro CFI_DEFAULT_STACK start=1
	CFI_DEF_CFA_OFFSET SS+8
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
	.endm

/* A newly forked process directly context switches into this. */
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8

	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)

	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call

	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx  saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
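/*
 * User-space view of this convention (illustrative sketch only; msg and
 * its length are placeholders): invoking write(2), where __NR_write is 1
 * on x86-64. The CPU clobbers %rcx and %r11 as described above:
 *
 *	movq $1,%rax		# system call number (__NR_write)
 *	movq $1,%rdi		# arg0: fd (stdout)
 *	leaq msg(%rip),%rsi	# arg1: buf
 *	movq $14,%rdx		# arg2: count
 *	syscall			# %rcx = return RIP, %r11 = saved eflags
 */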
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	/*CFI_REGISTER rflags,r11*/

	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
	movl $_TIF_ALLWORK_MASK,%edi

	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	movl TI_flags(%rcx),%edx
	/*
	 * sysretq will re-enable interrupts:
	 */
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp

	/* Handle reschedules */
	/* edx: work, edi: workmask */
	bt $TIF_NEED_RESCHED,%edx
	ENABLE_INTERRUPTS(CLBR_NONE)
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8

	/* Handle a signal */
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
#endif
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)

	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath
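	/*
	 * The register shuffle above marshals arguments for (roughly)
	 * this C prototype, assuming the audit API of this era:
	 *
	 *	void audit_syscall_entry(int arch, int major,
	 *			unsigned long a0, unsigned long a1,
	 *			unsigned long a2, unsigned long a3);
	 */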
	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi	/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
#endif /* CONFIG_AUDITSYSCALL */
	/* Do syscall tracing */
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
#endif
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because the user could have changed the frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl $~TS_COMPAT,TI_status(%rcx)

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
	bt $TIF_NEED_RESCHED,%edx
	ENABLE_INTERRUPTS(CLBR_NONE)
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)

	/* handle signals and tracing -- both require a full stack frame */
	ENABLE_INTERRUPTS(CLBR_NONE)
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi

	testl $_TIF_DO_NOTIFY_MASK,%edx
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi

	DISABLE_INTERRUPTS(CLBR_NONE)
/*
 * Certain special system calls need to save a complete stack frame.
 */
	.macro PTREGSCALL label,func,arg
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
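/*
 * For reference, a hand-expanded sketch of one instance:
 * "PTREGSCALL stub_fork, sys_fork, %rdi" defines a stub_fork entry
 * that does roughly
 *
 *	leaq sys_fork(%rip),%rax
 *	leaq -ARGOFFSET+8(%rsp),%rdi	# 8 for return address
 *	jmp ptregscall_common
 */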
ENTRY(ptregscall_common)
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	RESTORE_TOP_OF_STACK %r11
	CFI_REGISTER rip, r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
END(ptregscall_common)

	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	FIXUP_TOP_OF_STACK %r11
	RESTORE_TOP_OF_STACK %r11
	jmp int_ret_from_sys_call

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_ADJUST_CFA_OFFSET -8
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
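/* Usage note: zeroentry-style handlers start from INTR_FRAME and push
   their own zero error code, while errorentry-style handlers start from
   XCPT_FRAME because the CPU has already pushed an error code. */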
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */
	/* 0(%rsp): interrupt number */
	.macro interrupt func
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp, 0
	CFI_DEF_CFA_REGISTER rbp
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
ENTRY(common_interrupt)
	/* 0(%rsp): oldrsp-ARGOFFSET */
	DISABLE_INTERRUPTS(CLBR_NONE)
	decl %gs:pda_irqcount
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
	movl TI_flags(%rcx),%edx

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	jmp general_protection

	/* edi: workmask, edx: work */
	bt $TIF_NEED_RESCHED,%edx
	ENABLE_INTERRUPTS(CLBR_NONE)
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)

	testl $_TIF_DO_NOTIFY_MASK,%edx
	ENABLE_INTERRUPTS(CLBR_NONE)
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
#endif

END(common_interrupt)

	.macro apicinterrupt num,func
	CFI_ADJUST_CFA_OFFSET 8

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8

	.macro errorentry sym
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_ADJUST_CFA_OFFSET 8
	/* error code is on the stack already */
	/* handle NMI-like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	movl $MSR_GS_BASE,%ecx
	movq %gs:pda_data_offset, %rbp
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	DISABLE_INTERRUPTS(CLBR_NONE)
959 * "Paranoid" exit path from exception stack.
960 * Paranoid because this is used by NMIs and cannot take
961 * any kernel state for granted.
962 * We don't do kernel preemption checks here, because only
963 * NMI should be common and it does not enable IRQs and
964 * cannot get reschedule ticks.
966 * "trace" is 0 for the NMI handler only, because irq-tracing
967 * is fundamentally NMI-unsafe. (we cannot change the soft and
968 * hard flags at once, atomically)
970 .macro paranoidexit trace=1
971 /* ebx: no swapgs flag */
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
paranoid_restore\trace:
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	ENABLE_INTERRUPTS(CLBR_ANY)
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp paranoid_userspace\trace
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	CFI_ADJUST_CFA_OFFSET (14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET r8,R8
	CFI_REL_OFFSET r9,R9
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r15,R15
	je error_kernelspace
	CFI_REL_OFFSET rdi,RDI
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	movl %ecx,%ecx		/* zero extend */
	cmpq $gs_change,RIP(%rsp)
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(native_load_gs_index)
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
2:	mfence			/* workaround */
	CFI_ADJUST_CFA_OFFSET -8
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
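/*
 * Illustrative C-side usage (a sketch; my_fn and arg are placeholders):
 *
 *	pid = kernel_thread(my_fn, arg, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * A negative return value is an errno; otherwise it is the child's pid.
 */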
ENTRY(kernel_thread)
	FAKE_STACK_FRAME $child_rip

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	orq kernel_thread_flags(%rip),%rdi
	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
ENDPROC(kernel_thread)

	pushq $0		# fake return address
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
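/*
 * Illustrative C-side usage (sketch): this is how early user space gets
 * started from kernel context, e.g.
 *
 *	kernel_execve("/sbin/init", argv_init, envp_init);
 *
 * On success it switches to user mode via the IRET path below rather
 * than returning to the caller; an error code comes back on failure.
 */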
ENTRY(kernel_execve)
	movq %rax, RAX(%rsp)
	je int_ret_from_sys_call
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK

	/* runs on exception stack */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
#endif

	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK

	zeroentry do_overflow

	zeroentry do_invalid_op

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault

	errorentry do_invalid_TSS

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

	zeroentry do_divide_error

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
#endif /* CONFIG_X86_MCE */

	/* Call softirq on interrupt stack. Interrupts are off. */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp		# backlink for old unwinder
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
ENTRY(xen_do_hypervisor_callback)	# xen_do_hypervisor_callback(struct pt_regs *)
	/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
	   will see the correct pointer to the pt_regs */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
11:	incl %gs:pda_irqcount
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp		# backlink for old unwinder
	call xen_evtchn_do_upcall
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
END(xen_do_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	/* All segments match their saved values => Category 2 (Bad IRET). */
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET 8
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	CFI_ADJUST_CFA_OFFSET -0x30
	CFI_ADJUST_CFA_OFFSET 8
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */