/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
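
/*
 * For orientation, a sketch of the kernel stack as this file sees it
 * (highest address first, following struct pt_regs of this generation;
 * exact offsets come from asm-offsets, the grouping is the point):
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- hardware/iret frame ("top of stack")
 *	ORIG_RAX			<- syscall number or error code
 *	RDI, RSI, RDX, RCX, RAX,
 *	R8, R9, R10, R11		<- "partial stack frame" (SAVE_ARGS)
 *	RBX, RBP, R12, R13, R14, R15	<- completes the "full stack frame"
 *					   (SAVE_REST)
 */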
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
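
/*
 * For reference: EM_X86_64 is 62 (0x3e), so AUDIT_ARCH_X86_64 evaluates
 * to 0xC000003E - the value user-space audit tooling matches against.
 */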
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
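
/*
 * Note on the MCOUNT_INSN_SIZE adjustment above: mcount is entered with
 * the return address of the instrumented function's mcount call on the
 * stack, so subtracting the size of that call instruction yields the
 * address of the call site itself - the location the tracer records
 * (and, with dynamic ftrace, patches).
 */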
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
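
/*
 * What FIXUP_TOP_OF_STACK accomplishes, sketched at the C level: SYSCALL
 * left the SS/CS/RSP slots of the frame undefined and parked the user RSP
 * in the PDA, so before any C code may look at pt_regs we synthesize a
 * frame that looks as if entry had come through an interrupt gate:
 *
 *	regs->ss    = __USER_DS;
 *	regs->cs    = __USER_CS;
 *	regs->sp    = pda.oldrsp;	// user RSP saved at syscall entry
 *	regs->flags = regs->r11;	// SYSCALL stashed RFLAGS in r11
 *
 * RESTORE_TOP_OF_STACK is the inverse, resyncing pda_oldrsp and r11.
 */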
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax /* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	RESTORE_REST
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET: it deals with
 * non-canonical addresses better. SYSRET has trouble with them due to
 * bugs in both AMD and Intel CPUs.
 */
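
/*
 * For context, a minimal user-space sketch of the convention this entry
 * point consumes (illustrative only; msg/len are placeholder labels):
 *
 *	movq	$1,%rax			# __NR_write on x86-64
 *	movq	$1,%rdi			# arg0: fd = stdout
 *	leaq	msg(%rip),%rsi		# arg1: buf
 *	movq	$len,%rdx		# arg2: count
 *	syscall				# CPU: rcx <- rip, r11 <- rflags
 */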
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64
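
/*
 * Why this works: SYSRET reloads RIP from %rcx and RFLAGS from %r11, the
 * mirror image of what SYSCALL saved - which is why exactly those two
 * registers are reloaded just above, and why this path never needed the
 * SS/CS frame slots.
 */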
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
	xorl %esi,%esi # oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0		/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */
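
/*
 * The sysret_audit marshalling above is equivalent to roughly this C:
 *
 *	audit_syscall_exit(rax < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS, rax);
 *
 * where AUDITSC_SUCCESS == 1 and AUDITSC_FAILURE == 2, matching the
 * setl/inc trick that turns the sign of %rax into 1 or 2.
 */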
	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp  retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
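
/*
 * What a PTREGSCALL expansion buys us, spelled out: sys_clone() and
 * friends take a struct pt_regs * as an extra argument, so the stub
 * loads the C function into %rax, points the chosen argument register
 * at the saved frame, and lets ptregscall_common build the full frame
 * around the call. The stub_clone expansion, for instance, reads:
 *
 *	stub_clone:
 *		leaq	sys_clone(%rip),%rax
 *		leaq	-ARGOFFSET+8(%rsp),%r8	# pt_regs * as 5th C arg
 *		jmp	ptregscall_common
 */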
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
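
/*
 * The incl/cmoveq pair above is the whole stack-switch policy: pda_irqcount
 * starts at -1, so the first interrupt increments it to zero, sets ZF, and
 * cmoveq swaps in the per-CPU interrupt stack; a nested interrupt leaves
 * ZF clear and stays on the stack already in use.
 */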
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0
	SWAPGS
	jmp general_protection
	.previous
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
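
/*
 * Note the three-way guard in retint_kernel above: kernel preemption is
 * attempted only if preempt_count is zero, TIF_NEED_RESCHED is set, and
 * bit 9 (IF) of the interrupted context's EFLAGS shows interrupts were
 * enabled - preempting an interrupts-off region would be unsafe.
 */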
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
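
/*
 * Worth spelling out the rdmsr trick in paranoidentry: MSR_GS_BASE holds
 * the current GS base, and kernel bases live in the negative half of the
 * canonical address space, so a set sign bit in %edx means GS already
 * points at the PDA and swapgs must be skipped. %ebx records the verdict
 * (1: no swapgs needed on exit, 0: swapgs needed) for the exit path.
 */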
936 * "Paranoid" exit path from exception stack.
937 * Paranoid because this is used by NMIs and cannot take
938 * any kernel state for granted.
939 * We don't do kernel preemption checks here, because only
940 * NMI should be common and it does not enable IRQs and
941 * cannot get reschedule ticks.
943 * "trace" is 0 for the NMI handler only, because irq-tracing
944 * is fundamentally NMI-unsafe. (we cannot change the soft and
945 * hard flags at once, atomically)
947 .macro paranoidexit trace=1
948 /* ebx: no swapgs flag */
950 testl %ebx,%ebx /* swapgs needed? */
951 jnz paranoid_restore\trace
953 jnz paranoid_userspace\trace
954 paranoid_swapgs\trace:
959 paranoid_restore\trace:
962 paranoid_userspace\trace:
963 GET_THREAD_INFO(%rcx)
964 movl TI_flags(%rcx),%ebx
965 andl $_TIF_WORK_MASK,%ebx
966 jz paranoid_swapgs\trace
967 movq %rsp,%rdi /* &pt_regs */
969 movq %rax,%rsp /* switch stack for scheduling */
970 testl $_TIF_NEED_RESCHED,%ebx
971 jnz paranoid_schedule\trace
972 movl %ebx,%edx /* arg3: thread flags */
976 ENABLE_INTERRUPTS(CLBR_NONE)
977 xorl %esi,%esi /* arg2: oldset */
978 movq %rsp,%rdi /* arg1: &pt_regs */
979 call do_notify_resume
980 DISABLE_INTERRUPTS(CLBR_NONE)
984 jmp paranoid_userspace\trace
985 paranoid_schedule\trace:
989 ENABLE_INTERRUPTS(CLBR_ANY)
991 DISABLE_INTERRUPTS(CLBR_ANY)
995 jmp paranoid_userspace\trace
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B-stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	movl %eax,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)
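
/*
 * A usage sketch for in-kernel callers (worker and the flag choice are
 * placeholders; flags are ordinary clone flags):
 *
 *	static int worker(void *data) { ... return 0; }
 *	...
 *	pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 */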
ENTRY(child_rip)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	mov %eax, %edi		# exit with fn's return value
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp,
 *			       struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
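
/*
 * The fallback chain above, sketched in C (prototypes as documented in
 * the comment before kernel_execve):
 *
 *	long kernel_execve(char *name, char **argv, char **envp)
 *	{
 *		// builds a fake iret frame on the stack, then:
 *		return sys_execve(name, argv, envp, regs);
 *	}
 *
 * A zero return means the exec succeeded, so we leave through
 * int_ret_from_sys_call and the new user state is installed with IRET.
 */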
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif
	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(do_hypervisor_callback)
/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */