/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
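/*
 * Illustration only (not from the original file): a CFI-annotated
 * push/pop pair, the pattern the CFI macros above are used for.
 * The directives emit dwarf2 unwind data and no instructions, so the
 * unwinder can locate the saved register in the frame:
 *
 *	pushq %rbx
 *	CFI_ADJUST_CFA_OFFSET 8
 *	CFI_REL_OFFSET rbx,0
 *	...
 *	popq %rbx
 *	CFI_ADJUST_CFA_OFFSET -8
 *	CFI_RESTORE rbx
 */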
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
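/*
 * Illustration only: the ptregs stubs below bracket a C call that may
 * rewrite the register frame in exactly this way, e.g.
 *
 *	FIXUP_TOP_OF_STACK %r11
 *	call sys_fork
 *	RESTORE_TOP_OF_STACK %r11
 *
 * so that the C code sees a fully defined struct pt_regs.
 */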
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	pushq $__KERNEL_DS /* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	CFI_ADJUST_CFA_OFFSET	8
	pushq $(1<<9) /* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS /* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip /* rip */
	CFI_ADJUST_CFA_OFFSET	8
	pushq %rax /* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm
	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8		# push writes 8 bytes on x86_64
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 *
 * Register setup:
 * rax	system call number
 * rdi	arg0
 * rcx	return address for syscall/sysret, C arg3
 * rsi	arg1
 * rdx	arg2
 * r10	arg3	(--> moved to rcx for C)
 * r8	arg4
 * r9	arg5
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
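/*
 * Illustration only (user-space side, not kernel code): a write(2)
 * issued under the register convention above; "msg" and "len" are
 * hypothetical symbols:
 *
 *	movq $__NR_write,%rax	# system call number
 *	movq $1,%rdi		# arg0: fd (stdout)
 *	leaq msg(%rip),%rsi	# arg1: buf
 *	movq $len,%rdx		# arg2: count
 *	syscall			# CPU: rcx <- rip, r11 <- rflags
 */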
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * to userspace.
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
		TI_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx			# arg3 into the C ABI register
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check
296 /* Handle a signal */
299 ENABLE_INTERRUPTS(CLBR_NONE)
300 testl $_TIF_DO_NOTIFY_MASK,%edx
303 /* Really a signal */
304 /* edx: work flags (arg3) */
305 leaq do_notify_resume(%rip),%rax
306 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
307 xorl %esi,%esi # oldset -> arg2
308 call ptregscall_common
309 1: movl $_TIF_WORK_MASK,%edi
310 /* Use IRET because user could have changed frame. This
311 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
312 DISABLE_INTERRUPTS(CLBR_NONE)
badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs
	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq    -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11			# pop return address
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15			# preserve return address across the call
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11			# push return address back
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
END(ptregscall_common)

ENTRY(stub_execve)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_execve)
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
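/*
 * Illustration only: the exception stubs below pick the matching frame
 * state. A handler whose vector pushes an error code starts from
 * XCPT_FRAME, e.g.
 *
 *	ENTRY(double_fault)
 *		XCPT_FRAME
 *		paranoidentry do_double_fault
 *		...
 *
 * while stubs with no CPU-pushed error code start from INTR_FRAME.
 */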
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rbp)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	    much work) */
1:	incl	%gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push    %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  retint_careful
retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq
	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	SWAPGS
	jmp general_protection
	/* edi: workmask, edx: work */
retint_careful:
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call  schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule
#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt  $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	leaq  \sym(%rip),%rax
	jmp error_entry
	.endm
	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl  $MSR_GS_BASE,%ecx
	rdmsr
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.endm
794 * "Paranoid" exit path from exception stack.
795 * Paranoid because this is used by NMIs and cannot take
796 * any kernel state for granted.
797 * We don't do kernel preemption checks here, because only
798 * NMI should be common and it does not enable IRQs and
799 * cannot get reschedule ticks.
801 * "trace" is 0 for the NMI handler only, because irq-tracing
802 * is fundamentally NMI-unsafe. (we cannot change the soft and
803 * hard flags at once, atomically)
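	/*
	 * Illustration only: an NMI-style handler built on these macros
	 * disables irq tracing at both ends (compare the nmi stub below):
	 *
	 *	paranoidentry do_nmi, 0, 0	# ist=0, irqtrace=0
	 *	...
	 *	paranoidexit 0			# trace=0: no irq tracing
	 */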
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp paranoid_userspace\trace
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
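/*
 * Illustration only: the zeroentry macro above establishes exactly this
 * contract before jumping here, roughly
 *
 *	pushq $0			# fake error code (no-error exceptions)
 *	pushq %rax			# real rax saved in the rdi slot
 *	leaq  do_divide_error(%rip),%rax # handler address for error_entry
 *	jmp error_entry
 */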
KPROBE_ENTRY(error_entry)
	_frame RDI
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq  $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je  error_kernelspace
error_swapgs:
	SWAPGS
error_sw:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne  retint_kernel
	movl  TI_flags(%rcx),%edx
	movl  $_TIF_WORK_MASK,%edi
	andl  %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je   error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je   error_swapgs
	jmp  error_sw
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	movl %eax,%gs
	jmp  2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
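/*
 * Illustration only, following the register contract above; my_fn and
 * my_arg are hypothetical symbols, and CLONE_KERNEL is used purely as
 * an example flag set:
 *
 *	movq $my_fn,%rdi	# arg0: int (*fn)(void *)
 *	movq $my_arg,%rsi	# arg1: void *arg
 *	movq $CLONE_KERNEL,%rdx	# arg2: clone flags
 *	call kernel_thread
 */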
ENTRY(kernel_thread)
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL
	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx
	call do_fork
	movq %rax,RAX(%rsp)
	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_thread)
child_rip:
	pushq $0		# fake return address
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
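/*
 * Illustration only: the body below builds a fake interrupt frame and
 * hands its struct pt_regs to sys_execve as the fourth argument,
 * roughly
 *
 *	movq %rsp,%rcx		# arg4: struct pt_regs * (fake frame)
 *	call sys_execve
 */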
ENTRY(kernel_execve)
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
END(machine_check)
#endif
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
ENDPROC(call_softirq)
KPROBE_ENTRY(ignore_sysret)
	mov $-ENOSYS,%eax
	sysret
ENDPROC(ignore_sysret)