/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all regs on the stack.
 * if the order here is changed, it needs to be
 * updated in fork.c:copy_process, signal.c:do_signal,
 * ptrace.c and ptrace.h
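 *
 * A sketch of the save layout assumed here (it should match the PT_*
 * offsets generated from struct pt_regs; treat the numbers as
 * illustrative, the generated constants are authoritative):
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - oldesp
 *	3C(%esp) - oldss
 *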
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include "irq_vectors.h"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
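/*
 * (For example, DISABLE_INTERRUPTS(CLBR_EAX) is read as "interrupts off,
 * and the paravirt replacement may clobber %eax", so callers must not keep
 * a live value in %eax across the macro; CLBR_NONE promises that nothing is
 * clobbered, at the price of a larger replacement that may not be patched
 * inline.)
 */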
#define nr_syscalls ((syscall_table_size)/4)

#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?

#define resume_userspace_sig	check_userspace
#define resume_userspace_sig	resume_userspace
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET fs, 0;*/\
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET es, 0;*/\
        CFI_ADJUST_CFA_OFFSET 4;\
        /*CFI_REL_OFFSET ds, 0;*/\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET eax, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebp, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edi, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET esi, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET edx, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ecx, 0;\
        CFI_ADJUST_CFA_OFFSET 4;\
        CFI_REL_OFFSET ebx, 0;\
        movl $(__USER_DS), %edx; \
        movl $(__KERNEL_PERCPU), %edx; \
#define RESTORE_INT_REGS \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\

#define RESTORE_REGS \
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
        CFI_ADJUST_CFA_OFFSET -4;\
.pushsection .fixup,"ax"; \
.section __ex_table,"a";\
#define RING0_INT_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, 3*4;\
        /*CFI_OFFSET cs, -2*4;*/\

#define RING0_EC_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, 4*4;\
        /*CFI_OFFSET cs, -2*4;*/\

#define RING0_PTREGS_FRAME \
        CFI_STARTPROC simple;\
        CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
        /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
        CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
        /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
        /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
        CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
        CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
        CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
        CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
        CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
        CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
        CFI_OFFSET ebx, PT_EBX-PT_OLDESP
        CFI_ADJUST_CFA_OFFSET 4
        GET_THREAD_INFO(%ebp)
        CFI_ADJUST_CFA_OFFSET -4
        pushl $0x0202			# Reset kernel eflags
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET -4
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */
        # userspace resumption stub bypassing syscall exit tracing
        preempt_stop(CLBR_ANY)
        GET_THREAD_INFO(%ebp)
        movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
        jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
        DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
        movl TI_flags(%ebp), %ecx	# need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
        call preempt_schedule_irq
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
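# A sketch of the userspace side, assuming the usual __kernel_vsyscall
# sysenter stub in the vsyscall page (simplified; the real stub also
# handles restart):
#
#	__kernel_vsyscall:
#		push %ecx
#		push %edx
#		push %ebp
#		movl %esp, %ebp
#		sysenter
#		/* SYSENTER_RETURN points here */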
        # sysenter call handler stub
ENTRY(ia32_sysenter_target)
        CFI_REGISTER esp, ebp
        movl TSS_sysenter_sp0(%esp),%esp
        /*
         * Interrupts are disabled here, but we can't trace that until
         * enough kernel state is set up to call TRACE_IRQS_OFF - and we
         * immediately enable interrupts at that point anyway.
         */
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ss, 0*/
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esp, 0
        orl $X86_EFLAGS_IF, (%esp)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
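         *
         * (A worked reading, assuming the usual bottom-of-stack thread_info
         * layout: %esp + 4*4 + 8 recovers the top of the thread stack, since
         * esp0 was left 8 bytes below the top and four words have been pushed
         * since then; subtracting THREAD_SIZE reaches the thread_info at the
         * bottom of the stack, and TI_sysenter_return indexes the field.)
         */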
        pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
        CFI_ADJUST_CFA_OFFSET 4
        ENABLE_INTERRUPTS(CLBR_NONE)
        /*
         * Load the potential sixth argument from user stack.
         * Careful about security.
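         *
         * (The check below is understood as: %ebp holds a user pointer to
         * the sixth argument, and comparing it against __PAGE_OFFSET-3
         * ensures the whole 4-byte load stays below the kernel boundary;
         * the exception table entry then handles the case where the address
         * is in range but simply not mapped.)
         */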
        cmpl $__PAGE_OFFSET-3,%ebp
        movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
        .long 1b,syscall_fault
        GET_THREAD_INFO(%ebp)
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)
        DISABLE_INTERRUPTS(CLBR_ANY)
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx
        jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
1:      mov  PT_FS(%esp), %fs
        ENABLE_INTERRUPTS_SYSCALL_RET
.pushsection .fixup,"ax"
2:      movl $0,PT_FS(%esp)
.section __ex_table,"a"
ENDPROC(ia32_sysenter_target)
        # system call handler stub
        RING0_INT_FRAME			# can't unwind into user space anyway
        pushl %eax			# save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        GET_THREAD_INFO(%ebp)
        # system call tracing in operation / emulation
        /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
        testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)		# store the return value
        DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
        testl $X86_EFLAGS_TF,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
        orl $_TIF_SINGLESTEP,TI_flags(%ebp)
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx	# current->work
        jne syscall_exit_work
        movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
        # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        je ldt_ss			# returning to user-space with LDT SS
restore_nocheck_notrace:
        addl $4, %esp			# skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
        pushl $0			# no error code
.section __ex_table,"a"
        .long irq_return,iret_exc
        larl PT_OLDSS(%esp), %eax
        testl $0x00400000, %eax		# returning to 32bit stack?
        jnz restore_nocheck		# all right, normal return
#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active. Rather than try to fix up the high bits of
         * ESP, bypass this code entirely. This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16 bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, pv_info+PARAVIRT_enabled
        /* If returning to userspace with a 16bit stack,
         * try to fix the higher word of ESP, as the CPU
         * won't restore it.
         * This is an "official" bug of all the x86-compatible
         * CPUs, which we can try to work around to make
         * dosemu and wine happy. */
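        /* (In rough terms, the fix below patches the per-cpu "espfix" GDT
         * descriptor so that segment base + low 16 bits of the user ESP
         * equals the real ESP, and then returns through that stack segment,
         * so the truncated ESP still points at the right place.) */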
        movl PT_OLDESP(%esp), %eax
        call patch_espfix_desc
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        DISABLE_INTERRUPTS(CLBR_EAX)
        CFI_ADJUST_CFA_OFFSET -8
        # perform work that needs to be done immediately before resumption
        RING0_PTREGS_FRAME		# can't unwind into user space anyway
        testb $_TIF_NEED_RESCHED, %cl
        DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
        testb $_TIF_NEED_RESCHED, %cl

work_notifysig:				# deal with pending signals and
					# notify-resume requests
        testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
        jne work_notifysig_v86		# returning to kernel-space or
        call do_notify_resume
        jmp resume_userspace_sig

        pushl %ecx			# save ti_flags for do_notify_resume
        CFI_ADJUST_CFA_OFFSET 4
        call save_v86_state		# %eax contains pt_regs pointer
        CFI_ADJUST_CFA_OFFSET -4
        call do_notify_resume
        jmp resume_userspace_sig
        # perform syscall exit tracing
        movl $-ENOSYS,PT_EAX(%esp)
        call do_syscall_trace
        jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
        movl PT_ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
END(syscall_trace_entry)

        # perform syscall exit tracing
        testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
        ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
        call do_syscall_trace
END(syscall_exit_work)

        RING0_INT_FRAME			# can't unwind into user space anyway
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,PT_EAX(%esp)
        movl $-ENOSYS,PT_EAX(%esp)
#define FIXUP_ESPFIX_STACK \
        /* since we are on the wrong stack, we can't make it C code :( */ \
        PER_CPU(gdt_page, %ebx); \
        GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
        pushl $__KERNEL_DS; \
        CFI_ADJUST_CFA_OFFSET 4; \
        CFI_ADJUST_CFA_OFFSET 4; \
        CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
        /* see if on espfix stack */ \
        cmpw $__ESPFIX_SS, %ax; \
        movl $__KERNEL_DS, %eax; \
        /* switch to normal stack */ \
        FIXUP_ESPFIX_STACK; \
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
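 *
 * (Each generated stub is assumed to look roughly like
 *
 *	pushl $~(vector)	# negated vector number as a marker
 *	jmp common_interrupt
 *
 * with the collected stub addresses forming the interrupt[] table that is
 * later used to set up the IDT.)
 */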
ENTRY(irq_entries_start)
        CFI_ADJUST_CFA_OFFSET -4
        CFI_ADJUST_CFA_OFFSET 4
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT(name, nr) \
        CFI_ADJUST_CFA_OFFSET 4; \

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
KPROBE_ENTRY(page_fault)
        CFI_ADJUST_CFA_OFFSET 4
        /* the function address is in %fs's slot on the stack */
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0*/
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edi, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esi, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET fs, 0*/
        movl $(__KERNEL_PERCPU), %ecx
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_REGISTER es, ecx*/
        movl PT_FS(%esp), %edi		# get the function address
        movl PT_ORIG_EAX(%esp), %edx	# get the error code
        movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
        mov  %ecx, PT_FS(%esp)
        /*CFI_REL_OFFSET fs, ES*/
        movl $(__USER_DS), %ecx
        movl %esp,%eax			# pt_regs pointer
        jmp ret_from_exception
KPROBE_END(page_fault)
ENTRY(coprocessor_error)
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_simd_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
END(simd_coprocessor_error)

ENTRY(device_not_available)
        pushl $-1			# mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        testl $0x4, %eax		# EM (math emulation bit)
        jne device_not_available_emulate
        preempt_stop(CLBR_ANY)
        call math_state_restore
        jmp ret_from_exception
device_not_available_emulate:
        pushl $0			# temporary storage for ORIG_EIP
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET -4
        jmp ret_from_exception
END(device_not_available)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
        cmpw $__KERNEL_CS,4(%esp); \
        movl TSS_sysenter_sp0+offset(%esp),%esp; \
        CFI_DEF_CFA esp, 0; \
        CFI_ADJUST_CFA_OFFSET 4; \
        pushl $__KERNEL_CS; \
        CFI_ADJUST_CFA_OFFSET 4; \
        pushl $sysenter_past_esp; \
        CFI_ADJUST_CFA_OFFSET 4; \
        CFI_REL_OFFSET eip, 0

        cmpl $ia32_sysenter_target,(%esp)
        jne debug_stack_correct
        FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
        pushl $-1			# mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        xorl %edx,%edx			# error code 0
        movl %esp,%eax			# pt_regs pointer
        jmp ret_from_exception
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
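 *
 * (Roughly, the checks below read: if %ss is the espfix segment, take the
 * 16bit-stack path; if the saved %eip is the sysenter entry point, fix up
 * the stack; otherwise, if the frame looks like a debug exception that
 * itself hit the sysenter path, fix up the stack with the larger offset;
 * else the stack is already correct.)
 */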
        CFI_ADJUST_CFA_OFFSET 4
        cmpw $__ESPFIX_SS, %ax
        CFI_ADJUST_CFA_OFFSET -4
        cmpl $ia32_sysenter_target,(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
        CFI_ADJUST_CFA_OFFSET -4
        jae nmi_stack_correct
        cmpl $ia32_sysenter_target,12(%esp)
        je nmi_debug_stack_check

        /* We have a RING0_INT_FRAME here */
        CFI_ADJUST_CFA_OFFSET 4
        xorl %edx,%edx			# zero error code
        movl %esp,%eax			# pt_regs pointer
        jmp restore_nocheck_notrace

        FIX_STACK(12,nmi_stack_correct, 1)
        jmp nmi_stack_correct

nmi_debug_stack_check:
        /* We have a RING0_INT_FRAME here */
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct

        /* We have a RING0_INT_FRAME here.
         *
         * create the pointer to lss back
         */
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        /* copy the iret frame of 12 bytes */
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        FIXUP_ESPFIX_STACK		# %eax == %esp
        xorl %edx,%edx			# zero error code
        lss 12+4(%esp), %esp		# back to espfix stack
        CFI_ADJUST_CFA_OFFSET -24
#ifdef CONFIG_PARAVIRT
.section __ex_table,"a"
        .long native_iret, iret_exc

ENTRY(native_irq_enable_syscall_ret)
END(native_irq_enable_syscall_ret)

        pushl $-1			# mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        xorl %edx,%edx			# zero error code
        movl %esp,%eax			# pt_regs pointer
        jmp ret_from_exception

        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
ENTRY(coprocessor_segment_overrun)
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_segment_overrun
        CFI_ADJUST_CFA_OFFSET 4
END(coprocessor_segment_overrun)

        pushl $do_invalid_TSS
        CFI_ADJUST_CFA_OFFSET 4

ENTRY(segment_not_present)
        pushl $do_segment_not_present
        CFI_ADJUST_CFA_OFFSET 4
END(segment_not_present)

        pushl $do_stack_segment
        CFI_ADJUST_CFA_OFFSET 4

KPROBE_ENTRY(general_protection)
        pushl $do_general_protection
        CFI_ADJUST_CFA_OFFSET 4
KPROBE_END(general_protection)

ENTRY(alignment_check)
        pushl $do_alignment_check
        CFI_ADJUST_CFA_OFFSET 4

        pushl $0			# no error code
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_divide_error
        CFI_ADJUST_CFA_OFFSET 4

#ifdef CONFIG_X86_MCE
        CFI_ADJUST_CFA_OFFSET 4
        pushl machine_check_vector
        CFI_ADJUST_CFA_OFFSET 4

ENTRY(spurious_interrupt_bug)
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_spurious_interrupt_bug
        CFI_ADJUST_CFA_OFFSET 4
END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
        pushl $0			# fake return address for unwinder
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET 4
ENDPROC(kernel_thread_helper)
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
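/* (The five words removed below are presumably the ss/esp/eflags/cs/eip
   frame that Xen supplies even for a sysenter-style entry; the normal
   sysenter path rebuilds that frame itself.) */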
ENTRY(xen_sysenter_target)
        addl $5*4, %esp			/* remove xen-provided frame */
        jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
        CFI_ADJUST_CFA_OFFSET 4
        /* Check to see if we got the event in the critical
           region in xen_iret_direct, after we've reenabled
           events and checked for pending events. This simulates the
           iret instruction's behaviour where it delivers a
           pending interrupt when enabling interrupts. */
        movl PT_EIP(%esp),%eax
        cmpl $xen_iret_start_crit,%eax
        cmpl $xen_iret_end_crit,%eax
        jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
        call xen_evtchn_do_upcall
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_ADJUST_CFA_OFFSET -4
        CFI_ADJUST_CFA_OFFSET -16
        jmp iret_exc			# EAX != 0 => Category 2 (Bad IRET)
5:      pushl $0			# EAX == 0 => Category 1 (Bad segment)
        CFI_ADJUST_CFA_OFFSET 4
        jmp ret_from_exception
.section .fixup,"ax"
.section __ex_table,"a"
ENDPROC(xen_failsafe_callback)

#endif	/* CONFIG_XEN */
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)