/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <linux/config.h>
#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <asm/hardware.h>		/* should be moved into entry-macro.S */
#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
#include <asm/arch/entry-macro.S>

#include "entry-header.S"
/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
1:	get_irqnr_and_base r0, r6, r5, lr
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
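	@ For reference, the C routine reached from here (asm_do_IRQ in
	@ arch/arm/kernel/irq.c) is expected to look roughly like the
	@ following, matching the r0/r1 convention above (a sketch, not
	@ verified against this exact tree):
	@
	@	asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);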
	@ this macro assumes that irqstat (r6) and base (r5) are
	@ preserved from get_irqnr_and_base above
	test_for_ipi r0, r6, r5, lr
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
	inv_entry BAD_PREFETCH
	inv_entry BAD_UNDEFINSTR
	@ XXX fall through to common_invalid
@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
	add	r0, sp, #S_PC			@ here for interlock avoidance
	mov	r7, #-1				@  ""   ""    ""        ""
	str	r4, [sp]			@ save preserved r0
	stmia	r0, {r5 - r7}			@ lr_<exception>,
						@ cpsr_<exception>, "old_r0"
	sub	sp, sp, #S_FRAME_SIZE
	add	r5, sp, #S_SP			@ here for interlock avoidance
	mov	r4, #-1				@  ""  ""      ""       ""
	add	r0, sp, #S_FRAME_SIZE		@  ""  ""      ""       ""
	str	r1, [sp]			@ save the "real" r0 copied
						@ from the exception stack
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
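	@ For orientation: struct pt_regs in asm/ptrace.h is essentially an
	@ array of 18 words (r0-r15, cpsr, orig_r0), so the S_* offsets used
	@ here are assumed to map as follows (a sketch of the usual layout):
	@
	@	struct pt_regs { unsigned long uregs[18]; };
	@	S_PC = 15*4, S_PSR = 16*4, S_OLD_R0 = 17*4, S_FRAME_SIZE = 18*4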
	@ get ready to re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
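	@ For reference, the main C handler eventually reached for data
	@ aborts is declared along these lines (a sketch for orientation,
	@ not copied from this tree):
	@
	@	asmlinkage void do_DataAbort(unsigned long addr,
	@				     unsigned int fsr, struct pt_regs *regs);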
	@ set desired IRQ state, then call main handler
	@ IRQs off again before pulling preserved data off the stack
	@ restore SPSR and restart the instruction
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	strne	r0, [r0, -r0]			@ bug(): deliberately fault if the count was unbalanced
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	teq	r8, #0				@ was preempt count == 0?
	ldreq	r6, .LCirq_stat
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ done: no further reschedule needed
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	mov	r0, sp				@ struct pt_regs *regs
	@ IRQs off again before pulling preserved data off the stack
	@ restore SPSR and restart the instruction
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
	@ re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT

	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	bl	do_PrefetchAbort		@ call abort handler
	@ IRQs off again before pulling preserved data off the stack
	@ restore SPSR and restart the instruction
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	sub	sp, sp, #S_FRAME_SIZE
	add	r0, sp, #S_PC			@ here for interlock avoidance
	mov	r4, #-1				@  ""  ""     ""        ""
	str	r1, [sp]			@ save the "real" r0 copied
						@ from the exception stack
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
	@ make sure our user space atomic helper is aborted
	bichs	r3, r3, #PSR_Z_BIT
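	@ (The pre-ARMv6 __kuser_cmpxchg below keeps its progress in the
	@ Z flag; clearing Z here whenever the interrupted pc was at or
	@ above TASK_SIZE makes such an interrupted cmpxchg fail and retry.
	@ See the "theory of operation" comment at __kuser_cmpxchg.)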
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr

	@ Enable the alignment trap while in kernel mode

	@ Clear FP to mark the first stack frame
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.

	@ IRQs on, then call the main handler
	adr	lr, ret_from_exception
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP

	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	adr	r9, ret_from_exception
	@ fallthrough to call_fpe
/*
 * The out-of-line fixup for the ldrt above.
 */
	.section .fixup, "ax"
	.section __ex_table,"a"
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r10 = this thread's thread_info structure.
 */
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
	add	pc, pc, r8, lsr #6		@ r8 = CP# << 8, so lsr #6 gives CP# * 4, indexing the word table below
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r9  = normal "successful" return address
 *  lr  = unrecognised FP instruction return address
 */
	adr	lr, ret_from_exception
	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	bl	do_PrefetchAbort		@ call abort handler

/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
	ldr	r6, [r2, #TI_CPU_DOMAIN]!	@ note the writeback: r2 is left pointing at TI_CPU_DOMAIN
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
	@ Always disable VFP so we can lazily save/restore the old
	@ state.  This occurs in the context of the previous thread.
	bic	r4, r4, #FPEXC_ENABLE
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
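/*
 * For orientation: cpu_context_save (asm/thread_info.h) is assumed here
 * to hold r4-r9, sl, fp, sp and pc in that order (10 words), followed by
 * two "extra" words used for the XScale accumulator - a sketch of the
 * layout, not copied from this tree.  The writeback on the TI_CPU_DOMAIN
 * load above leaves r2 pointing one word below the saved context, which
 * is why the loads use the increment-before (ldmib) addressing mode.
 */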
/*
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  They are used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set, or on restrictions such as
 * those imposed by SMP systems.  In other words, the kernel reserves the
 * right to change this code as needed without warning.  Only the entry
 * points and their results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for any
 * other purpose.
 */
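/*
 * Given the 32-byte alignment and top-down packing, the entry point of
 * helper number n (counting __kuser_get_tls as 1 and __kuser_cmpxchg as 2)
 * works out to 0xffff1000 - (32 * n).  A user-space header might encode
 * this as follows - an illustrative sketch only, not an ABI definition
 * taken from this file:
 *
 *	#define __kuser_helper_addr(n)	((void *)(0xffff1000 - 32 * (n)))
 */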
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 *	lr = return address
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr, on behalf of user space, if *ptr is
 * equal to oldval.  Return zero if *ptr was changed, or non-zero if no
 * exchange happened.  The C flag is also set if *ptr was changed to allow
 * for assembly optimization in the calling code.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr);	\
 *	   register unsigned int __result asm("r1");		\
 *	   asm volatile (					\
 *	       "1: @ atomic_add\n\t"				\
 *	       "ldr	r0, [r2]\n\t"				\
 *	       "mov	r3, #0xffff0fff\n\t"			\
 *	       "add	lr, pc, #4\n\t"				\
 *	       "add	r1, r0, %2\n\t"				\
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t"	\
 *	       "bcc	1b"					\
 *	       : "=&r" (__result)				\
 *	       : "r" (__ptr), "rIL" (val)			\
 *	       : "r0","r3","ip","lr","cc","memory" );		\
 *	   __result; })
 */
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */

#elif __LINUX_ARM_ARCH__ < 6
	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval.  If ever an exception
	 * occurs we cannot be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The pointer writeback on the str is used to prevent a race with
	 * an exception happening just after the str instruction, which
	 * would otherwise clear the Z flag although the exchange was done.
	 */
	teq	ip, ip				@ set Z flag
	ldr	ip, [r2]			@ load current val
	add	r3, r2, #1			@ prepare store ptr
	teqeq	ip, r0				@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!			@ store newval if still allowed
	subs	r0, r2, r3			@ if r2 == r3 the str occurred
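	@ the subs doubles as the documented C flag: when the store went
	@ through, the writeback made r3 == r2, so r0 == 0 with carry set
	@ (no borrow); otherwise r3 == r2 + 1, giving r0 != 0 and carry clear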
/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 *	lr = return address
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */
__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0 (pc reads as . + 8)
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	.word	0				@ pad up to __kuser_helper_version
/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
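/*
 * For instance, user space wanting to call __kuser_cmpxchg (the second
 * helper from the top) might gate its use on this word - an illustrative
 * sketch only, with a hypothetical fallback name:
 *
 *	if (__kernel_helper_version < 2)
 *		use_syscall_based_cmpxchg();
 */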
	.globl	__kuser_helper_end
/*
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode-specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	sub	lr, lr, #\correction

	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	stmia	sp, {r0, lr}			@ save r0, lr
	str	lr, [sp, #8]			@ save spsr

	@ Prepare for SVC32 mode.  IRQs remain disabled.
	eor	r0, r0, #(\mode ^ SVC_MODE)

	@ the branch table must immediately follow this code
	ldr	lr, [pc, lr, lsl #2]		@ index the table below by mode nibble
	movs	pc, lr				@ branch to handler in SVC mode
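	@ The eor above works because r0 holds the just-read cpsr, whose
	@ mode bits are exactly \mode: xoring with (\mode ^ SVC_MODE)
	@ rewrites the mode field to SVC_MODE while leaving all other bits
	@ untouched, so the movs pc, lr (which copies the prepared spsr
	@ back into the cpsr) lands the handler in SVC mode.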
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  OK, you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */
/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
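/*
 * The vector entries below are assembled as "b vector_xxx + stubs_offset".
 * A branch encodes a PC-relative displacement, so once the vectors are
 * copied to the base of the vector page and the stubs to base + 0x200,
 * each branch lands on the relocated stub: stubs_offset is exactly the
 * link-time distance corresponding to that run-time layout.
 */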
	.globl	__vectors_start
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	cr_no_alignment