/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <linux/config.h>

#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>

#include "entry-header.S"
/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
1:	get_irqnr_and_base r0, r6, r5, lr
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@

	/*
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	test_for_ipi r0, r6, r5, lr

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
#endif
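/*
 * The dispatch loop above hands each decoded interrupt to C code.  For
 * reference, the C entry point in kernels of this vintage is declared as:
 *
 *	asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 */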
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE

	inv_entry BAD_PREFETCH

	inv_entry BAD_UNDEFINSTR

	@ XXX fall through to common_invalid

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

	sub	sp, sp, #S_FRAME_SIZE
 SPFIX(	bicne	sp, sp, #4	)

	add	r5, sp, #S_SP		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #S_FRAME_SIZE	@  ""  ""      ""       ""
 SPFIX(	addne	r0, r0, #4	)
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@
	@ get ready to re-enable interrupts if appropriate
	@
	biceq	r9, r9, #PSR_I_BIT

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
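	@
	@ (For reference, those results are eventually handed to the C
	@ fault code, whose prototype in kernels of this era is:
	@	asmlinkage void do_DataAbort(unsigned long addr,
	@			unsigned int fsr, struct pt_regs *regs);
	@ shown here for orientation only.)
	@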
	@
	@ set desired IRQ state, then call main handler
	@

	@
	@ IRQs off again before pulling preserved data off the stack
	@

	@
	@ restore SPSR and restart the instruction
	@
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
preempt_return:
	ldr	r0, [tsk, #TI_PREEMPT]		@ read preempt value
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	teq	r0, r7
	strne	r0, [r0, -r0]			@ bug()
#endif
	ldr	r0, [sp, #S_PSR]		@ irqs are already disabled
	msr	spsr_cxsf, r0
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
svc_preempt:
	teq	r8, #0				@ was preempt count = 0
	ldreq	r6, .LCirq_stat
	movne	pc, lr				@ no
	ldr	r0, [r6, #4]			@ local_irq_count
	ldr	r1, [r6, #8]			@ local_bh_count
	adds	r0, r0, r1
	movne	pc, lr
	mov	r7, #0				@ preempt_schedule_irq
	str	r7, [tsk, #TI_PREEMPT]		@ expects preempt_count == 0
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	beq	preempt_return			@ go again
	b	1b
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	mov	r0, sp				@ struct pt_regs *regs

	@
	@ IRQs off again before pulling preserved data off the stack
	@

	@
	@ restore SPSR and restart the instruction
	@
	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
	msr	spsr_cxsf, lr
	ldmia	sp, {r0 - pc}^			@ Restore SVC registers

	@
	@ re-enable interrupts if appropriate
	@
	biceq	r9, r9, #PSR_I_BIT

	@
	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ pointer to registers on stack
	bl	do_PrefetchAbort		@ call abort handler

	@
	@ IRQs off again before pulling preserved data off the stack
	@

	@
	@ restore SPSR and restart the instruction
	@
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT
.LCirq_stat:
	.word	irq_stat
#endif

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must
 * be a multiple of 8 as well.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	sub	sp, sp, #S_FRAME_SIZE
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
	@ make sure our user space atomic helper is aborted
	cmp	r2, #TASK_SIZE
	bichs	r3, r3, #PSR_Z_BIT
#endif

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	@
	@ Enable the alignment trap while in kernel mode
	@

	@
	@ Clear FP to mark the first stack frame
	@

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@

	@
	@ IRQs on, then call the main handler
	@
	adr	lr, ret_from_exception
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#endif

#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
	teq	r0, r7
	strne	r0, [r0, -r0]
#endif
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	fpundefinstr			@ ignore FP

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	adr	r9, ret_from_exception
	@ fallthrough to call_fpe

/*
 * The out of line fixup for the ldrt above.
 */
	.section .fixup, "ax"
	.section __ex_table,"a"
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r10 = this thread's thread_info structure.
 */
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
	strb	r7, [r6, r8, lsr #8]		@ set appropriate used_cp[]

#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
	add	pc, pc, r8, lsr #6
	mov	r0, r0

	mov	pc, lr				@ CP#0
	b	do_fpe				@ CP#1 (FPE)
	b	do_fpe				@ CP#2 (FPE)
	mov	pc, lr				@ CP#3
	mov	pc, lr				@ CP#4
	mov	pc, lr				@ CP#5
	mov	pc, lr				@ CP#6
	mov	pc, lr				@ CP#7
	mov	pc, lr				@ CP#8
	mov	pc, lr				@ CP#9
#ifdef CONFIG_VFP
	b	do_vfp				@ CP#10 (VFP)
	b	do_vfp				@ CP#11 (VFP)
#else
	mov	pc, lr				@ CP#10 (VFP)
	mov	pc, lr				@ CP#11 (VFP)
#endif
	mov	pc, lr				@ CP#12
	mov	pc, lr				@ CP#13
	mov	pc, lr				@ CP#14 (Debug)
	mov	pc, lr				@ CP#15 (Control)

	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	adr	lr, ret_from_exception

	enable_irq				@ Enable interrupts
	mov	r0, r2				@ address (pc)
	mov	r1, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
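/*
 * (For reference, a sketch of the matching C-side declaration from this
 * era; the parameter names here are illustrative:
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *		struct thread_info *prev_ti, struct thread_info *next_ti);
 * )
 */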
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
#ifndef CONFIG_MMU
	add	r2, r2, #TI_CPU_DOMAIN
#else
	ldr	r6, [r2, #TI_CPU_DOMAIN]!
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
	clrex
#else
	strex	r5, r4, [ip]			@ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
	mra	r4, r5, acc0
	stmia	ip, {r4, r5}
#endif
#if defined(CONFIG_HAS_TLS_REG)
	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)
	mov	r4, #0xffff0fff
	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
#endif
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#ifdef CONFIG_VFP
	@ Always disable VFP so we can lazily save/restore the old
	@ state. This occurs in the context of the previous thread.
	VFPFMRX	r4, FPEXC
	bic	r4, r4, #FPEXC_ENABLE
	VFPFMXR	FPEXC, r4
#endif
#if defined(CONFIG_IWMMXT)
	bl	iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
	add	r4, r2, #40			@ cpu_context_save->extra
	ldmib	r4, {r4, r5}
	mar	acc0, r4, r5
#endif
	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
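 *
 * Given the fixed 32-byte stride down from the top of the vector page, a
 * user space library could compute helper entry addresses generically.
 * A hedged sketch (the macro name is illustrative, not part of the ABI):
 *
 *	#define __kuser_helper_addr(n)	(0xffff0fe0 - 32 * (n))
 *
 * which yields 0xffff0fe0 for helper 0, 0xffff0fc0 for helper 1, and so on.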
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */
	.globl	__kuser_helper_start
__kuser_helper_start:
/*
 * Reference prototype:
 *
 *	void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	none
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *	asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *		: : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	mov	pc, lr
/*
 * Reference prototype:
 *
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *	r0 = oldval
 *	r1 = newval
 *	r2 = ptr
 *	lr = return address
 *
 * Output:
 *
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *	r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Note: this routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	   asm volatile ( \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       "bcc	1b" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
 *	   __result; })
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */

#elif __LINUX_ARM_ARCH__ < 6

	/*
	 * Theory of operation:
	 *
	 * We set the Z flag before loading oldval.  If ever an exception
	 * occurs we can not be sure the loaded value will still be the same
	 * when the exception returns, therefore the user exception handler
	 * will clear the Z flag whenever the interrupted user code was
	 * actually from the kernel address space (see the usr_entry macro).
	 *
	 * The post-increment on the str is used to prevent a race with an
	 * exception happening just after the str instruction which would
	 * clear the Z flag although the exchange was done.
	 */
	teq	ip, ip			@ set Z flag
	ldr	ip, [r2]		@ load current val
	add	r3, r2, #1		@ prepare store ptr
	teqeq	ip, r0			@ compare with oldval if still allowed
	streq	r1, [r3, #-1]!		@ store newval if still allowed
	subs	r0, r2, r3		@ if r2 == r3 the str occurred
	mov	pc, lr

#else

	mcr	p15, 0, r0, c7, c10, 5	@ dmb
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
	mov	pc, lr

#endif
/*
 * Reference prototype:
 *
 *	int __kernel_get_tls(void)
 *
 * Input:
 *
 *	lr = return address
 *
 * Output:
 *
 *	r0 = TLS value
 *
 * Clobbered:
 *
 *	the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *		: "=r" (__val) : : "lr","cc" ); \
 *	   __val; })
 */

__kuser_get_tls:				@ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
	ldr	r0, [pc, #(16 - 8)]		@ TLS stored at 0xffff0ff0
	mov	pc, lr
#else
	mrc	p15, 0, r0, c13, c0, 3		@ read TLS register
	mov	pc, lr
#endif

	.word	0			@ pad up to __kuser_helper_version
/*
 * Reference declaration:
 *
 *	extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
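 *
 * For instance, code that wants __kernel_cmpxchg could check for it first.
 * A hedged sketch (assuming the cmpxchg helper is present from version 2
 * onwards, per the current helper ordering; treat that as illustrative):
 *
 *	int have_kuser_cmpxchg(void)
 *	{
 *		return __kernel_helper_version >= 2;
 *	}
 */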
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0

	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
	mov	r0, sp
	ldr	lr, [pc, lr, lsl #2]
	movs	pc, lr			@ branch to handler in SVC mode
	.endm
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
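/*
 * The stubs_offset trick: a branch assembled as "b vector_xxx + stubs_offset"
 * targets the address (vector_xxx - __stubs_start) + 0x200 bytes past
 * __vectors_start.  After the copy, with the vectors at 0xffff0000 and the
 * stubs at 0xffff0200, that PC-relative displacement lands exactly on the
 * copied vector_xxx, so the branches need no relocation.
 */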
	.globl	__vectors_start
__vectors_start:
	swi	SYS_ERROR0
	b	vector_und + stubs_offset
	ldr	pc, .LCvswi + stubs_offset
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_addrexcptn + stubs_offset
	b	vector_irq + stubs_offset
	b	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.globl	cr_no_alignment