/*
 * arch/sh/kernel/entry.S
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive.
 */
11 #include <linux/sys.h>
12 #include <linux/errno.h>
13 #include <linux/linkage.h>
14 #include <asm/asm-offsets.h>
15 #include <asm/thread_info.h>
16 #include <asm/unistd.h>
17 #include <asm/cpu/mmu_context.h>
18 #include <asm/pgtable.h>
! NOTE: GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the address
! to be jumped to is too far, but this causes an illegal slot exception.
/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h as well.
 */
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/*
 * Offsets into the saved-register (pt_regs) frame on the kernel stack.
 * NOTE(review): this excerpt is incomplete — further OFF_* offsets
 * (sp, sr, pc, etc.) are elided here; verify against the full file.
 */
OFF_R0	= 0		/* Return value. New ABI also arg4 */
OFF_R1	= 4		/* New ABI: arg5 */
OFF_R2	= 8		/* New ABI: arg6 */
OFF_R3	= 12		/* New ABI: syscall_nr */
OFF_R4	= 16		/* New ABI: arg0 */
OFF_R5	= 20		/* New ABI: arg1 */
OFF_R6	= 24		/* New ABI: arg2 */
OFF_R7	= 28		/* New ABI: arg3 */

/* Aliases for the bank-1 registers used while handling exceptions */
#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>
/*
 * Kernel mode register usage:
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!   (The TLB hits, but the access violates the protection.
!    It can be a valid access, such as stack growth and/or C-O-W.)
!
! Find the pmd/pte entry and load it into the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on the C implementation.
!
! NOTE(review): this excerpt is a damaged copy — the leading integer on
! each line below is a line number fused in by extraction, and many
! original lines are elided.  Verify against the full arch/sh/kernel/entry.S.
113 #if defined(CONFIG_MMU)
! MMU fault entry points.  Bodies are largely elided in this excerpt;
! visibly, r6 is loaded with the faulting address and control reaches
! the C handler whose address sits in the literal pool (do_page_fault).
120 ENTRY(tlb_miss_store)
125 ENTRY(initial_page_write)
130 ENTRY(tlb_protection_violation_load)
135 ENTRY(tlb_protection_violation_store)
141 mov.l @r0, r6 ! address
149 3: .long do_page_fault
! Address-error entry points: r5 carries the access type for the C
! handler (0 = read, 1 = write) and r6 the faulting address; the
! handler address (do_address_error) is in the literal pool.
152 ENTRY(address_error_load)
154 mov #0,r5 ! writeaccess = 0
157 ENTRY(address_error_store)
159 mov #1,r5 ! writeaccess = 1
164 mov.l @r0, r6 ! address
171 2: .long do_address_error
172 #endif /* CONFIG_MMU */
! NOTE(review): fragmentary excerpt — leading integers are fused-in line
! numbers from extraction and most lines of this routine are elided.
174 #if defined(CONFIG_SH_STANDARD_BIOS)
175 /* Unwind the stack and jmp to the debug entry */
! Visibly: load an SR image (BL=1, RB=1, IMASK=0xF) from the literal
! pool, then switch register banks by writing SR, before jumping to the
! debugger vector (gdb_vbr_vector) in the literal pool.
186 mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
188 ldc r8, sr ! here, change the register bank
211 2: .long gdb_vbr_vector
212 #endif /* CONFIG_SH_STANDARD_BIOS */
! Return-to-context restore path (fragment).  Pops the saved frame off
! the kernel stack: original SP and SR, the DSP-mode marker (and, when
! it matches, the saved DSP register set), then rebuilds the SR value
! from the saved SR and the IMASK bits before returning.
! NOTE(review): fragmentary excerpt — leading integers are fused-in
! line numbers from extraction and many lines are elided; the matching
! #endif for the CONFIG_KGDB_NMI block below is among the elided lines.
226 or r9, r8 ! BL =1, RB=1
227 ldc r8, sr ! here, change the register bank
236 mov.l @r15+, k4 ! original stack pointer
239 mov.l @r15+, k3 ! original SR
243 add #4, r15 ! Skip syscall number
246 mov.l @r15+, k0 ! DSP mode marker
248 cmp/eq k0, k1 ! Do we have a DSP stack frame?
251 stc sr, k0 ! Enable CPU DSP mode
252 or k1, k0 ! (within kernel it may be disabled)
254 mov r2, k0 ! Backup r2
256 ! Restore DSP registers from stack
275 mov k0, r2 ! Restore r2
279 ! Calculate new SR value
280 mov k3, k2 ! original SR value
284 and k1, k2 ! Mask original SR value
286 mov k3, k0 ! Calculate IMASK-bits
294 6: or k0, k2 ! Set the IMASK-bits
297 #if defined(CONFIG_KGDB_NMI)
303 mov.l @r15+, k2 ! restore EXPEVT
309 5: .long 0x00001000 ! DSP
! NOTE(review): fragmentary excerpt — leading integers are fused-in
! line numbers from extraction; surrounding lines are elided.
312 ! common exception handler
313 #include "../../entry-common.S"
315 ! Exception Vector Base
317 ! Should be aligned page boundary.
331 2: .long ret_from_exception
/* This code makes some assumptions to improve performance.
 * Make sure they are still true. */
#if PTRS_PER_PGD != PTRS_PER_PTE
#error PGD and PTE sizes don't match
#endif

/* gas doesn't flag impossible values for mov #immediate as an error */
#if (_PAGE_PRESENT >> 2) > 0x7f
#error cannot load PAGE_PRESENT as an immediate
#endif
#if _PAGE_DIRTY > 0x7f
#error cannot load PAGE_DIRTY as an immediate
#endif
#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
#endif

/*
 * ldmmupteh(r): load the MMU_PTEH control-register address into r.
 * On SH-4 the constant does not fit in a (sign-extended 8-bit)
 * mov #immediate — hence the 0x7f checks above — so it is fetched
 * from the literal pool entry 8f instead.
 */
#if defined(CONFIG_CPU_SH4)
#define ldmmupteh(r)	mov.l	8f, r
#else
#define ldmmupteh(r)	mov	#MMU_PTEH, r
#endif
! Software TLB-miss page-table walk (fragment).  Visibly: the faulting
! address is read from MMU_TEA and the page-table base from MMU_TTB;
! the pgd entry is indexed (PGDIR_SHIFT), then the pte entry
! (PAGE_SHIFT); the pte is tested for _PAGE_PRESENT/_PAGE_ACCESSED,
! MMU_PTEA/MMU_PTEL are programmed, and the dirtied pte is written back.
! Trailing "! n XX" annotations are per-instruction cycle/pipe notes.
! NOTE(review): fragmentary excerpt — leading integers are fused-in
! line numbers from extraction and many lines (including the ldtlb/rte
! sequence itself and label 8f) are elided; verify against the full file.
360 #ifdef COUNT_EXCEPTIONS
361 ! Increment the counts
369 ! k1 pgd and pte pointers
370 ! k2 faulting address
371 ! k3 pgd and pte index masks
374 ! Load up the pgd entry (k1)
376 ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
378 mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
379 mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
381 mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
383 mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
385 mov k2, k0 ! 5 MT (latency=0)
390 mov.l @(k0, k1), k1 ! 21 LS (latency=2)
391 mov #-(PAGE_SHIFT-2), k4 ! 6 EX
393 ! Load up the pte entry (k2)
395 mov k2, k0 ! 5 MT (latency=0)
403 mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
405 mov.l @(k0, k1), k2 ! 21 LS (latency=2)
408 #ifdef CONFIG_CPU_HAS_PTEA
409 ! Test the entry for present and _PAGE_ACCESSED
412 mov k2, k0 ! 5 MT (latency=0)
420 ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
422 ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
426 mov k0, k3 ! 5 MT (latency=0)
427 mov k2, k0 ! 5 MT (latency=0)
433 ldmmupteh(k0) ! 9 LS (latency=2)
434 shll2 k4 ! 101 EX _PAGE_ACCESSED
438 mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
440 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
442 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
445 ! Test the entry for present and _PAGE_ACCESSED
447 mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
450 shll2 k4 ! 101 EX _PAGE_ACCESSED
451 ldmmupteh(k0) ! 9 LS (latency=2)
456 ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
465 mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
469 ! At least one instruction between ldtlb and rte
477 10: or k4, k2 ! 82 EX
481 ! At least one instruction between ldtlb and rte
482 mov.l k2, @k1 ! 27 LS
486 ! Note we cannot execute mov here, because it is executed after
487 ! restoring SSR, so would be executed in user space.
! Literal pool for the walk above.
492 ! One cache line if possible...
493 1: .long swapper_pg_dir
494 4: .short (PTRS_PER_PGD-1) << 2
495 5: .short _PAGE_PRESENT
496 7: .long _PAGE_FLAGS_HARDWARE_MASK
498 #ifdef COUNT_EXCEPTIONS
499 9: .long exception_count_miss
! Slow path: pgd/pte not present, plus the interrupt entry marker and
! return-path literals (ret_from_irq / ret_from_exception).
! NOTE(review): fragmentary excerpt — leading integers are fused-in
! line numbers from extraction; most lines (including the #endif's
! matching context) are elided.  k2 = -1 visibly marks an interrupt
! exception, which handle_exception later tests (see cmp with #-1).
502 ! Either pgd or pte not present
512 #if defined(CONFIG_KGDB_NMI)
513 ! Debounce (filter nested NMI)
527 #endif /* defined(CONFIG_KGDB_NMI) */
529 mov #-1, k2 ! interrupt exception marker
534 3: .long ret_from_irq
535 4: .long ret_from_exception
! Common exception entry (fragment).  Visibly: tests SSR.MD (shifted
! into T) to distinguish kernel-mode from user-mode entry, switches
! r15 to the kernel stack for user entries, optionally saves the DSP
! register set (hand-encoded movs.l opcodes because GAS mis-assembled
! them), pushes the DSP-mode marker, EXPEVT, TRA and the original SP,
! then dispatches either to do_IRQ (k2 == interrupt marker) or via the
! EXPEVT-indexed exception_handling_table.
! NOTE(review): fragmentary excerpt — leading integers are fused-in
! line numbers from extraction and many lines are elided; verify
! against the full arch/sh/kernel/entry.S.
540 ENTRY(handle_exception)
541 ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
542 ! save all registers onto stack.
544 stc ssr, k0 ! Is it from kernel space?
545 shll k0 ! Check MD bit (bit30) by shifting it into...
546 shll k0 ! ...the T bit
547 bt/s 1f ! It's a kernel to kernel transition.
548 mov r15, k0 ! save original stack to k0
549 /* User space to kernel */
550 mov #(THREAD_SIZE >> 10), k1
551 shll8 k1 ! k1 := THREAD_SIZE
554 mov k1, r15 ! change to kernel stack
559 mov.l r2, @-r15 ! Save r2, we need another reg
562 tst r2, k4 ! Check if in DSP mode
563 mov.l @r15+, r2 ! Restore r2 now
565 mov #0, k4 ! Set marker for no stack frame
567 mov r2, k4 ! Backup r2 (in k4) for later
569 ! Save DSP registers on stack
580 ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
582 ! FIXME: Make sure that this is still the case with newer toolchains,
583 ! as we're not at all interested in supporting ancient toolchains at
584 ! this point. -- PFM.
587 .word 0xf653 ! movs.l a1, @-r2
588 .word 0xf6f3 ! movs.l a0g, @-r2
589 .word 0xf6d3 ! movs.l a1g, @-r2
590 .word 0xf6c3 ! movs.l m0, @-r2
591 .word 0xf6e3 ! movs.l m1, @-r2
594 mov k4, r2 ! Restore r2
595 mov.l 1f, k4 ! Force DSP stack frame
597 mov.l k4, @-r15 ! Push DSP mode marker onto stack
599 ! Save the user registers on the stack.
600 mov.l k2, @-r15 ! EXPEVT
603 mov.l k4, @-r15 ! set TRA (default: -1)
612 lds k3, pr ! Set the return address to pr
614 mov.l k0, @-r15 ! save original stack
623 stc sr, r8 ! Back to normal register bank, and
624 or k1, r8 ! Block all interrupts
627 ldc r8, sr ! ...changed here.
639 * This gets a bit tricky.. in the INTEVT case we don't want to use
640 * the VBR offset as a destination in the jump call table, since all
641 * of the destinations are the same. In this case, (interrupt) sets
642 * a marker in r2 (now r2_bank since SR.RB changed), which we check
643 * to determine the exception type. For all other exceptions, we
644 * forcibly read EXPEVT from memory and fix up the jump address, in
645 * the interrupt exception case we jump to do_IRQ() and defer the
646 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
647 * checks that do_IRQ() was doing..
651 bf interrupt_exception
655 #ifdef COUNT_EXCEPTIONS
! Literal pool: SR.DSP bit, SR image with FD=1/IMASK=15, mask clearing
! RB and BL, and the exception dispatch table address.
672 1: .long 0x00001000 ! DSP=1
673 2: .long 0x000080f0 ! FD=1, IMASK=15
674 3: .long 0xcfffffff ! RB=0, BL=0
675 4: .long exception_handling_table
676 #ifdef COUNT_EXCEPTIONS
677 5: .long exception_count_table
691 ENTRY(exception_none)