2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * arch/sh64/kernel/entry.S
8 * Copyright (C) 2000, 2001 Paolo Alberelli
9 * Copyright (C) 2004, 2005 Paul Mundt
10 * Copyright (C) 2003, 2004 Richard Curnow
13 #include <linux/errno.h>
14 #include <linux/sys.h>
15 #include <asm/cpu/registers.h>
16 #include <asm/processor.h>
17 #include <asm/unistd.h>
18 #include <asm/thread_info.h>
19 #include <asm/asm-offsets.h>
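/* Status register (SR) bit masks used below: the ASID field, FD (FPU
 * disable), SS (single step), BL (exception block) and MD (privileged
 * mode) bits. */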
24 #define SR_ASID_MASK 0x00ff0000
25 #define SR_FD_MASK 0x00008000
26 #define SR_SS 0x08000000
27 #define SR_BL 0x10000000
28 #define SR_MD 0x40000000
33 #define EVENT_INTERRUPT 0
34 #define EVENT_FAULT_TLB 1
35 #define EVENT_FAULT_NOT_TLB 2
39 #define RESET_CAUSE 0x20
40 #define DEBUGSS_CAUSE 0x980
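/* EXPEVT values recognised by the panic handler below: 0x20 is the
 * power-on reset code, 0x980 the single-step/debug exception. */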
43 * Frame layout. Quad index.
45 #define FRAME_T(x) FRAME_TBASE+(x*8)
46 #define FRAME_R(x) FRAME_RBASE+(x*8)
47 #define FRAME_S(x) FRAME_SBASE+(x*8)
52 /* Arrange the save frame to be a multiple of 32 bytes long */
54 #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
55 #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
56 #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
57 #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
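/* Assuming FRAME_SBASE is 0, FRAME_SIZE works out to 3*8 + 63*8 + 8*8 + 2*8
 * = 608 bytes, which is indeed a multiple of 32. */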
59 #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
60 #define FP_FRAME_BASE 0
70 /* These are the registers saved in the TLB path that aren't saved in the first
71 level of the normal one. */
72 #define TLB_SAVED_R25 7*8
73 #define TLB_SAVED_TR1 8*8
74 #define TLB_SAVED_TR2 9*8
75 #define TLB_SAVED_TR3 10*8
76 #define TLB_SAVED_TR4 11*8
77 /* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1,
78 causing breakage otherwise. */
79 #define TLB_SAVED_R0 12*8
80 #define TLB_SAVED_R1 13*8
93 # define preempt_stop() CLI()
95 # define preempt_stop()
96 # define resume_kernel restore_all
101 #define FAST_TLBMISS_STACK_CACHELINES 4
102 #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
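/* i.e. 16 quadwords (128 bytes): each 32-byte cache line holds four
 * 8-byte quadwords. */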
104 /* Register back-up area for all exceptions */
106 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
107 * register saves etc. */
108 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
109 /* This is 32 byte aligned by construction */
110 /* Register back-up area for all exceptions */
130 /* Save area for RESVEC exceptions. We cannot use reg_save_area because of
131 * reentrancy. Note this area may be accessed via physical address.
132 * Align so this fits within a single cache line, for ease of purging.
143 /* Jump table of 3rd level handlers */
145 .long do_exception_error /* 0x000 */
146 .long do_exception_error /* 0x020 */
147 .long tlb_miss_load /* 0x040 */
148 .long tlb_miss_store /* 0x060 */
149 ! ARTIFICIAL pseudo-EXPEVT setting
150 .long do_debug_interrupt /* 0x080 */
151 .long tlb_miss_load /* 0x0A0 */
152 .long tlb_miss_store /* 0x0C0 */
153 .long do_address_error_load /* 0x0E0 */
154 .long do_address_error_store /* 0x100 */
155 #ifdef CONFIG_SH_FPU
156 .long do_fpu_error /* 0x120 */
157 #else
158 .long do_exception_error /* 0x120 */
159 #endif
160 .long do_exception_error /* 0x140 */
161 .long system_call /* 0x160 */
162 .long do_reserved_inst /* 0x180 */
163 .long do_illegal_slot_inst /* 0x1A0 */
164 .long do_exception_error /* 0x1C0 - NMI */
165 .long do_exception_error /* 0x1E0 */
167 .long do_IRQ /* 0x200 - 0x3C0 */
169 .long do_exception_error /* 0x3E0 */
171 .long do_IRQ /* 0x400 - 0x7E0 */
173 .long fpu_error_or_IRQA /* 0x800 */
174 .long fpu_error_or_IRQB /* 0x820 */
175 .long do_IRQ /* 0x840 */
176 .long do_IRQ /* 0x860 */
178 .long do_exception_error /* 0x880 - 0x920 */
180 .long do_software_break_point /* 0x940 */
181 .long do_exception_error /* 0x960 */
182 .long do_single_step /* 0x980 */
185 .long do_exception_error /* 0x9A0 - 0x9E0 */
187 .long do_IRQ /* 0xA00 */
188 .long do_IRQ /* 0xA20 */
189 .long itlb_miss_or_IRQ /* 0xA40 */
190 .long do_IRQ /* 0xA60 */
191 .long do_IRQ /* 0xA80 */
192 .long itlb_miss_or_IRQ /* 0xAA0 */
193 .long do_exception_error /* 0xAC0 */
194 .long do_address_error_exec /* 0xAE0 */
196 .long do_exception_error /* 0xB00 - 0xBE0 */
199 .long do_IRQ /* 0xC00 - 0xE20 */
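/* handle_exception dispatches through this table, one entry per 0x20 step
 * of the (possibly artificial) EXPEVT/INTEVT code, to reach the third-level
 * handler named above. */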
202 .section .text64, "ax"
205 * --- Exception/Interrupt/Event Handling Section
209 * VBR and RESVEC blocks.
211 * First level handler for VBR-based exceptions.
213 * To avoid waste of space, align to the maximum text block size.
214 * This is assumed to be at most 128 bytes or 32 instructions.
215 * DO NOT EXCEED 32 instructions on the first level handlers !
217 * Also note that RESVEC is contained within the VBR block
218 * where the room left (1KB - TEXT_SIZE) allows placing
219 * the RESVEC block (at most 512B + TEXT_SIZE).
221 * Hence the first (and only) level handler for RESVEC-based exceptions.
223 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
224 * and interrupt) we are very tight on register space until
225 * saving onto the stack frame, which is done in handle_exception().
229 #define TEXT_SIZE 128
230 #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
234 .space 256, 0 /* Power-on class handler, */
235 /* not required here */
237 synco /* TAKum03020 (but probably a good idea anyway.) */
238 /* Save original stack pointer into KCR1 */
241 /* Save other original registers into reg_save_area */
242 movi reg_save_area, SP
243 st.q SP, SAVED_R2, r2
244 st.q SP, SAVED_R3, r3
245 st.q SP, SAVED_R4, r4
246 st.q SP, SAVED_R5, r5
247 st.q SP, SAVED_R6, r6
248 st.q SP, SAVED_R18, r18
250 st.q SP, SAVED_TR0, r3
252 /* Set args for Non-debug, Not a TLB miss class handler */
254 movi ret_from_exception, r3
256 movi EVENT_FAULT_NOT_TLB, r4
259 pta handle_exception, tr0
270 * Instead of the natural .balign 1024, place RESVEC here,
271 * respecting the final 1KB alignment.
275 * Instead of '.space 1024-TEXT_SIZE', place the RESVEC
276 * block, making sure the final alignment is correct.
279 synco /* TAKum03020 (but probably a good idea anyway.) */
281 movi reg_save_area, SP
282 /* SP is guaranteed 32-byte aligned. */
283 st.q SP, TLB_SAVED_R0 , r0
284 st.q SP, TLB_SAVED_R1 , r1
285 st.q SP, SAVED_R2 , r2
286 st.q SP, SAVED_R3 , r3
287 st.q SP, SAVED_R4 , r4
288 st.q SP, SAVED_R5 , r5
289 st.q SP, SAVED_R6 , r6
290 st.q SP, SAVED_R18, r18
292 /* Save R25 for safety; as/ld may want to use it to make the call to
293 * the code in mm/tlbmiss.c */
294 st.q SP, TLB_SAVED_R25, r25
300 st.q SP, SAVED_TR0 , r2
301 st.q SP, TLB_SAVED_TR1 , r3
302 st.q SP, TLB_SAVED_TR2 , r4
303 st.q SP, TLB_SAVED_TR3 , r5
304 st.q SP, TLB_SAVED_TR4 , r18
306 pt do_fast_page_fault, tr0
311 andi r2, 1, r2 /* r2 = SSR.MD */
314 pt fixup_to_invoke_general_handler, tr1
316 /* If the fast path handler fixed the fault, drop straight through to the
317 restore code and return to the excepting context.
321 fast_tlb_miss_restore:
322 ld.q SP, SAVED_TR0, r2
323 ld.q SP, TLB_SAVED_TR1, r3
324 ld.q SP, TLB_SAVED_TR2, r4
326 ld.q SP, TLB_SAVED_TR3, r5
327 ld.q SP, TLB_SAVED_TR4, r18
335 ld.q SP, TLB_SAVED_R0, r0
336 ld.q SP, TLB_SAVED_R1, r1
337 ld.q SP, SAVED_R2, r2
338 ld.q SP, SAVED_R3, r3
339 ld.q SP, SAVED_R4, r4
340 ld.q SP, SAVED_R5, r5
341 ld.q SP, SAVED_R6, r6
342 ld.q SP, SAVED_R18, r18
343 ld.q SP, TLB_SAVED_R25, r25
347 nop /* for safety, in case the code is run on sh5-101 cut1.x */
349 fixup_to_invoke_general_handler:
351 /* OK, new method. Restore stuff that's not expected to get saved into
352 the 'first-level' reg save area, then just fall through to setting
353 up the registers and calling the second-level handler. */
355 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
356 r25,tr1-4 and save r6 to get into the right state. */
358 ld.q SP, TLB_SAVED_TR1, r3
359 ld.q SP, TLB_SAVED_TR2, r4
360 ld.q SP, TLB_SAVED_TR3, r5
361 ld.q SP, TLB_SAVED_TR4, r18
362 ld.q SP, TLB_SAVED_R25, r25
364 ld.q SP, TLB_SAVED_R0, r0
365 ld.q SP, TLB_SAVED_R1, r1
372 /* Set args for Non-debug, TLB miss class handler */
374 movi ret_from_exception, r3
376 movi EVENT_FAULT_TLB, r4
379 pta handle_exception, tr0
382 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
383 DOES END UP AT VBR+0x600 */
395 synco /* TAKum03020 (but probably a good idea anyway.) */
396 /* Save original stack pointer into KCR1 */
399 /* Save other original registers into reg_save_area */
400 movi reg_save_area, SP
401 st.q SP, SAVED_R2, r2
402 st.q SP, SAVED_R3, r3
403 st.q SP, SAVED_R4, r4
404 st.q SP, SAVED_R5, r5
405 st.q SP, SAVED_R6, r6
406 st.q SP, SAVED_R18, r18
408 st.q SP, SAVED_TR0, r3
410 /* Set args for interrupt class handler */
412 movi ret_from_irq, r3
414 movi EVENT_INTERRUPT, r4
417 pta handle_exception, tr0
419 .balign TEXT_SIZE /* let's waste the bare minimum */
421 LVBR_block_end: /* Marker. Used for total checking */
425 /* Panic handler. Called with MMU off. Possible causes/actions:
426 * - Reset: Jump to program start.
427 * - Single Step: Turn off Single Step & return.
428 * - Others: Call panic handler, passing PC as arg.
429 * (this may need to be extended...)
432 synco /* TAKum03020 (but probably a good idea anyway.) */
434 /* First save r0-1 and tr0, as we need to use these */
435 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
444 sub r1, r0, r1 /* r1=0 if reset */
445 movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0
448 beqi r1, 0, tr0 /* Jump to start address if reset */
451 movi DEBUGSS_CAUSE, r1
452 sub r1, r0, r1 /* r1=0 if single step */
453 pta single_step_panic, tr0
454 beqi r1, 0, tr0 /* jump if single step */
456 /* Now jump to where we save the registers. */
457 movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1
462 /* We are in a handler with Single Step set. We need to resume the
463 * handler by turning the MMU on and Single Step off. */
470 /* Restore EXPEVT, as the rte won't do this */
485 synco /* TAKum03020 (but probably a good idea anyway.) */
487 * Single step/software_break_point first level handler.
488 * Called with MMU off, so the first thing we do is enable it
489 * by doing an rte with appropriate SSR.
492 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
493 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
495 /* With the MMU off, we are bypassing the cache, so purge any
496 * data that will be made stale by the following stores.
508 /* Enable MMU, block exceptions, set priv mode, disable single step */
509 movi SR_MMU | SR_BL | SR_MD, r1
514 /* Force control to debug_exception_2 when rte is executed */
515 movi debug_exeception_2, r0
516 ori r0, 1, r0 /* force SHmedia, just in case */
522 /* Restore saved regs */
524 movi resvec_save_area, SP
532 /* Save other original registers into reg_save_area */
533 movi reg_save_area, SP
534 st.q SP, SAVED_R2, r2
535 st.q SP, SAVED_R3, r3
536 st.q SP, SAVED_R4, r4
537 st.q SP, SAVED_R5, r5
538 st.q SP, SAVED_R6, r6
539 st.q SP, SAVED_R18, r18
541 st.q SP, SAVED_TR0, r3
543 /* Set args for debug class handler */
545 movi ret_from_exception, r3
550 pta handle_exception, tr0
555 /* !!! WE COME HERE IN REAL MODE !!! */
556 /* Hook up the debug interrupt so that various debugging options can be
557 * attached to its handler. */
558 /* Save original stack pointer into KCR1 */
561 movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
566 /* Save other original registers into resvec_save_area using physical addresses */
567 st.q SP, SAVED_R2, r2
568 st.q SP, SAVED_R3, r3
569 st.q SP, SAVED_R4, r4
570 st.q SP, SAVED_R5, r5
571 st.q SP, SAVED_R6, r6
572 st.q SP, SAVED_R18, r18
574 st.q SP, SAVED_TR0, r3
576 /* move (spc,ssr)->(pspc,pssr). The rte will shift
577 them back again, so that they look like the originals
578 as far as the real handler code is concerned. */
584 ! construct useful SR for handle_exception
591 ! SSR is now the current SR with the MD and MMU bits set
592 ! i.e. the rte will switch back to priv mode and put
596 movi handle_exception, r18
597 ori r18, 1, r18 ! for safety (do we need this?)
600 /* Set args for Non-debug, Not a TLB miss class handler */
602 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
603 ! debug interrupt handler in the vectoring table
605 movi ret_from_exception, r3
607 movi EVENT_FAULT_NOT_TLB, r4
610 movi CONFIG_CACHED_MEMORY_OFFSET, r6
615 rte ! -> handle_exception, switch back to priv mode again
617 LRESVEC_block_end: /* Marker. Unused. */
622 * Second level handler for VBR-based exceptions. Pre-handler.
623 * In common to all stack-frame sensitive handlers.
626 * (KCR0) Current [current task union]
629 * (r3) appropriate return address
630 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
631 * (r5) Pointer to reg_save_area
634 * Available registers:
641 /* Common 2nd level handler. */
643 /* The first thing we need is an appropriate stack pointer */
648 bne r6, ZERO, tr0 /* Original stack pointer is fine */
650 /* Set stack pointer for user fault */
652 movi THREAD_SIZE, r6 /* Point to the end */
657 /* DEBUG : check for underflow/overflow of the kernel stack */
658 pta no_underflow, tr0
662 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
664 /* Just panic to cause a crash. */
672 movi THREAD_SIZE, r18
674 bgt SP, r6, tr0 ! sp above the stack
676 /* Make some room for the BASIC frame. */
677 movi -(FRAME_SIZE), r6
680 /* Could do this with no stalling if we had another spare register, but the
681 code below will be OK. */
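/* The loads from the first-level save area and the stores into the new
 * stack frame are interleaved, ping-ponging between r6 and r18, so that no
 * st.q consumes the result of the immediately preceding ld.q. */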
682 ld.q r5, SAVED_R2, r6
683 ld.q r5, SAVED_R3, r18
684 st.q SP, FRAME_R(2), r6
685 ld.q r5, SAVED_R4, r6
686 st.q SP, FRAME_R(3), r18
687 ld.q r5, SAVED_R5, r18
688 st.q SP, FRAME_R(4), r6
689 ld.q r5, SAVED_R6, r6
690 st.q SP, FRAME_R(5), r18
691 ld.q r5, SAVED_R18, r18
692 st.q SP, FRAME_R(6), r6
693 ld.q r5, SAVED_TR0, r6
694 st.q SP, FRAME_R(18), r18
695 st.q SP, FRAME_T(0), r6
697 /* Keep old SP around */
700 /* Save the rest of the general purpose registers */
701 st.q SP, FRAME_R(0), r0
702 st.q SP, FRAME_R(1), r1
703 st.q SP, FRAME_R(7), r7
704 st.q SP, FRAME_R(8), r8
705 st.q SP, FRAME_R(9), r9
706 st.q SP, FRAME_R(10), r10
707 st.q SP, FRAME_R(11), r11
708 st.q SP, FRAME_R(12), r12
709 st.q SP, FRAME_R(13), r13
710 st.q SP, FRAME_R(14), r14
712 /* SP is somewhere else */
713 st.q SP, FRAME_R(15), r6
715 st.q SP, FRAME_R(16), r16
716 st.q SP, FRAME_R(17), r17
717 /* r18 is saved earlier. */
718 st.q SP, FRAME_R(19), r19
719 st.q SP, FRAME_R(20), r20
720 st.q SP, FRAME_R(21), r21
721 st.q SP, FRAME_R(22), r22
722 st.q SP, FRAME_R(23), r23
723 st.q SP, FRAME_R(24), r24
724 st.q SP, FRAME_R(25), r25
725 st.q SP, FRAME_R(26), r26
726 st.q SP, FRAME_R(27), r27
727 st.q SP, FRAME_R(28), r28
728 st.q SP, FRAME_R(29), r29
729 st.q SP, FRAME_R(30), r30
730 st.q SP, FRAME_R(31), r31
731 st.q SP, FRAME_R(32), r32
732 st.q SP, FRAME_R(33), r33
733 st.q SP, FRAME_R(34), r34
734 st.q SP, FRAME_R(35), r35
735 st.q SP, FRAME_R(36), r36
736 st.q SP, FRAME_R(37), r37
737 st.q SP, FRAME_R(38), r38
738 st.q SP, FRAME_R(39), r39
739 st.q SP, FRAME_R(40), r40
740 st.q SP, FRAME_R(41), r41
741 st.q SP, FRAME_R(42), r42
742 st.q SP, FRAME_R(43), r43
743 st.q SP, FRAME_R(44), r44
744 st.q SP, FRAME_R(45), r45
745 st.q SP, FRAME_R(46), r46
746 st.q SP, FRAME_R(47), r47
747 st.q SP, FRAME_R(48), r48
748 st.q SP, FRAME_R(49), r49
749 st.q SP, FRAME_R(50), r50
750 st.q SP, FRAME_R(51), r51
751 st.q SP, FRAME_R(52), r52
752 st.q SP, FRAME_R(53), r53
753 st.q SP, FRAME_R(54), r54
754 st.q SP, FRAME_R(55), r55
755 st.q SP, FRAME_R(56), r56
756 st.q SP, FRAME_R(57), r57
757 st.q SP, FRAME_R(58), r58
758 st.q SP, FRAME_R(59), r59
759 st.q SP, FRAME_R(60), r60
760 st.q SP, FRAME_R(61), r61
761 st.q SP, FRAME_R(62), r62
764 * Save the S* registers.
767 st.q SP, FRAME_S(FSSR), r61
769 st.q SP, FRAME_S(FSPC), r62
770 movi -1, r62 /* Reset syscall_nr */
771 st.q SP, FRAME_S(FSYSCALL_ID), r62
773 /* Save the rest of the target registers */
775 st.q SP, FRAME_T(1), r6
777 st.q SP, FRAME_T(2), r6
779 st.q SP, FRAME_T(3), r6
781 st.q SP, FRAME_T(4), r6
783 st.q SP, FRAME_T(5), r6
785 st.q SP, FRAME_T(6), r6
787 st.q SP, FRAME_T(7), r6
789 ! set up FP so that the unwinder can wind back through nested kernel mode
793 #ifdef CONFIG_POOR_MANS_STRACE
794 /* We've pushed all the registers now, so only r2-r4 hold anything
795 * useful. Move them into callee save registers */
800 /* Preserve r2 as the event code */
814 /* For syscall and debug race condition, get TRA now */
817 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
818 * Also set FD, to catch FPU usage in the kernel.
820 * benedict.gaster@superh.com 29/07/2002
822 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
823 * same time change BL from 1->0, as any pending interrupt of a level
824 * higher than the previous value of IMASK will leak through and be
825 * taken unexpectedly.
827 * To avoid this we raise the IMASK and then issue another PUTCON to
831 movi SR_IMASK | SR_FD, r7
834 movi SR_UNBLOCK_EXC, r7
839 /* Now call the appropriate 3rd level handler */
850 * Second level handler for VBR-based exceptions. Post-handlers.
852 * Post-handlers for interrupts (ret_from_irq), exceptions
853 * (ret_from_exception) and common reentrance doors (restore_all
854 * to get back to the original context, ret_from_syscall loop to
855 * check kernel exiting).
857 * ret_with_reschedule and work_notifysig are inner labels of
858 * the ret_from_syscall loop.
860 * In common to all stack-frame sensitive handlers.
863 * (SP) struct pt_regs *, original register's frame pointer (basic)
868 #ifdef CONFIG_POOR_MANS_STRACE
869 pta evt_debug_ret_from_irq, tr0
873 ld.q SP, FRAME_S(FSSR), r6
876 pta resume_kernel, tr0
877 bne r6, ZERO, tr0 /* no further checks */
879 pta ret_with_reschedule, tr0
880 blink tr0, ZERO /* Do not check softirqs */
882 .global ret_from_exception
886 #ifdef CONFIG_POOR_MANS_STRACE
887 pta evt_debug_ret_from_exc, tr0
892 ld.q SP, FRAME_S(FSSR), r6
895 pta resume_kernel, tr0
896 bne r6, ZERO, tr0 /* no further checks */
900 #ifdef CONFIG_PREEMPT
901 pta ret_from_syscall, tr0
908 ld.l r6, TI_PRE_COUNT, r7
912 ld.l r6, TI_FLAGS, r7
913 movi (1 << TIF_NEED_RESCHED), r8
921 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
922 shori (PREEMPT_ACTIVE & 65535), r8
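/* The movi/shori pair above builds the full 32-bit PREEMPT_ACTIVE constant:
 * movi loads the high half, shori shifts r8 left by 16 and ORs in the low
 * half. */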
923 st.l r6, TI_PRE_COUNT, r8
931 st.l r6, TI_PRE_COUNT, ZERO
934 pta need_resched, tr1
938 .global ret_from_syscall
942 getcon KCR0, r6 ! r6 contains current_thread_info
943 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
946 ! no handling of TIF_SYSCALL_TRACE yet!!
948 movi _TIF_NEED_RESCHED, r8
950 pta work_resched, tr0
955 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
957 pta work_notifysig, tr0
963 pta ret_from_syscall, tr0
967 blink tr0, ZERO /* Call schedule(), return on top */
976 blink tr0, LINK /* Call do_signal(regs, 0), return here */
981 ld.q SP, FRAME_T(0), r6
982 ld.q SP, FRAME_T(1), r7
983 ld.q SP, FRAME_T(2), r8
984 ld.q SP, FRAME_T(3), r9
989 ld.q SP, FRAME_T(4), r6
990 ld.q SP, FRAME_T(5), r7
991 ld.q SP, FRAME_T(6), r8
992 ld.q SP, FRAME_T(7), r9
998 ld.q SP, FRAME_R(0), r0
999 ld.q SP, FRAME_R(1), r1
1000 ld.q SP, FRAME_R(2), r2
1001 ld.q SP, FRAME_R(3), r3
1002 ld.q SP, FRAME_R(4), r4
1003 ld.q SP, FRAME_R(5), r5
1004 ld.q SP, FRAME_R(6), r6
1005 ld.q SP, FRAME_R(7), r7
1006 ld.q SP, FRAME_R(8), r8
1007 ld.q SP, FRAME_R(9), r9
1008 ld.q SP, FRAME_R(10), r10
1009 ld.q SP, FRAME_R(11), r11
1010 ld.q SP, FRAME_R(12), r12
1011 ld.q SP, FRAME_R(13), r13
1012 ld.q SP, FRAME_R(14), r14
1014 ld.q SP, FRAME_R(16), r16
1015 ld.q SP, FRAME_R(17), r17
1016 ld.q SP, FRAME_R(18), r18
1017 ld.q SP, FRAME_R(19), r19
1018 ld.q SP, FRAME_R(20), r20
1019 ld.q SP, FRAME_R(21), r21
1020 ld.q SP, FRAME_R(22), r22
1021 ld.q SP, FRAME_R(23), r23
1022 ld.q SP, FRAME_R(24), r24
1023 ld.q SP, FRAME_R(25), r25
1024 ld.q SP, FRAME_R(26), r26
1025 ld.q SP, FRAME_R(27), r27
1026 ld.q SP, FRAME_R(28), r28
1027 ld.q SP, FRAME_R(29), r29
1028 ld.q SP, FRAME_R(30), r30
1029 ld.q SP, FRAME_R(31), r31
1030 ld.q SP, FRAME_R(32), r32
1031 ld.q SP, FRAME_R(33), r33
1032 ld.q SP, FRAME_R(34), r34
1033 ld.q SP, FRAME_R(35), r35
1034 ld.q SP, FRAME_R(36), r36
1035 ld.q SP, FRAME_R(37), r37
1036 ld.q SP, FRAME_R(38), r38
1037 ld.q SP, FRAME_R(39), r39
1038 ld.q SP, FRAME_R(40), r40
1039 ld.q SP, FRAME_R(41), r41
1040 ld.q SP, FRAME_R(42), r42
1041 ld.q SP, FRAME_R(43), r43
1042 ld.q SP, FRAME_R(44), r44
1043 ld.q SP, FRAME_R(45), r45
1044 ld.q SP, FRAME_R(46), r46
1045 ld.q SP, FRAME_R(47), r47
1046 ld.q SP, FRAME_R(48), r48
1047 ld.q SP, FRAME_R(49), r49
1048 ld.q SP, FRAME_R(50), r50
1049 ld.q SP, FRAME_R(51), r51
1050 ld.q SP, FRAME_R(52), r52
1051 ld.q SP, FRAME_R(53), r53
1052 ld.q SP, FRAME_R(54), r54
1053 ld.q SP, FRAME_R(55), r55
1054 ld.q SP, FRAME_R(56), r56
1055 ld.q SP, FRAME_R(57), r57
1056 ld.q SP, FRAME_R(58), r58
1059 movi SR_BLOCK_EXC, r60
1061 putcon r59, SR /* SR.BL = 1, keep nesting out */
1062 ld.q SP, FRAME_S(FSSR), r61
1063 ld.q SP, FRAME_S(FSPC), r62
1064 movi SR_ASID_MASK, r60
1066 andc r61, r60, r61 /* Clear out older ASID */
1067 or r59, r61, r61 /* Retain current ASID */
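/* The SSR about to be restored is patched with the ASID currently in SR,
 * so that an ASID switch made while in the kernel is not undone by the
 * rte. */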
1071 /* Ignore FSYSCALL_ID */
1073 ld.q SP, FRAME_R(59), r59
1074 ld.q SP, FRAME_R(60), r60
1075 ld.q SP, FRAME_R(61), r61
1076 ld.q SP, FRAME_R(62), r62
1079 ld.q SP, FRAME_R(15), SP
1084 * Third level handlers for VBR-based exceptions. Adapting args to
1085 * and/or deflecting to fourth level handlers.
1087 * Fourth level handlers interface.
1088 * Most are C-coded handlers directly pointed by the trap_jtable.
1089 * (Third = Fourth level)
1091 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1092 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1093 * (r3) struct pt_regs *, original register's frame pointer
1094 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1095 * (r5) TRA control register (for syscall/debug benefit only)
1096 * (LINK) return address
1099 * Kernel TLB fault handlers will get a slightly different interface.
1100 * (r2) struct pt_regs *, original register's frame pointer
1101 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1102 * (r4) execaccess, whether it's an ITLB fault as opposed to a DTLB fault
1103 * (r5) Effective Address of fault
1104 * (LINK) return address
1107 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1112 or ZERO, ZERO, r3 /* Read */
1113 or ZERO, ZERO, r4 /* Data */
1115 pta call_do_page_fault, tr0
1120 movi 1, r3 /* Write */
1121 or ZERO, ZERO, r4 /* Data */
1123 pta call_do_page_fault, tr0
1128 beqi/u r4, EVENT_INTERRUPT, tr0
1130 or ZERO, ZERO, r3 /* Read */
1131 movi 1, r4 /* Text */
1136 movi do_page_fault, r6
1142 beqi/l r4, EVENT_INTERRUPT, tr0
1143 #ifdef CONFIG_SH_FPU
1144 movi do_fpu_state_restore, r6
1146 movi do_exception_error, r6
1153 beqi/l r4, EVENT_INTERRUPT, tr0
1154 #ifdef CONFIG_SH_FPU
1155 movi do_fpu_state_restore, r6
1157 movi do_exception_error, r6
1168 * system_call/unknown_trap third level handler:
1171 * (r2) fault/interrupt code, entry number (TRAP = 11)
1172 * (r3) struct pt_regs *, original register's frame pointer
1173 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1174 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1176 * (LINK) return address: ret_from_exception
1177 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1180 * (*r3) Syscall reply (Saved r2)
1181 * (LINK) In the case of a syscall only, it can be scrapped.
1182 * The common second level post handler will be ret_from_syscall.
1183 * The common (non-trace) exit point to that is syscall_ret (which saves
1184 * the result to r2). The common bad exit point is syscall_bad (which
1185 * returns -ENOSYS, saved to r2).
1190 /* Unknown Trap or User Trace */
1191 movi do_unknown_trapa, r6
1193 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1194 andi r2, 0x1ff, r2 /* r2 = syscall # */
1197 pta syscall_ret, tr0
1200 /* New syscall implementation */
1202 pta unknown_trap, tr0
1203 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1205 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1207 /* It's a system call */
1208 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1209 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1213 pta syscall_allowed, tr0
1214 movi NR_syscalls - 1, r4 /* Last valid */
1218 /* Return ENOSYS ! */
1219 movi -(ENOSYS), r2 /* Fall-through */
1223 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1225 #ifdef CONFIG_POOR_MANS_STRACE
1226 /* nothing useful in registers at this point */
1231 ld.q SP, FRAME_R(9), r2
1236 ld.q SP, FRAME_S(FSPC), r2
1237 addi r2, 4, r2 /* Move PC, being pre-execution event */
1238 st.q SP, FRAME_S(FSPC), r2
1239 pta ret_from_syscall, tr0
1243 /* A different return path for ret_from_fork, because we now need
1244 * to call schedule_tail with the later kernels. Because prev is
1245 * loaded into r2 by switch_to(), we can just call it straight away
1248 .global ret_from_fork
1251 movi schedule_tail,r5
1256 #ifdef CONFIG_POOR_MANS_STRACE
1257 /* nothing useful in registers at this point */
1262 ld.q SP, FRAME_R(9), r2
1267 ld.q SP, FRAME_S(FSPC), r2
1268 addi r2, 4, r2 /* Move PC, being pre-execution event */
1269 st.q SP, FRAME_S(FSPC), r2
1270 pta ret_from_syscall, tr0
1276 /* Use LINK to deflect the exit point, default is syscall_ret */
1277 pta syscall_ret, tr0
1279 pta syscall_notrace, tr0
1282 ld.l r2, TI_FLAGS, r4
1283 movi (1 << TIF_SYSCALL_TRACE), r6
1287 /* Trace it by calling syscall_trace before and after */
1288 movi syscall_trace, r4
1291 /* Reload syscall number as r5 is trashed by syscall_trace */
1292 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1295 pta syscall_ret_trace, tr0
1299 /* Now point to the appropriate 4th level syscall handler */
1300 movi sys_call_table, r4
1305 /* Prepare original args */
1306 ld.q SP, FRAME_R(2), r2
1307 ld.q SP, FRAME_R(3), r3
1308 ld.q SP, FRAME_R(4), r4
1309 ld.q SP, FRAME_R(5), r5
1310 ld.q SP, FRAME_R(6), r6
1311 ld.q SP, FRAME_R(7), r7
1313 /* And now the trick for those syscalls requiring regs * ! */
1317 blink tr0, ZERO /* LINK is already properly set */
1320 /* We get back here only if under trace */
1321 st.q SP, FRAME_R(9), r2 /* Save return value */
1323 movi syscall_trace, LINK
1327 /* This needs to be done after any syscall tracing */
1328 ld.q SP, FRAME_S(FSPC), r2
1329 addi r2, 4, r2 /* Move PC, being pre-execution event */
1330 st.q SP, FRAME_S(FSPC), r2
1332 pta ret_from_syscall, tr0
1333 blink tr0, ZERO /* Resume normal return sequence */
1336 * --- Switch to running under a particular ASID and return the previous ASID value
1337 * --- The caller is assumed to have done a cli before calling this.
1339 * Input r2 : new ASID
1340 * Output r2 : old ASID
1343 .global switch_and_save_asid
1344 switch_and_save_asid:
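/* The ASID lives in SR bits [23:16] (see SR_ASID_MASK above); the 16-bit
 * shifts below move the 8-bit ASID into and out of that field. */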
1347 shlli r4, 16, r4 /* r4 = mask to select ASID */
1348 and r0, r4, r3 /* r3 = shifted old ASID */
1349 andi r2, 255, r2 /* mask down new ASID */
1350 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1351 andc r0, r4, r0 /* efface old ASID from SR */
1352 or r0, r2, r0 /* insert the new ASID */
1360 shlri r3, 16, r2 /* r2 = old ASID */
1363 .global route_to_panic_handler
1364 route_to_panic_handler:
1365 /* Switch to real mode, goto panic_handler, don't return. Useful for
1366 last-chance debugging, e.g. if output no longer reaches the console.
1369 movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
1381 1: /* Now in real mode */
1385 .global peek_real_address_q
1386 peek_real_address_q:
1388 r2 : real mode address to peek
1389 r2(out) : result quadword
1391 This is provided as a cheapskate way of manipulating device
1392 registers for debugging (to avoid the need to onchip_remap the debug
1393 module, and to avoid the need to onchip_remap the watchpoint
1394 controller in a way that identity maps sufficient bits to avoid the
1395 SH5-101 cut2 silicon defect).
1397 This code is not performance critical
1400 add.l r2, r63, r2 /* sign extend address */
1401 getcon sr, r0 /* r0 = saved original SR */
1404 or r0, r1, r1 /* r0 with block bit set */
1405 putcon r1, sr /* now in critical section */
1408 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1411 movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1412 movi 1f, r37 /* virtual mode return addr */
1419 .peek0: /* come here in real mode, don't touch caches!!
1420 still in critical section (sr.bl==1) */
1423 /* Here's the actual peek. If the address is bad, all bets are off as to
1424 * what will happen (handlers invoked in real mode = bad news) */
1427 rte /* Back to virtual mode */
1434 .global poke_real_address_q
1435 poke_real_address_q:
1437 r2 : real mode address to poke
1438 r3 : quadword value to write.
1440 This is provided as a cheapskate way of manipulating device
1441 registers for debugging (to avoid the need to onchip_remap the debug
1442 module, and to avoid the need to onchip_remap the watchpoint
1443 controller in a way that identity maps sufficient bits to avoid the
1444 SH5-101 cut2 silicon defect).
1446 This code is not performance critical
1449 add.l r2, r63, r2 /* sign extend address */
1450 getcon sr, r0 /* r0 = saved original SR */
1453 or r0, r1, r1 /* r0 with block bit set */
1454 putcon r1, sr /* now in critical section */
1457 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1460 movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1461 movi 1f, r37 /* virtual mode return addr */
1468 .poke0: /* come here in real mode, don't touch caches!!
1469 still in critical section (sr.bl==1) */
1472 /* Here's the actual poke. If the address is bad, all bets are off as to
1473 * what will happen (handlers invoked in real mode = bad news) */
1476 rte /* Back to virtual mode */
1484 * --- User Access Handling Section
1488 * User Access support. It has all moved to non-inlined assembler
1489 * functions in here.
1491 * __kernel_size_t __copy_user(void *__to, const void *__from,
1492 * __kernel_size_t __n)
1495 * (r2) target address
1496 * (r3) source address
1497 * (r4) size in bytes
1501 * (r2) non-copied bytes
1503 * If a fault occurs on the user pointer, bail out early and return the
1504 * number of bytes not copied in r2.
1505 * Strategy : for large blocks, call a real memcpy function which can
1506 * move >1 byte at a time using unaligned ld/st instructions, and can
1507 * manipulate the cache using prefetch + alloco to improve the speed
1508 * further. If a fault occurs in that function, just revert to the
1509 * byte-by-byte approach used for small blocks; this is rare so the
1510 * performance hit for that case does not matter.
1512 * For small blocks it's not worth the overhead of setting up and calling
1513 * the memcpy routine; do the copy a byte at a time.
1518 pta __copy_user_byte_by_byte, tr1
1519 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1521 pta copy_user_memcpy, tr0
1523 /* Save arguments in case we have to fix-up unhandled page fault */
1527 st.q SP, 24, r35 ! r35 is callee-save
1528 /* Save LINK in a register to reduce RTS time later (otherwise
1529 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1533 /* Copy completed normally if we get back here */
1536 /* don't restore r2-r4, pointless */
1537 /* set result=r2 to zero as the copy must have succeeded. */
1540 blink tr0, r63 ! RTS
1542 .global __copy_user_fixup
1544 /* Restore stack frame */
1551 /* Fall through to original code, in the 'same' state we entered with */
1553 /* The slow byte-by-byte method is used if the fast copy traps due to a bad
1554 user address. In that rare case, the speed drop can be tolerated. */
1555 __copy_user_byte_by_byte:
1556 pta ___copy_user_exit, tr1
1557 pta ___copy_user1, tr0
1558 beq/u r4, r63, tr1 /* early exit for zero length copy */
1563 ld.b r3, 0, r5 /* Fault address 1 */
1565 /* Could rewrite this to use just 1 add, but the second comes 'free'
1566 due to load latency */
1568 addi r4, -1, r4 /* No real fixup required */
1570 stx.b r3, r0, r5 /* Fault address 2 */
1579 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1582 * (r2) target address
1583 * (r3) size in bytes
1586 * (*r2) zero-ed target data
1587 * (r2) non-zero-ed bytes
1589 .global __clear_user
1591 pta ___clear_user_exit, tr1
1592 pta ___clear_user1, tr0
1596 st.b r2, 0, ZERO /* Fault address */
1598 addi r3, -1, r3 /* No real fixup required */
1608 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1612 * (r2) target address
1613 * (r3) source address
1614 * (r4) maximum size in bytes
1618 * (r2) -EFAULT (in case of faulting)
1619 * number of copied bytes (otherwise)
1621 .global __strncpy_from_user
1622 __strncpy_from_user:
1623 pta ___strncpy_from_user1, tr0
1624 pta ___strncpy_from_user_done, tr1
1625 or r4, ZERO, r5 /* r5 = original count */
1626 beq/u r4, r63, tr1 /* early exit if r4==0 */
1627 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1628 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1630 ___strncpy_from_user1:
1631 ld.b r3, 0, r7 /* Fault address: only in reading */
1636 addi r4, -1, r4 /* return real number of copied bytes */
1639 ___strncpy_from_user_done:
1640 sub r5, r4, r6 /* If done, return copied */
1642 ___strncpy_from_user_exit:
1648 * extern long __strnlen_user(const char *__s, long __n)
1651 * (r2) source address
1652 * (r3) source size in bytes
1655 * (r2) -EFAULT (in case of faulting)
1656 * string length (otherwise)
1658 .global __strnlen_user
1660 pta ___strnlen_user_set_reply, tr0
1661 pta ___strnlen_user1, tr1
1662 or ZERO, ZERO, r5 /* r5 = counter */
1663 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1664 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1668 ldx.b r2, r5, r7 /* Fault address: only in reading */
1669 addi r3, -1, r3 /* No real fixup */
1673 ! The line below used to be active. It led to a junk byte lying between each pair
1674 ! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1675 ! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1676 ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1677 ! addi r5, 1, r5 /* Include '\0' */
1679 ___strnlen_user_set_reply:
1680 or r5, ZERO, r6 /* If done, return counter */
1682 ___strnlen_user_exit:
1688 * extern long __get_user_asm_?(void *val, long addr)
1692 * (r3) source address (in User Space)
1695 * (r2) -EFAULT (faulting)
1698 .global __get_user_asm_b
1701 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1704 ld.b r3, 0, r5 /* r5 = data */
1708 ___get_user_asm_b_exit:
1713 .global __get_user_asm_w
1716 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1719 ld.w r3, 0, r5 /* r5 = data */
1723 ___get_user_asm_w_exit:
1728 .global __get_user_asm_l
1731 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1734 ld.l r3, 0, r5 /* r5 = data */
1738 ___get_user_asm_l_exit:
1743 .global __get_user_asm_q
1746 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1749 ld.q r3, 0, r5 /* r5 = data */
1753 ___get_user_asm_q_exit:
1758 * extern long __put_user_asm_?(void *pval, long addr)
1761 * (r2) kernel pointer to value
1762 * (r3) dest address (in User Space)
1765 * (r2) -EFAULT (faulting)
1768 .global __put_user_asm_b
1770 ld.b r2, 0, r4 /* r4 = data */
1771 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1777 ___put_user_asm_b_exit:
1782 .global __put_user_asm_w
1784 ld.w r2, 0, r4 /* r4 = data */
1785 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1791 ___put_user_asm_w_exit:
1796 .global __put_user_asm_l
1798 ld.l r2, 0, r4 /* r4 = data */
1799 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1805 ___put_user_asm_l_exit:
1810 .global __put_user_asm_q
1812 ld.q r2, 0, r4 /* r4 = data */
1813 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1819 ___put_user_asm_q_exit:
1824 /* The idea is: when we get an unhandled panic, we dump the registers
1825 to a known memory location, then just sit in a tight loop.
1826 This allows the human to look at the memory region through the GDB
1827 session (assuming the debug module's SHwy initiator isn't locked up
1828 or anything), to hopefully analyze the cause of the panic. */
1830 /* On entry, former r15 (SP) is in DCR
1831 former r0 is at resvec_save_area + 0
1832 former r1 is at resvec_save_area + 8
1833 former tr0 is at resvec_save_area + 32
1834 DCR is the only register whose value is lost altogether.
1837 movi 0xffffffff80000000, r0 ! phy of dump area
1838 ld.q SP, 0x000, r1 ! former r0
1840 ld.q SP, 0x008, r1 ! former r1
1904 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1906 ld.q SP, 0x020, r1 ! former tr0
1956 /* Prepare to jump to C - physical address */
1957 movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
1971 * --- Signal Handling Section
1975 * extern long long _sa_default_rt_restorer
1976 * extern long long _sa_default_restorer
1980 * extern void _sa_default_rt_restorer(void)
1981 * extern void _sa_default_restorer(void)
1983 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1984 * from user space. Copied into user space by signal management.
1985 * Both must be quad aligned and 2 quad long (4 instructions).
1989 .global sa_default_rt_restorer
1990 sa_default_rt_restorer:
1992 shori __NR_rt_sigreturn, r9
1997 .global sa_default_restorer
1998 sa_default_restorer:
2000 shori __NR_sigreturn, r9
2005 * --- __ex_table Section
2009 * User Access Exception Table.
2011 .section __ex_table, "a"
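/* Each entry pairs the address of a potentially faulting user-access
 * instruction (the label ending in a digit) with the label to resume at if
 * that access faults; the page-fault code searches this table and redirects
 * execution to the paired exit point, which returns the -EFAULT or partial
 * count set up beforehand. */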
2013 .global asm_uaccess_start /* Just a marker */
2016 .long ___copy_user1, ___copy_user_exit
2017 .long ___copy_user2, ___copy_user_exit
2018 .long ___clear_user1, ___clear_user_exit
2019 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2020 .long ___strnlen_user1, ___strnlen_user_exit
2021 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2022 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2023 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2024 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2025 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2026 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2027 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2028 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2030 .global asm_uaccess_end /* Just a marker */
2037 * --- .text.init Section
2040 .section .text.init, "ax"
2043 * void trap_init (void)
2048 addi SP, -24, SP /* Room to save r28/r29/r30 */
2053 /* Set VBR and RESVEC */
2054 movi LVBR_block, r19
2055 andi r19, -4, r19 /* reset MMUOFF + reserved */
2056 /* For RESVEC exceptions we force the MMU off, which means we need the
2057 physical address. */
2058 movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
2059 andi r20, -4, r20 /* reset reserved */
2060 ori r20, 1, r20 /* set MMUOFF */
2065 movi LVBR_block_end, r21
2067 movi BLOCK_SIZE, r29 /* r29 = expected size */
2072 * Ugly, but better to loop forever now than to crash afterwards.
2073 * We should print a message, but if we touch the LVBR or
2074 * LRESVEC blocks we should not be surprised if we get stuck
2077 pta trap_init_loop, tr1
2078 gettr tr1, r28 /* r28 = trap_init_loop */
2079 sub r21, r30, r30 /* r30 = actual size */
2082 * VBR/RESVEC handlers overlap by being bigger than
2083 * allowed. Very bad. Just loop forever.
2084 * (r28) panic/loop address
2085 * (r29) expected size
2091 /* Now that exception vectors are set up reset SR.BL */
2093 movi SR_UNBLOCK_EXC, r23