/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/hardirq.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#ifdef CONFIG_PREEMPT
# define preempt_stop		mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel	fault_restore_all
#endif
#define __MASK(x)	((1 << (x)) - 1)
#define IRQ_MASK	((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
			 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))

	.section .ex.text,"ax",@progbits
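	/*
	 * Exception vector table.  The .ex.text section is placed at the
	 * address held in EVBA; the CPU enters at a fixed offset for each
	 * exception cause, so each slot holds a single bral to the
	 * corresponding handler.
	 */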
	bral	do_bus_error_write
	bral	do_bus_error_read
	bral	handle_address_fault
	bral	handle_protection_fault
	bral	do_illegal_opcode_ll
	bral	do_illegal_opcode_ll
	bral	do_illegal_opcode_ll
	bral	do_illegal_opcode_ll
	bral	handle_address_fault
	bral	handle_address_fault
	bral	handle_protection_fault
	bral	handle_protection_fault
	/*
	 * r1 : Offending address
	 * r2 : Scratch register
	 * r3 : Cause (5, 12 or 13)
	 */
#define tlbmiss_save	pushm	r0-r3
#define tlbmiss_restore	popm	r0-r3
	.section .tlbx.ex.text,"ax",@progbits

	.section .tlbr.ex.text,"ax",@progbits

	.section .tlbw.ex.text,"ax",@progbits

	.global	tlb_miss_common
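	/*
	 * Common TLB refill handler for instruction and data TLB misses:
	 * walk the two-level page table for the faulting address and load
	 * the matching PTE into the TLB.  If the PGD entry or the PTE is
	 * not present, fall through to the slow path, which builds a full
	 * exception frame and hands off to the C fault handling code.
	 */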
	mfsr	r1, SYSREG_TLBEAR

	/* Is it the vmalloc space? */
	brcs	handle_vmalloc_miss
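	/*
	 * Kernel virtual addresses (the vmalloc/ioremap area) are not
	 * covered by the current process' page table; they are resolved
	 * through init's swapper_pg_dir at handle_vmalloc_miss below.
	 */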
	/* First level lookup */
	lsr	r2, r1, PGDIR_SHIFT
	bld	r0, _PAGE_BIT_PRESENT
	brcc	page_table_not_present

	/* TODO: Check access rights on page table if necessary */

	/* Translate to virtual address in P1. */
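	/*
	 * Note: the PGD entry holds a physical pointer to the page table;
	 * P1 maps physical memory one-to-one (cached) at 0x80000000, so
	 * the page table can be dereferenced directly through P1.
	 */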
	/* Second level lookup */
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
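	/* r1 now holds the index of the PTE within the page table
	 * (bits PAGE_SHIFT..PGDIR_SHIFT-1 of the faulting address). */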
	bld	r1, _PAGE_BIT_PRESENT
	brcc	page_not_present

	/* Mark the page as accessed */
	sbr	r1, _PAGE_BIT_ACCESSED

	/* Drop software flags */
	andl	r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
	mtsr	SYSREG_TLBELO, r1

	/* Figure out which entry we want to replace */
	mfsr	r0, SYSREG_TLBARLO
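	/*
	 * TLBARLO has one bit per TLB entry; the hardware clears a bit when
	 * the corresponding entry is used, so a bit that is still set marks
	 * a not-recently-used entry we can safely replace.  If all bits are
	 * clear, every entry has been used and the register is reset below.
	 */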
	mov	r1, -1			/* All entries have been accessed, */
	mtsr	SYSREG_TLBARLO, r1	/* so reset TLBAR */
	mov	r2, 0			/* and start at 0 */
1:	mfsr	r1, SYSREG_MMUCR

	mtsr	SYSREG_MMUCR, r1
handle_vmalloc_miss:
	/* Simply do the lookup in init's page table */
	mov	r0, lo(swapper_pg_dir)
	orh	r0, hi(swapper_pg_dir)
	/* --- System Call --- */

	.section .scall.text,"ax",@progbits
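	/*
	 * Entered from user space via the scall instruction.  On entry r8
	 * holds the system call number and r12, r11, r10, r9 and r5 hold
	 * the arguments (a sixth argument, if any, is pushed on the stack
	 * by the user-space stub).  r12 is also the return value register,
	 * so its original value is saved first as r12_orig for system call
	 * restart handling.
	 */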
	pushm	r12	/* r12_orig */

	mfsr	r0, SYSREG_RAR_SUP
	mfsr	r1, SYSREG_RSR_SUP
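	/*
	 * RAR_SUP/RSR_SUP hold the return address and status register of
	 * the interrupted user context; saving them into the frame lets
	 * the system call sleep or be preempted without losing them.
	 */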
	/* check for syscall tracing */
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_SYSCALL_TRACE
	brcs	syscall_trace_enter

	lddpc	lr, syscall_table_addr
	mov	r8, r5	/* 5th argument (6th is pushed by stub) */

	.global	syscall_return
	mask_interrupts		/* make sure we don't miss an interrupt
				   setting need_resched or sigpending
				   between sampling and the rets */

	/* Store the return value so that the correct value is loaded below */
	stdsp	sp[REG_R12], r12
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
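	/*
	 * Fast path back to user space: the saved PC and SR are moved back
	 * into the supervisor return registers so the return from
	 * supervisor mode resumes right after the scall.
	 */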
	mtsr	SYSREG_RAR_SUP, r8
	mtsr	SYSREG_RSR_SUP, r9

	sub	sp, -4	/* r12_orig */
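	/*
	 * A newly forked task resumes execution here; after the scheduler's
	 * tail work it rejoins the normal system call exit path.
	 */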
	.global	ret_from_fork

	/* check for syscall tracing */
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
	rjmp	syscall_exit_cont

	rjmp	syscall_trace_cont

	bld	r1, TIF_SYSCALL_TRACE

	ld.w	r1, r0[TI_flags]

1:	bld	r1, TIF_NEED_RESCHED

	ld.w	r1, r0[TI_flags]

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK

	rcall	do_notify_resume

	ld.w	r1, r0[TI_flags]

3:	bld	r1, TIF_BREAKPOINT
	brcc	syscall_exit_cont
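	/* TIF_BREAKPOINT: program OCD breakpoint unit 2A (address in BWA2A,
	 * control word derived from the current ASID in TLBEHI) before
	 * returning to user space. */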
	mfsr	r3, SYSREG_TLBEHI

	mtdr	DBGREG_BWA2A, r2
	mtdr	DBGREG_BWC2A, r3
	rjmp	syscall_exit_cont
	/* The slow path of the TLB miss handler */
page_table_not_present:

	rcall	save_full_context_ex

	rjmp	ret_from_exception

	/* This function expects to find offending PC in SYSREG_RAR_EX */
save_full_context_ex:
	mfsr	r8, SYSREG_RSR_EX
	andh	r8, (MODE_MASK >> 16), COH
	mfsr	r11, SYSREG_RAR_EX

1:	pushm	r11, r12	/* PC and SR */
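	/*
	 * If the exception came from a privileged context, the SP saved by
	 * the common frame setup is not the interrupted stack pointer;
	 * recompute it from the current SP and the frame layout and patch
	 * it into the frame.
	 */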
2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */

	/* Low-level exception handlers */

	rcall	save_full_context_ex

	rcall	do_critical_exception

	/* We should never get here... */
	sub	r12, pc, (. - 1f)
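	/* r12 = address of the message string at 1f below (PC-relative). */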
1:	.asciz	"Return from critical exception!"

	rcall	save_full_context_ex

	rcall	save_full_context_ex

1:	mfsr	r12, SYSREG_BEAR
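	/* BEAR holds the address that caused the bus error; it is passed
	 * (in r12, the first argument register) to the C bus error handler. */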
	rjmp	ret_from_exception

	/* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
	rcall	save_full_context_ex

handle_address_fault:

	rcall	save_full_context_ex

	rcall	do_address_exception
	rjmp	ret_from_exception

handle_protection_fault:

	rcall	save_full_context_ex

	rjmp	ret_from_exception

do_illegal_opcode_ll:

	rcall	save_full_context_ex

	rcall	do_illegal_opcode
	rjmp	ret_from_exception
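	/*
	 * The D-TLB modified exception is raised on the first write to a
	 * page whose TLB entry still has the dirty bit clear.  The handler
	 * below walks the page table for the faulting address, sets the
	 * dirty bit in the PTE and rewrites the TLB entry.
	 */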
	mfsr	r1, SYSREG_TLBEAR

	lsr	r2, r1, PGDIR_SHIFT

	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT

	/* Translate to virtual address in P1 */

	sbr	r3, _PAGE_BIT_DIRTY

	/* The page table is up-to-date. Update the TLB entry as well */
	andl	r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
	mtsr	SYSREG_TLBELO, r0

	/* MMUCR[DRP] is updated automatically, so let's go... */

	rcall	save_full_context_ex

	rjmp	ret_from_exception
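	/*
	 * Common return path for the exception handlers above.  If we are
	 * returning to kernel context, only preemption needs to be
	 * considered; if we are returning to user space, pending work
	 * (reschedule, signals, breakpoints) is handled first.
	 */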
	andh	r4, (MODE_MASK >> 16), COH
	brne	fault_resume_kernel

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH

	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9

#ifdef CONFIG_PREEMPT
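	/* Preempt the interrupted kernel code only if the preempt count is
	 * zero, TIF_NEED_RESCHED is set and the interrupted context did not
	 * have interrupts globally masked (GM clear in the saved SR). */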
	ld.w	r2, r0[TI_preempt_count]

	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED

	bld	r4, SYSREG_GM_OFFSET

	rcall	preempt_schedule_irq
#endif

	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9

	sub	sp, -4	/* ignore SP */

	sub	sp, -4	/* ignore r12_orig */

	/* Switch to exception mode so that we can share the same code. */
	cbr	r8, SYSREG_M0_OFFSET
	orh	r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
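	/* Exception mode is M2:M1:M0 = 0b110: clear M0 and set M1 and M2 in
	 * the status register before re-entering the shared exit code. */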
	ld.w	r1, r0[TI_flags]

	bld	r1, TIF_NEED_RESCHED

	ld.w	r1, r0[TI_flags]

1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK

	rcall	do_notify_resume

	ld.w	r1, r0[TI_flags]

2:	bld	r1, TIF_BREAKPOINT
	brcc	fault_resume_user
	mfsr	r3, SYSREG_TLBEHI

	mtdr	DBGREG_BWA2A, r2
	mtdr	DBGREG_BWC2A, r3
	rjmp	fault_resume_user

	/* If we get a debug trap from privileged context we end up here */

	/* Fix up LR and SP in regs. r11 contains the mode we came from */

	andh	r8, hi(~MODE_MASK)

	sub	r10, sp, -FRAME_SIZE_FULL
	stdsp	sp[REG_SP], r10

	/* Now, put everything back */

	mtsr	SYSREG_RAR_DBG, r10
	mtsr	SYSREG_RSR_DBG, r11

	andh	r8, hi(~MODE_MASK)
	andh	r11, hi(MODE_MASK)

	sub	sp, -4	/* skip SP */
	/*
	 * At this point, everything is masked, that is, interrupts,
	 * exceptions and debugging traps. We might get called from
	 * interrupt or exception context in some rare cases, but this
	 * will be taken care of by do_debug(), so we're not going to
	 * do a 100% correct context save here.
	 */
	sub	sp, 4	/* r12_orig */

	mfsr	r10, SYSREG_RAR_DBG
	mfsr	r11, SYSREG_RSR_DBG

	andh	r11, (MODE_MASK >> 16), COH
	brne	handle_debug_priv

	lddsp	r10, sp[REG_SR]
	andh	r10, (MODE_MASK >> 16), COH
	breq	debug_resume_user

	mtsr	SYSREG_RSR_DBG, r11
	mtsr	SYSREG_RAR_DBG, r10

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_DBGWORK_MASK, COH
	breq	debug_restore_all

1:	bld	r1, TIF_NEED_RESCHED

	ld.w	r1, r0[TI_flags]

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK

	rcall	do_notify_resume

	ld.w	r1, r0[TI_flags]

3:	bld	r1, TIF_SINGLE_STEP
	brcc	debug_restore_all

	rjmp	debug_restore_all
	.set	rsr_int0, SYSREG_RSR_INT0
	.set	rsr_int1, SYSREG_RSR_INT1
	.set	rsr_int2, SYSREG_RSR_INT2
	.set	rsr_int3, SYSREG_RSR_INT3
	.set	rar_int0, SYSREG_RAR_INT0
	.set	rar_int1, SYSREG_RAR_INT1
	.set	rar_int2, SYSREG_RAR_INT2
	.set	rar_int3, SYSREG_RAR_INT3
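	/*
	 * The aliases above let the IRQ_LEVEL macro refer to the per-level
	 * return address and status registers (RAR_INTx/RSR_INTx) as
	 * rar_int\level and rsr_int\level.  The macro expands to the
	 * low-level entry code for one interrupt priority level.
	 */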
	.macro	IRQ_LEVEL level
	.type	irq_level\level, @function

	sub	sp, 4	/* r12_orig */

	mfsr	r8, rar_int\level
	mfsr	r9, rsr_int\level

	andh	r4, (MODE_MASK >> 16), COH
#ifdef CONFIG_PREEMPT

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH

	mtsr	rar_int\level, r8
	mtsr	rsr_int\level, r9

	sub	sp, -4	/* ignore r12_orig */

#ifdef CONFIG_PREEMPT

	ld.w	r2, r0[TI_preempt_count]

	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED

	bld	r4, SYSREG_GM_OFFSET

	rcall	preempt_schedule_irq

	.section .irq.text,"ax",@progbits