2 * Copyright (C) 2004-2006 Atmel Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
14 #include <linux/errno.h>
17 #include <asm/hardirq.h>
21 #include <asm/pgtable.h>
22 #include <asm/ptrace.h>
23 #include <asm/sysreg.h>
24 #include <asm/thread_info.h>
25 #include <asm/unistd.h>
/*
 * Build-configuration helpers.
 * preempt_stop masks interrupts (used on the kernel-resume path when
 * preemption is compiled in); fault_resume_kernel falls through to the
 * plain restore path otherwise.
 * NOTE(review): the #ifdef lines guarding these definitions are not
 * visible in this sampled listing -- confirm against the full file.
 */
28 # define preempt_stop mask_interrupts
31 # define fault_resume_kernel fault_restore_all
/* IRQ_MASK: combined soft+hard IRQ count mask inside preempt_count. */
34 #define __MASK(x) ((1 << (x)) - 1)
35 #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
/*
 * Exception vector stubs (.ex.text): one branch per hardware exception,
 * dispatching to the matching low-level handler below.
 * NOTE(review): the entry labels and alignment directives between these
 * branches are missing from this sampled listing; vector offsets are
 * fixed by the hardware, so do not reorder these lines.
 */
38 .section .ex.text,"ax",@progbits
45 bral do_bus_error_write
47 bral do_bus_error_read
51 bral handle_address_fault
53 bral handle_protection_fault
/* Several distinct exception causes all funnel into the illegal-opcode
 * handler (it demultiplexes in C). */
57 bral do_illegal_opcode_ll
59 bral do_illegal_opcode_ll
61 bral do_illegal_opcode_ll
65 bral do_illegal_opcode_ll
67 bral handle_address_fault
69 bral handle_address_fault
71 bral handle_protection_fault
73 bral handle_protection_fault
/*
 * Register contract for the TLB miss fast path (continued from the
 * original comment block; r0's role is on a line not shown here):
 */
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
/* Save/restore the four scratch registers used by the miss handler. */
83 #define tlbmiss_save pushm r0-r3
84 #define tlbmiss_restore popm r0-r3
/*
 * TLB miss fast path, shared by the ITLB (.tlbx), DTLB-read (.tlbr) and
 * DTLB-write (.tlbw) vectors, which all fall into tlb_miss_common.
 * Performs a two-level software page-table walk and refills one TLB
 * entry; any miss it cannot satisfy (PGD/PTE not present) is punted to
 * the slow path (page_table_not_present / page_not_present).
 */
86 .section .tlbx.ex.text,"ax",@progbits
92 .section .tlbr.ex.text,"ax",@progbits
97 .section .tlbw.ex.text,"ax",@progbits
101 .global tlb_miss_common
/* r0 = faulting virtual address, from the TLB Exception Address reg. */
103 mfsr r0, SYSREG_TLBEAR
106 /* Is it the vmalloc space? */
108 brcs handle_vmalloc_miss
110 /* First level lookup */
/* r2 = PGD index of the faulting address. */
112 lsr r2, r0, PGDIR_SHIFT
/* r1 = PTE index (bits between PAGE_SHIFT and PGDIR_SHIFT). */
114 bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
115 bld r3, _PAGE_BIT_PRESENT
116 brcc page_table_not_present
118 /* Translate to virtual address in P1. */
122 /* Second level lookup */
124 mfsr r0, SYSREG_TLBARLO
125 bld r2, _PAGE_BIT_PRESENT
126 brcc page_not_present
128 /* Mark the page as accessed */
129 sbr r2, _PAGE_BIT_ACCESSED
132 /* Drop software flags */
/* Keep only the hardware-defined PTE bits before loading TLBELO. */
133 andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
134 mtsr SYSREG_TLBELO, r2
136 /* Figure out which entry we want to replace */
137 mfsr r1, SYSREG_MMUCR
140 mov r3, -1 /* All entries have been accessed, */
141 mov r2, 0 /* so start at 0 */
142 mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
/* Select the replacement entry via the DRP field of MMUCR. */
144 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
145 mtsr SYSREG_MMUCR, r1
/* handle_vmalloc_miss: kernel mappings live in init's page table. */
152 /* Simply do the lookup in init's page table */
153 mov r1, lo(swapper_pg_dir)
154 orh r1, hi(swapper_pg_dir)
158 /* --- System Call --- */
/*
 * System-call entry (.scall.text) and common return path.
 * Saves the original r12 (so the return value can be distinguished from
 * the first argument on restart), checks for syscall tracing, then
 * dispatches through the syscall table.  NOTE(review): the label
 * system_call: and the register-save sequence between these lines are
 * not visible in this sampled listing.
 */
160 .section .scall.text,"ax",@progbits
162 pushm r12 /* r12_orig */
/* Save supervisor-mode return address and status for the frame. */
165 mfsr r0, SYSREG_RAR_SUP
166 mfsr r1, SYSREG_RSR_SUP
169 /* check for syscall tracing */
171 ld.w r1, r0[TI_flags]
172 bld r1, TIF_SYSCALL_TRACE
173 brcs syscall_trace_enter
/* lr = address of the syscall table, used for the indirect dispatch. */
179 lddpc lr, syscall_table_addr
181 mov r8, r5 /* 5th argument (6th is pushed by stub) */
184 .global syscall_return
187 mask_interrupts /* make sure we don't miss an interrupt
188 setting need_resched or sigpending
189 between sampling and the rets */
191 /* Store the return value so that the correct value is loaded below */
192 stdsp sp[REG_R12], r12
/* Any pending work (signals, resched, tracing) diverts to the slow exit. */
194 ld.w r1, r0[TI_flags]
195 andl r1, _TIF_ALLWORK_MASK, COH
196 brne syscall_exit_work
/* Restore return PC/SR for the rets back to the caller. */
200 mtsr SYSREG_RAR_SUP, r8
201 mtsr SYSREG_RSR_SUP, r9
203 sub sp, -4 /* r12_orig */
/*
 * ret_from_fork: first return of a newly forked task.  Re-checks the
 * work flags (the parent's flags may not apply to the child) and then
 * joins the common syscall exit path.
 */
214 .global ret_from_fork
218 /* check for syscall tracing */
220 ld.w r1, r0[TI_flags]
221 andl r1, _TIF_ALLWORK_MASK, COH
222 brne syscall_exit_work
223 rjmp syscall_exit_cont
/*
 * Slow syscall exit: loop over the TIF_* work flags -- reschedule,
 * deliver pending signals via do_notify_resume(), re-arm a hardware
 * breakpoint if TIF_BREAKPOINT is set -- then resume at
 * syscall_exit_cont.  NOTE(review): labels and several branch
 * instructions between these lines are missing from this listing.
 */
229 rjmp syscall_trace_cont
232 bld r1, TIF_SYSCALL_TRACE
237 ld.w r1, r0[TI_flags]
239 1: bld r1, TIF_NEED_RESCHED
244 ld.w r1, r0[TI_flags]
247 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
253 rcall do_notify_resume
/* Re-sample flags: do_notify_resume may have set new work bits. */
255 ld.w r1, r0[TI_flags]
258 3: bld r1, TIF_BREAKPOINT
259 brcc syscall_exit_cont
/* Program debug breakpoint registers from the current TLBEHI context. */
260 mfsr r3, SYSREG_TLBEHI
266 mtdr DBGREG_BWA2A, r2
267 mtdr DBGREG_BWC2A, r3
268 rjmp syscall_exit_cont
271 /* The slow path of the TLB miss handler */
/* Build a full exception frame, then let the generic C fault handler
 * (called on a line not shown here) resolve the missing page table. */
272 page_table_not_present:
277 rcall save_full_context_ex
281 rjmp ret_from_exception
283 /* This function expects to find offending PC in SYSREG_RAR_EX */
/*
 * save_full_context_ex: build a full pt_regs frame for an exception
 * taken in exception mode.  Reads the saved status/PC from RSR_EX/
 * RAR_EX; the MODE_MASK test distinguishes a fault from user mode
 * (branch path not visible here) from one taken in kernel context.
 */
284 save_full_context_ex:
285 mfsr r8, SYSREG_RSR_EX
287 andh r8, (MODE_MASK >> 16), COH
288 mfsr r11, SYSREG_RAR_EX
291 1: pushm r11, r12 /* PC and SR */
/* Record the pre-exception stack pointer in the saved frame. */
295 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
296 stdsp sp[4], r10 /* replace saved SP */
299 /* Low-level exception handlers */
/*
 * Critical exception: save full context, call the C handler, and panic
 * (via the message below) if it ever returns.  The bus-error handlers
 * further down read the Bus Error Address register (BEAR) into r12 as
 * the argument for their C handler.
 */
303 rcall save_full_context_ex
306 rcall do_critical_exception
308 /* We should never get here... */
/* r12 = address of the panic string (PC-relative, position independent). */
310 sub r12, pc, (. - 1f)
313 1: .asciz "Return from critical exception!"
319 rcall save_full_context_ex
326 rcall save_full_context_ex
328 1: mfsr r12, SYSREG_BEAR
331 rjmp ret_from_exception
/*
 * NMI entry: saves PC/SR from the NMI shadow registers (RAR_NMI /
 * RSR_NMI), builds a frame, and restores through the same registers on
 * exit.  The mode field extracted into r0 selects how much of the frame
 * must be saved.  NOTE(review): the handler's label and the C call are
 * on lines not visible in this sampled listing.
 */
337 mfsr r9, SYSREG_RSR_NMI
338 mfsr r8, SYSREG_RAR_NMI
339 bfextu r0, r9, MODE_SHIFT, 3
342 1: pushm r8, r9 /* PC and SR */
347 mtsr SYSREG_RAR_NMI, r8
349 mtsr SYSREG_RSR_NMI, r9
353 sub sp, -4 /* skip r12_orig */
/* Non-user path: record the pre-exception SP in the saved frame. */
356 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
357 stdsp sp[4], r10 /* replace saved SP */
361 sub sp, -4 /* skip sp */
363 sub sp, -4 /* skip r12_orig */
/*
 * Thin low-level stubs: each saves a full exception context, calls the
 * corresponding C handler, and returns through ret_from_exception.
 */
366 handle_address_fault:
369 rcall save_full_context_ex
372 rcall do_address_exception
373 rjmp ret_from_exception
375 handle_protection_fault:
378 rcall save_full_context_ex
382 rjmp ret_from_exception
385 do_illegal_opcode_ll:
388 rcall save_full_context_ex
391 rcall do_illegal_opcode
392 rjmp ret_from_exception
/*
 * DTLB-modified handling: re-walk the page tables for the faulting
 * address (TLBEAR), set the dirty bit in the PTE, and rewrite the TLB
 * entry with the hardware flag bits.  The trailing stub saves full
 * context and exits through ret_from_exception for the not-present
 * slow path.  NOTE(review): labels and intermediate loads are missing
 * from this sampled listing.
 */
396 mfsr r1, SYSREG_TLBEAR
/* r2 = PGD index; r1 is then masked down to the PTE index. */
398 lsr r2, r1, PGDIR_SHIFT
400 lsl r1, (32 - PGDIR_SHIFT)
401 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
403 /* Translate to virtual address in P1 */
408 sbr r3, _PAGE_BIT_DIRTY
412 /* The page table is up-to-date. Update the TLB entry as well */
413 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
414 mtsr SYSREG_TLBELO, r0
416 /* MMUCR[DRP] is updated automatically, so let's go... */
425 rcall save_full_context_ex
430 rjmp ret_from_exception
/*
 * Common exception return.  The saved SR mode field (r4) decides
 * between the kernel-resume path (fault_resume_kernel, with optional
 * CONFIG_PREEMPT rescheduling) and the user-resume path, which loops
 * on the TIF_* work flags (reschedule, signal delivery, breakpoint
 * re-arm) before restoring RAR_EX/RSR_EX and returning.
 * NOTE(review): labels and several branches between these lines are
 * missing from this sampled listing.
 */
435 andh r4, (MODE_MASK >> 16), COH
436 brne fault_resume_kernel
439 ld.w r1, r0[TI_flags]
440 andl r1, _TIF_WORK_MASK, COH
446 mtsr SYSREG_RAR_EX, r8
447 mtsr SYSREG_RSR_EX, r9
453 #ifdef CONFIG_PREEMPT
/* Only preempt when preempt_count is zero and NEED_RESCHED is set. */
455 ld.w r2, r0[TI_preempt_count]
458 ld.w r1, r0[TI_flags]
459 bld r1, TIF_NEED_RESCHED
/* Do not preempt if the interrupted context had interrupts masked. */
462 bld r4, SYSREG_GM_OFFSET
464 rcall preempt_schedule_irq
471 mtsr SYSREG_RAR_EX, r8
472 mtsr SYSREG_RSR_EX, r9
474 sub sp, -4 /* ignore SP */
476 sub sp, -4 /* ignore r12_orig */
480 /* Switch to exception mode so that we can share the same code. */
482 cbr r8, SYSREG_M0_OFFSET
483 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
487 ld.w r1, r0[TI_flags]
490 bld r1, TIF_NEED_RESCHED
495 ld.w r1, r0[TI_flags]
498 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
504 rcall do_notify_resume
/* Re-sample flags: do_notify_resume may have set new work bits. */
506 ld.w r1, r0[TI_flags]
509 2: bld r1, TIF_BREAKPOINT
510 brcc fault_resume_user
/* Re-arm the hardware breakpoint for the current address space. */
511 mfsr r3, SYSREG_TLBEHI
517 mtdr DBGREG_BWA2A, r2
518 mtdr DBGREG_BWC2A, r3
519 rjmp fault_resume_user
521 /* If we get a debug trap from privileged context we end up here */
523 /* Fix up LR and SP in regs. r11 contains the mode we came from */
/* Clear the mode bits of the saved SR so the frame records a clean
 * status; the original mode is merged back in below. */
526 andh r8, hi(~MODE_MASK)
/* Record the pre-trap stack pointer in the saved frame. */
533 sub r10, sp, -FRAME_SIZE_FULL
534 stdsp sp[REG_SP], r10
538 /* Now, put everything back */
541 mtsr SYSREG_RAR_DBG, r10
542 mtsr SYSREG_RSR_DBG, r11
/* Combine the cleaned SR (r8) with the original mode bits (r11). */
545 andh r8, hi(~MODE_MASK)
546 andh r11, hi(MODE_MASK)
553 sub sp, -4 /* skip SP */
/*
 * Debug trap entry.  Everything is masked here; a trap from privileged
 * context is diverted to handle_debug_priv, otherwise a frame is built,
 * do_debug() is called (on a line not shown), and the exit path loops
 * over TIF_* work (reschedule, signals, single-step) before restoring.
 */
559 * At this point, everything is masked, that is, interrupts,
560 * exceptions and debugging traps. We might get called from
561 * interrupt or exception context in some rare cases, but this
562 * will be taken care of by do_debug(), so we're not going to
563 * do a 100% correct context save here.
566 sub sp, 4 /* r12_orig */
568 mfsr r10, SYSREG_RAR_DBG
569 mfsr r11, SYSREG_RSR_DBG
/* Non-zero mode bits => trap came from privileged context. */
572 andh r11, (MODE_MASK >> 16), COH
573 brne handle_debug_priv
/* On exit, the mode saved in the frame decides user vs kernel resume. */
578 lddsp r10, sp[REG_SR]
579 andh r10, (MODE_MASK >> 16), COH
580 breq debug_resume_user
585 mtsr SYSREG_RSR_DBG, r11
586 mtsr SYSREG_RAR_DBG, r10
595 ld.w r1, r0[TI_flags]
596 andl r1, _TIF_DBGWORK_MASK, COH
597 breq debug_restore_all
599 1: bld r1, TIF_NEED_RESCHED
604 ld.w r1, r0[TI_flags]
607 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
613 rcall do_notify_resume
615 ld.w r1, r0[TI_flags]
618 3: bld r1, TIF_SINGLE_STEP
619 brcc debug_restore_all
623 rjmp debug_restore_all
/*
 * Symbolic aliases so the IRQ_LEVEL macro below can form rar_int\level
 * and rsr_int\level by token pasting on the level number.
 */
625 .set rsr_int0, SYSREG_RSR_INT0
626 .set rsr_int1, SYSREG_RSR_INT1
627 .set rsr_int2, SYSREG_RSR_INT2
628 .set rsr_int3, SYSREG_RSR_INT3
629 .set rar_int0, SYSREG_RAR_INT0
630 .set rar_int1, SYSREG_RAR_INT1
631 .set rar_int2, SYSREG_RAR_INT2
632 .set rar_int3, SYSREG_RAR_INT3
/*
 * IRQ_LEVEL \level: generates the entry/exit stub for one interrupt
 * priority level (irq_level0..irq_level3).  Saves PC/SR from the
 * per-level shadow registers, dispatches to the C IRQ handler (line not
 * shown), then on return handles user-mode work, idle-sleep race
 * fixup (TIF_CPU_GOING_TO_SLEEP), and kernel preemption when
 * CONFIG_PREEMPT is enabled.  NOTE(review): several instructions and
 * the .endm are missing from this sampled listing.
 */
634 .macro IRQ_LEVEL level
635 .type irq_level\level, @function
637 sub sp, 4 /* r12_orig */
639 mfsr r8, rar_int\level
640 mfsr r9, rsr_int\level
/* Classify the interrupted context by the mode field of the saved SR. */
649 bfextu r4, r4, SYSREG_M0_OFFSET, 3
650 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
652 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
653 #ifdef CONFIG_PREEMPT
660 ld.w r1, r0[TI_flags]
661 andl r1, _TIF_WORK_MASK, COH
665 mtsr rar_int\level, r8
666 mtsr rsr_int\level, r9
668 sub sp, -4 /* ignore r12_orig */
/* Interrupt hit while the CPU was about to sleep: redirect the return
 * PC past the sleep instruction (cpu_idle_skip_sleep). */
671 2: get_thread_info r0
672 ld.w r1, r0[TI_flags]
673 bld r1, TIF_CPU_GOING_TO_SLEEP
674 #ifdef CONFIG_PREEMPT
679 sub r1, pc, . - cpu_idle_skip_sleep
681 #ifdef CONFIG_PREEMPT
/* Kernel preemption: only when preempt_count == 0, NEED_RESCHED is
 * set, and the interrupted context did not have interrupts masked. */
682 3: get_thread_info r0
683 ld.w r2, r0[TI_preempt_count]
686 ld.w r1, r0[TI_flags]
687 bld r1, TIF_NEED_RESCHED
690 bld r4, SYSREG_GM_OFFSET
692 rcall preempt_schedule_irq
/*
 * cpu_idle_sleep: enter sleep unless a reschedule is already pending.
 * TIF_CPU_GOING_TO_SLEEP is set before sleeping and cleared after, so
 * the IRQ_LEVEL exit path can detect an interrupt that raced with the
 * sleep instruction and skip it (see cpu_idle_skip_sleep above).
 */
697 .section .irq.text,"ax",@progbits
699 .global cpu_idle_sleep
703 ld.w r9, r8[TI_flags]
704 bld r9, TIF_NEED_RESCHED
705 brcs cpu_idle_enable_int_and_exit
706 sbr r9, TIF_CPU_GOING_TO_SLEEP
707 st.w r8[TI_flags], r9
712 ld.w r9, r8[TI_flags]
713 cbr r9, TIF_CPU_GOING_TO_SLEEP
714 st.w r8[TI_flags], r9
715 cpu_idle_enable_int_and_exit: