 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
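
/*
 * Illustrative expansion (the values are examples, not taken from
 * this file): with a Book-E style MSR_KERNEL of, say, 0x00021000,
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) needs the two-instruction form
 *
 *	lis	r10,0x0002
 *	ori	r10,r10,0x1000
 *
 * while a classic 6xx-style value such as 0x1032 fits a single
 * "li r10,0x1032", since li takes a 16-bit signed immediate.
 */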
#ifdef CONFIG_BOOKE
#include "head_booke.h"
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	BOOKE_LOAD_MCHECK_STACK
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
	b	transfer_to_handler_full
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,GPR10-INT_FRAME_SIZE(r8)
	stw	r0,GPR10(r11)
	lwz	r0,GPR11-INT_FRAME_SIZE(r8)
	stw	r0,GPR11(r11)
#endif	/* CONFIG_BOOKE */

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
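
/*
 * Illustrative sketch (not part of this file): a low-level prologue
 * can establish the cr0.eq convention by testing the PR bit of the
 * interrupted MSR, for example:
 *
 *	mfspr	r9,SPRN_SRR1		# MSR at the time of the exception
 *	andi.	r11,r9,MSR_PR		# cr0.eq set iff PR was 0 (kernel)
 */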
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	.globl	transfer_to_handler
transfer_to_handler:
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check whether the dbcr0 register is set up for debugging;
	   we use the single-step (IC) bit as the indicator. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl	transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
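
	/*
	 * thread_info sits at the low end of the kernel stack and the
	 * stack grows down toward it, so r1 at or below the thread_info
	 * address means the stack has already been exhausted.
	 */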
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	RFI				/* jump to handler, enable MMU */
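
	/*
	 * RFI loads the PC from SRR0 and the MSR from SRR1 in a single
	 * atomic operation, so the branch to the handler and the switch
	 * to a translated (MSR_IR|MSR_DR) kernel MSR happen together.
	 */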
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	ble	3b			/* r1 <= &_end is OK */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,18	/* current_thread_info() */
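
	/*
	 * The rlwinm above is current_thread_info(): kernel stacks are
	 * THREAD_SIZE (8KB here) aligned, so clearing the low 13 bits
	 * of r1 yields the thread_info at the base of the stack.
	 * Roughly, in C:
	 *
	 *	ti = (struct thread_info *)((unsigned long)sp & ~0x1fff);
	 */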
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2		/* scale syscall number to a word index */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
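
	/*
	 * In C terms the dispatch above is approximately (sketch only;
	 * the arguments are still live in r3-r8 from the caller):
	 *
	 *	ret = sys_call_table[nr](r3, r4, r5, r6, r7, r8);
	 *
	 * where nr is the syscall number that arrived in r0.
	 */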
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up. The
	   single-step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
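
	/*
	 * The stwcx. to the stack exists only to kill any dangling
	 * lwarx reservation the interrupted code may hold, so that a
	 * later stwcx. in the resumed context cannot succeed spuriously.
	 */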
/* Traced system call support */
syscall_dotrace:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* re-enable interrupts */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	MTMSRD(r10)		/* disable interrupts again */
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	MTMSRD(r10)		/* re-enable interrupts */
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	stw	r3,RESULT(r1)	/* Save result */
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
#ifdef SHOW_SYSCALLS_TASK
	.globl	show_syscalls_task
show_syscalls_task:
#endif /* SHOW_SYSCALLS */
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
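
	/*
	 * Convention: the low bit of the TRAP word in the exception
	 * frame records whether only the volatile registers were saved
	 * (bit set) or the full set including r13-r31 (bit clear), so
	 * clearing it here advertises that the NVGPRs are now valid.
	 */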
	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
	.globl	ppc_swapcontext
ppc_swapcontext:
	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
	stw	r0,TRAP(r1)	/* register set saved */
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	bad_page_fault
	b	ret_from_except_full
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
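
/*
 * Sketch of the C-level contract (assuming the usual switch_to()
 * arrangement in this tree): the scheduler passes the two
 * thread_struct pointers and receives the outgoing task back, e.g.
 *
 *	last = _switch(&prev->thread, &new->thread);
 */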
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	stw	r0,_NIP(r1)	/* Return to switch caller */
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
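
	/*
	 * Lazy context switch: r0 now holds whichever FP/VEC/SPE enable
	 * bits were set, and clearing them in the saved MSR makes the
	 * next task trap on first use of the unit, so unit state is
	 * saved and restored only when a task actually touches it.
	 */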
	stw	r1,KSP(r3)	/* Set old stack pointer */
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */
	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */
	/* r3-r12 are destroyed -- Cort */
	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
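
	/*
	 * transfer_to_handler picks up the two words following the bl
	 * through LR: the handler to call and the address to continue
	 * at, which is exactly what the lwz r11,0(r9)/lwz r9,4(r9)
	 * pair in transfer_to_handler_cont reads.
	 */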
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	bnel-	do_syscall_trace_leave
	.globl	ret_from_except_full
ret_from_except_full:
	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value. The
	   single-step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_PREEMPT
	b	restore
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info()->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
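
/*
 * The 1: loop above re-tests TIF_NEED_RESCHED after every call to
 * preempt_schedule_irq, since an interrupt taken while scheduling
 * can mark the task for rescheduling again before we get back.
 */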
	/* interrupts are hard-disabled at this point */
restore:
	stwcx.	r0,0,r1		/* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI	/* check if this exception occurred */
	beql	nonrecoverable	/* at a bad place (MSR:RI = 0) */
/*
 * Once we put values in SRR0 and SRR1, we are in a state
 * where exceptions are not recoverable, since taking an
 * exception will trash SRR0 and SRR1. Therefore we clear the
 * MSR:RI bit to indicate this. If we do take an exception,
 * we can't return to the point of the exception but we
 * can restart the exception exit path at the label
 * exc_exit_restart below. -- paulus
 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl	exc_exit_restart
exc_exit_restart:
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
 * This is a bit different on 4xx/Book-E because it doesn't have
 * the RI bit in the MSR.
 * The TLB miss handler checks if we have interrupted
 * the exception exit path and restarts it if so
 * (well maybe one day it will... :).
 */
	.globl	exc_exit_restart
exc_exit_restart:
	.globl	exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.		/* prevent prefetch past rfi */
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception. For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	/* avoid any possible TLB misses here by turning off MSR:DR; we
	 * assume the instructions here are mapped by a pinned TLB entry */
	b	.		/* prevent prefetch past rfci */
/*
 * Return from a machine check interrupt, similar to a critical
 * interrupt.
 */
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	stwcx.	r0,0,r1		/* to clear the reservation */
	mtspr	SPRN_MCSRR0,r11
	mtspr	SPRN_MCSRR1,r12
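
	/*
	 * Book-E machine checks have their own save/restore pair,
	 * MCSRR0/MCSRR1, so this path returns with rfmci rather than
	 * rfi, analogous to SRR0/SRR1 for ordinary exceptions.
	 */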
#endif /* CONFIG_BOOKE */
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0. Note that r0
 * has the dbcr0 value to set upon entry to this routine.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr
#endif /* CONFIG_4xx || CONFIG_BOOKE */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
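
	/*
	 * Classic enable/schedule/disable/re-test loop: the flags are
	 * only trustworthy while interrupts are hard-disabled, so after
	 * schedule() returns they must be re-read before we can decide
	 * to go back out to user mode.
	 */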
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
/*
 * PROM code for specific machines follows. Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
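
	/*
	 * RTAS must execute in real mode (see the comment above), so
	 * the MSR it is entered with clears MSR_IR and MSR_DR; r8 holds
	 * the physical entry point and r6 the physical return address.
	 */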
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */
	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_OF */