/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG
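/*
 * The macro above copies the r10/r11 values that the exception
 * prolog stashed in the dedicated exception-level stack frame into
 * the normal exception frame, using that level's SPRG as scratch
 * for r8 so no further registers are disturbed.
 */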
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	b	transfer_to_handler_full
#endif /* CONFIG_BOOKE */

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif /* CONFIG_40x */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f
2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
#ifdef CONFIG_6xx
	mfspr	r11,SPRN_HID0
	mtcr	r11
BEGIN_FTR_SECTION
	bt-	8,power_save_6xx_restore	/* Check DOZE */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
BEGIN_FTR_SECTION
	bt-	9,power_save_6xx_restore	/* Check NAP */
END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,THREAD_INFO-THREAD(r12)
	cmplw	r1,r11			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
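/*
 * By convention the caller's LR points at a pair of words giving
 * the (virtual) handler address and the address to continue at
 * once the handler is done; loading SRR0/SRR1 and executing RFI
 * then jumps to the handler and re-enables translation in a single
 * atomic step.
 */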
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r11,_end@h
	ori	r11,r11,_end@l
	cmplw	r1,r11
	ble	3b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
/*
 * Handle a system call.
 */
	.stabs	"arch/ppc/kernel/",N_SO,0,0,0f
	.stabs	"entry.S",N_SO,0,0,0f
0:
	.globl	DoSyscall
DoSyscall:
	stw	r0,THREAD+LAST_SYSCALL(r2)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
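/*
 * Note the error convention: the PowerPC syscall ABI reports
 * failure through the summary-overflow (SO) bit of CR0 rather than
 * through a negative return value alone, so SO is cleared here on
 * entry and set again on the error path at syscall exit.
 */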
	rlwinm	r10,r1,0,0,18		/* current_thread_info() */
	lwz	r11,TI_LOCAL_FLAGS(r10)
	rlwinm	r11,r11,0,~_TIFL_FORCE_NOERROR
	stw	r11,TI_LOCAL_FLAGS(r10)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	bge-	66f
	slwi	r0,r0,2
	lwzx	r10,r10,r0		/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl				/* Call handler */
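/*
 * Syscall numbers index 4-byte pointers in sys_call_table, hence
 * the shift left by 2 before the indexed load; out-of-range numbers
 * take the bge- to the ENOSYS stub.  blrl both jumps to the handler
 * and leaves the return address here in LR.
 */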
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	li	r11,-_LAST_ERRNO
	cmplw	0,r3,r11
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	blt+	30f
	lwz	r11,TI_LOCAL_FLAGS(r12)
	andi.	r11,r11,_TIFL_FORCE_NOERROR
	bne	30f
	neg	r3,r3
	lwz	r10,_CCR(r1)		/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
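/*
 * _TIFL_FORCE_NOERROR suppresses the errno translation above, for
 * syscalls whose legitimate return values could otherwise be
 * mistaken for small negative error codes.
 */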
	/* disable interrupts so current_thread_info()->flags can't change */
30:	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne-	syscall_exit_work
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
	stwcx.	r0,0,r1			/* to clear the reservation */
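/*
 * The dummy stwcx. above breaks any lwarx reservation the
 * interrupted code may still hold, so a load-reserve/
 * store-conditional sequence can never complete falsely after the
 * return.
 */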
/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)		/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont
syscall_exit_work:
	stw	r6,RESULT(r1)		/* Save result */
	stw	r3,GPR3(r1)		/* Update return value */
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	beq	5f
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)			/* re-enable interrupts */
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	REST_NVGPRS(r1)
2:	lwz	r3,GPR3(r1)
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)			/* disable interrupts again */
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
5:
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne	1f
	lwz	r5,_MSR(r1)
	andi.	r5,r5,MSR_PR
	beq	syscall_exit_cont
	andi.	r0,r9,_TIF_SIGPENDING
	beq	syscall_exit_cont
	b	do_user_signal
1:
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)			/* re-enable interrupts */
	bl	schedule
	b	2b

66:	li	r3,-ENOSYS
	b	ret_from_syscall
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r3,RESULT(r1)		/* Save result */

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r13 - r31) before calling
 * the C code.
 */
	.globl	ppc_sigsuspend
ppc_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_sigsuspend

	.globl	ppc_rt_sigsuspend
ppc_rt_sigsuspend:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_rt_sigsuspend

	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	b	sys_swapcontext
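/*
 * The low bit of the saved TRAP word is a flag: when set, only the
 * volatile registers are in the exception frame.  Clearing it with
 * the rlwinm above records that the full register set, r13-r31
 * included, has now been saved.
 */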
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,TRAP(r1)		/* register set saved */
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)		/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP		/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h		/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE		/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	oris	r0,r0,MSR_SPE@h		/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11		/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)		/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0		/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)		/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD		/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
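/*
 * The trick is that both tasks see a consistent picture: the
 * outgoing task sleeps with its stack pointer saved in KSP, and the
 * incoming task resumes at the _NIP loaded above, returning to
 * whoever called _switch in its own context, with r3 carrying the
 * old task as the "last" return value.
 */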
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.comm	fee_restarts,4

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif
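/*
 * transfer_to_handler_full fetches the two .long words that follow
 * the bl (via LR): the handler to invoke and the address to resume
 * at afterwards -- the same two-word convention used by the
 * transfer_to_handler path above.
 */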
	.globl	sigreturn_exit
sigreturn_exit:
	subi	r1,r3,STACK_FRAME_OVERHEAD
	rlwinm	r12,r1,0,0,18		/* current_thread_info() */
	lwz	r9,TI_FLAGS(r12)
	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
	bnel-	do_syscall_trace_leave
	/* fall through to ret_from_except_full */
	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,18
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,18
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
	/* interrupts are hard-disabled at this point */
restore:
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	lwz	r9,_MSR(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */
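/*
 * Critical and machine-check exceptions have their own save/restore
 * SPR pairs (CSRR0/CSRR1 and MCSRR0/MCSRR1) so they can be taken
 * without trashing the SRR0/SRR1 of an interrupted ordinary
 * exception; the macro is parameterized over the SPR pair and the
 * matching return instruction (RFCI or RFMCI).
 */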
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.comm	global_dbcr0,8
#endif /* CONFIG_4xx || CONFIG_BOOKE */
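/*
 * global_dbcr0 appears to be a two-word buffer: word 0 holds the
 * saved global DBCR0 value and word 4 a use count, incremented here
 * and decremented again on the transfer_to_handler path above.
 */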
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,18
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.comm	ee_restarts,4
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_OF
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */
	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_OF */
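/*
 * Note that SPRG2 is non-zero only while we are inside RTAS: it is
 * loaded with the physical stack pointer before the RFI into RTAS
 * and cleared again on the way out, which is what lets the machine
 * check handler divert to machine_check_in_rtas above.
 */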