/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/ibm4xx.h>
#include <asm/ibm44x.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
        /* Reserve a word at a fixed location to store the address of abatron_pteptrs */
        /* Save parameters we are passed */
        li      r24,0                   /* CPU number */

        /*
         * Set up the initial MMU state
         *
         * We are still executing code at the virtual address
         * mappings set by the firmware for the base of RAM.
         *
         * We first invalidate all TLB entries but the one
         * we are running from. We then load the KERNELBASE
         * mappings so we can begin to use kernel addresses
         * natively and so the interrupt vector locations are
         * permanently pinned (necessary since Book E
         * implementations always have translation enabled).
         *
         * TODO: Use the known TLB entry we are running from to
         *       determine which physical region we are located
         *       in. This can be used to determine where in RAM
         *       (on a shared CPU system) or PCI memory space
         *       (on a DRAMless system) we are located.
         *       For now, we assume a perfect world which means
         *       we are located at the base of DRAM (physical 0).
         */

        /*
         * Search TLB for entry that we are currently using.
         * Invalidate all entries but the one we are using.
         */
        /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
        mfspr   r3,SPRN_PID             /* Get PID */
        mfmsr   r4                      /* Get MSR */
        andi.   r4,r4,MSR_IS@l          /* TS=1? */
        beq     wmmucr                  /* If not, leave STS=0 */
        oris    r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr: mtspr   SPRN_MMUCR,r3           /* Put MMUCR */
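        /*
         * The TID and STS fields in MMUCR are what tlbsx compares
         * against (and what tlbwe records when a new entry is
         * written), so they are seeded from the current PID and
         * MSR[IS] above to make sure the search below matches the
         * mapping we are currently running under.
         */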
        bl      invstr                  /* Find our address */
invstr: mflr    r5                      /* Make it accessible */
        tlbsx   r23,0,r5                /* Find entry we are in */
        li      r4,0                    /* Start at TLB entry 0 */
        li      r3,0                    /* Set PAGEID inval value */
1:      cmpw    r23,r4                  /* Is this our entry? */
        beq     skpinv                  /* If so, skip the inval */
        tlbwe   r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
skpinv: addi    r4,r4,1                 /* Increment */
        cmpwi   r4,64                   /* Are we done? */
        bne     1b                      /* If not, repeat */
        isync                           /* If so, context change */
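        /*
         * Only the entry tlbsx located (the one we are executing
         * from) is still valid at this point: writing a zero PAGEID
         * word clears the valid bit in each of the other entries of
         * the 64-entry TLB, and the isync makes the TLB updates take
         * effect before any further instruction fetches.
         */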
        /* Configure and load pinned entry into TLB slot 63. */
        lis     r3,KERNELBASE@h         /* Load the kernel virtual address */
        ori     r3,r3,KERNELBASE@l

        /* Kernel is at the base of RAM */
        li      r4, 0                   /* Load the kernel physical address */

        /* Load the kernel PID = 0 */

        /* Initialize MMUCR */

        clrrwi  r3,r3,10                /* Mask off the effective page number */
        ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

        clrrwi  r4,r4,10                /* Mask off the real page number */
        /* ERPN is 0 for first 4GB page */

        /* Added guarded bit to protect against speculative loads/stores */
        ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

        li      r0,63                   /* TLB slot 63 */

        tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
        tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
        tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
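        /*
         * Each 440 TLB entry is written as three words: PAGEID
         * (effective page number, valid bit, page size), XLAT (real
         * page number plus the ERPN bits used for physical addresses
         * above 4GB) and ATTRIB (storage attributes and access
         * permissions). The three tlbwe instructions above pin a
         * single 256MB KERNELBASE -> physical 0 mapping into slot 63.
         */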
        /* Force context change */

        /* If necessary, invalidate original entry we used */
        tlbwe   r6,r23,PPC44x_TLB_PAGEID

#ifdef CONFIG_SERIAL_TEXT_DEBUG
        /*
         * Add temporary UART mapping for early debug.
         * We can map UART registers wherever we want as long as they don't
         * interfere with other system mappings (e.g. with pinned entries).
         * For an example of how we handle this - see ocotea.h. --ebs
         */
        lis     r3,UART0_IO_BASE@h
        ori     r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K

        lis     r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
#ifdef UART0_PHYS_ERPN
        ori     r4,r4,UART0_PHYS_ERPN   /* Add ERPN if above 4GB */

        ori     r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)

        li      r0,0                    /* TLB slot 0 */

        tlbwe   r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
        tlbwe   r4,r0,PPC44x_TLB_XLAT   /* Load the translation fields */
        tlbwe   r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
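        /*
         * This debug UART mapping is a single 4K entry in slot 0,
         * marked cache-inhibited and guarded (PPC44x_TLB_I |
         * PPC44x_TLB_G) as memory-mapped device registers require;
         * it is only a temporary mapping for early console output.
         */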
        /* Force context change */
#endif /* CONFIG_SERIAL_TEXT_DEBUG */

        /* Establish the interrupt vector offsets */
        SET_IVOR(0,  CriticalInput);
        SET_IVOR(1,  MachineCheck);
        SET_IVOR(2,  DataStorage);
        SET_IVOR(3,  InstructionStorage);
        SET_IVOR(4,  ExternalInput);
        SET_IVOR(5,  Alignment);
        SET_IVOR(6,  Program);
        SET_IVOR(7,  FloatingPointUnavailable);
        SET_IVOR(8,  SystemCall);
        SET_IVOR(9,  AuxillaryProcessorUnavailable);
        SET_IVOR(10, Decrementer);
        SET_IVOR(11, FixedIntervalTimer);
        SET_IVOR(12, WatchdogTimer);
        SET_IVOR(13, DataTLBError);
        SET_IVOR(14, InstructionTLBError);

        /* Establish the interrupt vector base */
        lis     r4,interrupt_base@h     /* IVPR only uses the high 16-bits */
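        /*
         * On Book E the vector address for interrupt n is formed from
         * the high 16 bits of IVPR and the offset programmed into
         * IVORn, so all of the handlers named above must live in the
         * 64KB region starting at interrupt_base and each must be
         * 16-byte aligned (see the alignment note ahead of the vector
         * code below).
         */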
        /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */

        /* This is where the main kernel code starts. */

        ori     r2,r2,init_task@l

        /* ptr to current thread */
        addi    r4,r2,THREAD            /* init task's THREAD */

        lis     r1,init_thread_union@h
        ori     r1,r1,init_thread_union@l
        stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
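        /*
         * r1 now points into init_thread_union; the stwu establishes
         * the stack back chain and leaves STACK_FRAME_OVERHEAD bytes
         * of room below the top of the THREAD_SIZE stack, giving the
         * first C code a minimal initial stack frame.
         */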
        /* Decide what sort of machine this is and initialize the MMU. */

        /* Setup PTE pointers for the Abatron bdiGDB */
        lis     r6, swapper_pg_dir@h
        ori     r6, r6, swapper_pg_dir@l
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        ori     r4, r4, KERNELBASE@l
        stw     r5, 0(r4)               /* Save abatron_pteptrs at a fixed location */

        lis     r4,start_kernel@h
        ori     r4,r4,start_kernel@l
        ori     r3,r3,MSR_KERNEL@l
        rfi                             /* change context and jump to start_kernel */
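        /*
         * rfi takes its target address from SRR0 and its new MSR from
         * SRR1, so reaching start_kernel with the final MSR_KERNEL
         * value relies on r4 and r3 having been copied into those
         * registers (in code not shown in this excerpt) before the rfi.
         */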
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
        /* Critical Input Interrupt */
        CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

        /* Machine Check Interrupt */
        MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
        CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

        /* Data Storage Interrupt */
        START_EXCEPTION(DataStorage)
        mtspr   SPRN_SPRG0, r10         /* Save some working registers */
        mtspr   SPRN_SPRG1, r11
        mtspr   SPRN_SPRG4W, r12
        mtspr   SPRN_SPRG5W, r13
        mtspr   SPRN_SPRG7W, r11
        /*
         * Check if it was a store fault; if not, bail out,
         * because a user tried to access a kernel or
         * read-protected page. Otherwise, get the
         * offending address and handle it.
         */
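        /*
         * In other words, this handler only fast-paths a store to a
         * page that is already present and writable: it marks the
         * Linux PTE dirty/accessed and rewrites the matching TLB
         * entry itself. Anything else is restored to its
         * pre-exception state and handed to the heavyweight
         * page-fault path (handle_page_fault).
         */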
        andis.  r10, r10, ESR_ST@h
        mfspr   r10, SPRN_DEAR          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        /* Get the PGD for the current thread */

        /* Load PID into MMUCR TID */
        mfspr   r12,SPRN_MMUCR          /* Get MMUCR */
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

        rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
        lwz     r11, 4(r12)             /* Get pte entry */
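        /*
         * The rlwinm/rlwimi pair above decodes the faulting address:
         * the top 11 bits index the 8KB (2048-entry) pgdir, so each
         * pgd entry covers 2MB, and the next 9 bits index the 512
         * eight-byte PTEs of the page table. The lwz then fetches
         * the low word of the 64-bit PTE, which holds the RPN and
         * the Linux protection/status bits.
         */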
        andi.   r13, r11, _PAGE_RW      /* Is it writeable? */
        beq     2f                      /* Bail if not */

        ori     r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
        stw     r11, 4(r12)             /* Update Linux page table */

        li      r13, PPC44x_TLB_SR@l    /* Set SR */
        rlwimi  r13, r11, 29, 29, 29    /* SX = _PAGE_HWEXEC */
        rlwimi  r13, r11, 0, 30, 30     /* SW = _PAGE_RW */
        rlwimi  r13, r11, 29, 28, 28    /* UR = _PAGE_USER */
        rlwimi  r12, r11, 31, 26, 26    /* (_PAGE_USER>>1)->r12 */
        rlwimi  r12, r11, 29, 30, 30    /* (_PAGE_USER>>3)->r12 */
        and     r12, r12, r11           /* HWEXEC/RW & USER */
        rlwimi  r13, r12, 0, 26, 26     /* UX = HWEXEC & USER */
        rlwimi  r13, r12, 3, 27, 27     /* UW = RW & USER */

        rlwimi  r11,r13,0,26,31         /* Insert static perms */
        rlwinm  r11,r11,0,20,15         /* Clear U0-U3 */
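        /*
         * The sequence above builds the six permission bits that are
         * inserted into bits 26-31 of the TLB attribute word: SR is
         * always granted, SW follows _PAGE_RW and SX follows
         * _PAGE_HWEXEC, while the user rights UR/UW/UX are only
         * granted when _PAGE_USER is also set, so kernel-only pages
         * never become user-accessible. Bits 16-19 (U0-U3) are
         * cleared rather than inherited from the PTE.
         */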
        /* find the TLB index that caused the fault. It has to be here. */
        tlbwe   r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */

        /* Done...restore registers and get out of here. */
        mfspr   r11, SPRN_SPRG7R
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0
        rfi                             /* Force context change */

        /*
         * The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG7R
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0

        /* Instruction Storage Interrupt */
        INSTRUCTION_STORAGE_EXCEPTION

        /* External Input Interrupt */
        EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

        /* Alignment Interrupt */

        /* Program Interrupt */

        /* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
        EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)

        /* System Call Interrupt */
        START_EXCEPTION(SystemCall)
        NORMAL_EXCEPTION_PROLOG
        EXC_XFER_EE_LITE(0x0c00, DoSyscall)
        /* Auxiliary Processor Unavailable Interrupt */
        EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION
        /* Fixed Interval Timer Interrupt */
        /* TODO: Add FIT support */
        EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

        /* Watchdog Timer Interrupt */
        /* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
        CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
        /* Data TLB Error Interrupt */
        START_EXCEPTION(DataTLBError)
        mtspr   SPRN_SPRG0, r10         /* Save some working registers */
        mtspr   SPRN_SPRG1, r11
        mtspr   SPRN_SPRG4W, r12
        mtspr   SPRN_SPRG5W, r13
        mtspr   SPRN_SPRG7W, r11
        mfspr   r10, SPRN_DEAR          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        /* Get the PGD for the current thread */

        /* Load PID into MMUCR TID */
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

        rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
        lwz     r11, 4(r12)             /* Get pte entry */
        andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
        beq     2f                      /* Bail if not present */

        ori     r11, r11, _PAGE_ACCESSED

        /* Jump to common tlb load */

        /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG7R
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0

        /* Instruction TLB Error Interrupt */
        /*
         * Nearly the same as above, except we get our
         * information from different registers and bail out
         * to a different point.
         */
        START_EXCEPTION(InstructionTLBError)
        mtspr   SPRN_SPRG0, r10         /* Save some working registers */
        mtspr   SPRN_SPRG1, r11
        mtspr   SPRN_SPRG4W, r12
        mtspr   SPRN_SPRG5W, r13
        mtspr   SPRN_SPRG7W, r11
        mfspr   r10, SPRN_SRR0          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        /* Get the PGD for the current thread */

        /* Load PID into MMUCR TID */
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

        rlwinm  r12, r10, 13, 19, 29    /* Compute pgdir/pmd offset */
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        rlwimi  r12, r10, 23, 20, 28    /* Compute pte address */
        lwz     r11, 4(r12)             /* Get pte entry */
        andi.   r13, r11, _PAGE_PRESENT /* Is the page present? */
        beq     2f                      /* Bail if not present */

        ori     r11, r11, _PAGE_ACCESSED

        /* Jump to common TLB load point */

        /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG7R
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0

        /* Debug Interrupt */

        /*
         * Data TLB exceptions will bail out to this point
         * if they can't resolve the lightweight TLB fault.
         */
        NORMAL_EXCEPTION_PROLOG
        mfspr   r5,SPRN_ESR             /* Grab the ESR, save it, pass arg3 */
        mfspr   r4,SPRN_DEAR            /* Grab the DEAR, save it, pass arg2 */
        EXC_XFER_EE_LITE(0x0300, handle_page_fault)
        /*
         * Both the instruction and data TLB miss handlers get to this
         * point to load the TLB.
         *      r11 - available to use
         *      r12 - Pointer to the 64-bit PTE
         *      r13 - available to use
         *      MMUCR - loaded with proper value when we get here
         *      Upon exit, we reload everything and RFI.
         *
         * We set execute, because we don't have the granularity to
         * properly set this at the page level (Linux problem).
         * If shared is set, we cause a zero PID->TID load.
         * Many of these bits are software only. Bits we don't set
         * here we (properly should) assume have the appropriate value.
         */

        /* Load the next available TLB index */
        lis     r13, tlb_44x_index@ha
        lwz     r13, tlb_44x_index@l(r13)
        /* Load the TLB high watermark */
        lis     r11, tlb_44x_hwater@ha
        lwz     r11, tlb_44x_hwater@l(r11)

        /* Increment, rollover, and store TLB index */
        cmpw    0, r13, r11             /* reserve entries */

        /* Store the next available TLB index */
        lis     r11, tlb_44x_index@ha
        stw     r13, tlb_44x_index@l(r11)
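        /*
         * Replacement is a simple round robin: tlb_44x_index is
         * advanced and wrapped so that victim slots stay at or below
         * tlb_44x_hwater (the "reserve entries" check above), which
         * keeps the pinned entries, such as the kernel mapping in
         * slot 63, from ever being evicted.
         */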
        lwz     r11, 0(r12)             /* Get MS word of PTE */
        lwz     r12, 4(r12)             /* Get LS word of PTE */
        rlwimi  r11, r12, 0, 0, 19      /* Insert RPN */
        tlbwe   r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
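        /*
         * The XLAT word is built from both halves of the 64-bit PTE:
         * the page frame number from the low word is merged over the
         * bits kept from the high word, which carry the extended
         * (above-4GB) physical address information; this is why the
         * 44x uses 64-bit PTEs and the 8KB pgdir mentioned at the
         * end of this file.
         */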
        /*
         * Create PAGEID. This is the faulting address,
         * page size, and valid flag.
         */
        li      r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
        rlwimi  r10, r11, 0, 20, 31     /* Insert valid and page size */
        tlbwe   r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
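        /*
         * The PAGEID word itself carries no translation ID; the TID
         * for the new entry comes from MMUCR, which the miss handlers
         * set to zero for kernel addresses (so the entry matches
         * every process) or to the current PID for user addresses
         * before branching here.
         */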
        li      r10, PPC44x_TLB_SR@l    /* Set SR */
        rlwimi  r10, r12, 0, 30, 30     /* Set SW = _PAGE_RW */
        rlwimi  r10, r12, 29, 29, 29    /* SX = _PAGE_HWEXEC */
        rlwimi  r10, r12, 29, 28, 28    /* UR = _PAGE_USER */
        rlwimi  r11, r12, 31, 26, 26    /* (_PAGE_USER>>1)->r11 */
        and     r11, r12, r11           /* HWEXEC & USER */
        rlwimi  r10, r11, 0, 26, 26     /* UX = HWEXEC & USER */

        rlwimi  r12, r10, 0, 26, 31     /* Insert static perms */
        rlwinm  r12, r12, 0, 20, 15     /* Clear U0-U3 */
        tlbwe   r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
        /* Done...restore registers and get out of here. */
        mfspr   r11, SPRN_SPRG7R
        mfspr   r13, SPRN_SPRG5R
        mfspr   r12, SPRN_SPRG4R
        mfspr   r11, SPRN_SPRG1
        mfspr   r10, SPRN_SPRG0
        rfi                             /* Force context change */

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
        oris    r13,r13,DBCR0_RST_SYSTEM@h

#ifdef CONFIG_BDI_SWITCH
        /* Context switch the PTE pointer for the Abatron BDI2000.
         * The PGDIR is the second parameter.
         */
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        isync                           /* Force context change */

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
        .globl  empty_zero_page

/* To support >32-bit physical addresses, we use an 8KB pgdir. */
        .globl  swapper_pg_dir

/* Reserved 4k for the critical exception stack & 4k for the machine
 * check stack per CPU for kernel mode exceptions */
exception_stack_bottom:
        .space  BOOKE_EXCEPTION_STACK_SIZE
        .globl  exception_stack_top

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */