/*
 * arch/ppc64/kernel/head.S
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64-bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 *
 * SPRG usage:
 * SPRG0 reserved for hypervisor
 * SPRG1 temp - used to save gpr
 * SPRG2 temp - used to save gpr
 * SPRG3 virt addr of paca
 */
/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & Open Firmware is running in real mode.
 *   2. The kernel is entered at __start
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */
#ifdef CONFIG_PPC_MULTIPLATFORM
/* NOP this out unconditionally */
b .__start_initialization_multiplatform
#endif /* CONFIG_PPC_MULTIPLATFORM */

/* Catch branch to 0 in real mode */
#ifdef CONFIG_PPC_ISERIES
/*
 * At offset 0x20, there is a pointer to iSeries LPAR data.
 * This is required by the hypervisor.
 */
.llong hvReleaseData-KERNELBASE

/*
 * At offsets 0x28 and 0x30 are offsets to the mschunks_map
 * array (used by the iSeries LPAR debugger to do translation
 * between physical addresses and absolute addresses) and
 * to the pidhash table (also used by the debugger).
 */
.llong mschunks_map-KERNELBASE
.llong 0 /* pidhash-KERNELBASE SFRXXX */

/* Offset 0x38 - Pointer to start of embedded System.map */
.globl embedded_sysmap_start
embedded_sysmap_start:

/* Offset 0x40 - Pointer to end of embedded System.map */
.globl embedded_sysmap_end
#endif /* CONFIG_PPC_ISERIES */
/* Secondary processors spin on this value until it goes to 1. */
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:

/* Secondary processors write this value with their cpu # */
/* after they enter the spin loop immediately below. */
.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
mtmsrd r24 /* RI on */

/* Grab our linux cpu number */

/* Tell the master cpu we're here */
/* Relocation is off & we are located at an address less */
/* than 0x100, so only need to grab low order offset. */
std r24,__secondary_hold_acknowledge@l(0)

/* All secondary cpus wait here until told to start. */
100: ld r4,__secondary_hold_spinloop@l(0)
b .pSeries_secondary_smp_init
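/*
 * Illustrative sketch (an assumption, not part of the build): the
 * hold/release protocol above, expressed in C.  The two variables are
 * the real symbols; the C shape and function signature are invented
 * for illustration only.
 *
 *	extern volatile unsigned long __secondary_hold_spinloop;
 *	extern volatile unsigned long __secondary_hold_acknowledge;
 *
 *	void __secondary_hold(unsigned long cpu)
 *	{
 *		__secondary_hold_acknowledge = cpu;	// tell the master we are here
 *		while (__secondary_hold_spinloop == 0)	// spin until released
 *			;
 *		pSeries_secondary_smp_init();		// then continue boot
 *	}
 */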
/* This value is used to mark exception frames on the stack; it is the
 * ASCII string "regshere". */
.tc ID_72656773_68657265[TC],0x7265677368657265
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 */
#define EXCEPTION_PROLOG_PSERIES(area, label) \
 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
 std r10,area+EX_R10(r13); \
 std r11,area+EX_R11(r13); \
 std r12,area+EX_R12(r13); \
 mfspr r9,SPRN_SPRG1; \
 std r9,area+EX_R13(r13); \
 clrrdi r12,r13,32; /* get high part of &label */ \
 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
 ori r12,r12,(label)@l; /* virt addr of handler */ \
 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
 mtspr SPRN_SRR0,r12; \
 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
 mtspr SPRN_SRR1,r10; \
 b . /* prevent speculative execution */
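/*
 * Sketch (our reading of the macro, with the elided tail assumed): the
 * prologue builds an rfid trampoline.  SRR0 holds the virtual address
 * of the handler and SRR1 the new MSR, so rfid simultaneously turns
 * relocation on and branches to the handler:
 *
 *	mtspr	SPRN_SRR0,r12	// virt addr of handler
 *	mtspr	SPRN_SRR1,r10	// MSR with IR|DR|RI set
 *	rfid			// relocation on, jump to handler
 *	b	.		// never executed, even speculatively
 */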
/*
 * This is the start of the interrupt handlers for iSeries.
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area) \
 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
 std r10,area+EX_R10(r13); \
 std r11,area+EX_R11(r13); \
 std r12,area+EX_R12(r13); \
 mfspr r9,SPRN_SPRG1; \
 std r9,area+EX_R13(r13); \

#define EXCEPTION_PROLOG_ISERIES_2 \
 ld r11,PACALPPACA+LPPACASRR0(r13); \
 ld r12,PACALPPACA+LPPACASRR1(r13); \
 ori r10,r10,MSR_RI; \
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area) \
 andi. r10,r12,MSR_PR; /* See if coming from user */ \
 mr r10,r1; /* Save r1 */ \
 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
 bge- cr1,bad_stack; /* abort if it is */ \
 std r9,_CCR(r1); /* save CR in stackframe */ \
 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
 std r10,0(r1); /* make stack chain pointer */ \
 std r0,GPR0(r1); /* save r0 in stackframe */ \
 std r10,GPR1(r1); /* save r1 in stackframe */ \
 std r2,GPR2(r1); /* save r2 in stackframe */ \
 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
 ld r10,area+EX_R10(r13); \
 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
 ld r10,area+EX_R12(r13); \
 ld r11,area+EX_R13(r13); \
 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
 mflr r9; /* save LR in stackframe */ \
 mfctr r10; /* save CTR in stackframe */ \
 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
 std r9,_TRAP(r1); /* set trap number */ \
 ld r11,exception_marker@toc(r2); \
 std r10,RESULT(r1); /* clear regs->result */ \
 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
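/*
 * Sketch (an assumption, illustrative only): the frame the macro above
 * leaves on the kernel stack, described in C.  Field names follow
 * struct pt_regs; the helper shape is invented for illustration.
 *
 *	void prolog_common(struct pt_regs *regs, int n)
 *	{
 *		regs->ccr = saved_cr;	// from r9
 *		regs->nip = saved_srr0;	// from r11
 *		regs->msr = saved_srr1;	// from r12
 *		regs->trap = n;		// vector number
 *		regs->result = 0;
 *		// gpr[0..13] filled from live registers and the paca
 *		// save area; "regshere" marker stored just past
 *		// STACK_FRAME_OVERHEAD
 *	}
 */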
#define STD_EXCEPTION_PSERIES(n, label) \
 .globl label##_pSeries; \
 mtspr SPRN_SPRG1,r13; /* save r13 */ \
 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)

#define STD_EXCEPTION_ISERIES(n, label, area) \
 .globl label##_iSeries; \
 mtspr SPRN_SPRG1,r13; /* save r13 */ \
 EXCEPTION_PROLOG_ISERIES_1(area); \
 EXCEPTION_PROLOG_ISERIES_2; \

#define MASKABLE_EXCEPTION_ISERIES(n, label) \
 .globl label##_iSeries; \
 mtspr SPRN_SPRG1,r13; /* save r13 */ \
 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
 lbz r10,PACAPROCENABLED(r13); \
 beq- label##_iSeries_masked; \
 EXCEPTION_PROLOG_ISERIES_2; \

#ifdef DO_SOFT_DISABLE
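/*
 * Sketch (an assumption, with illustrative names): what the soft-disable
 * scheme below amounts to in C.  On iSeries the hardware EE bit is left
 * on so the hypervisor still gets its interrupts; only the paca records
 * the logical state, and masked events (see the *_iSeries_masked
 * handlers below) are dealt with later.
 *
 *	void disable_ints(void)
 *	{
 *		get_paca()->proc_enabled = 0;	// logically disabled
 *		hard_irq_enable();		// MSR_EE stays set
 *	}
 *
 *	void enable_ints(void)
 *	{
 *		get_paca()->proc_enabled = 1;	// logically enabled
 *		hard_irq_enable();
 *	}
 */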
#define DISABLE_INTS \
 lbz r10,PACAPROCENABLED(r13); \
 stb r11,PACAPROCENABLED(r13); \
 ori r10,r10,MSR_EE; \

#define ENABLE_INTS \
 lbz r10,PACAPROCENABLED(r13); \
 ori r11,r11,MSR_EE; \

#else /* hard enable/disable interrupts */

#define ENABLE_INTS \
 rlwimi r11,r12,0,MSR_EE; \
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
 .globl label##_common; \
 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
 addi r3,r1,STACK_FRAME_OVERHEAD; \

#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
 .globl label##_common; \
 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
 addi r3,r1,STACK_FRAME_OVERHEAD; \
 b .ret_from_except_lite
/*
 * Start of pSeries system interrupt routines
 */
.globl __start_interrupts

STD_EXCEPTION_PSERIES(0x100, system_reset)

_machine_check_pSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
.globl data_access_pSeries
rlwimi r13,r12,16,0x20
beq .do_stab_bolted_pSeries
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
.globl data_access_slb_pSeries
data_access_slb_pSeries:
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
b .slb_miss_realmode /* Rel. branch works in real mode */

STD_EXCEPTION_PSERIES(0x400, instruction_access)
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
b .slb_miss_realmode /* Rel. branch works in real mode */
STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
STD_EXCEPTION_PSERIES(0x700, program_check)
STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
STD_EXCEPTION_PSERIES(0x900, decrementer)
STD_EXCEPTION_PSERIES(0xa00, trap_0a)
STD_EXCEPTION_PSERIES(0xb00, trap_0b)

.globl system_call_pSeries
oris r12,r12,system_call_common@h
ori r12,r12,system_call_common@l
ori r10,r10,MSR_IR|MSR_DR|MSR_RI
b . /* prevent speculative execution */
STD_EXCEPTION_PSERIES(0xd00, single_step)
STD_EXCEPTION_PSERIES(0xe00, trap_0e)

/* We need to deal with the Altivec unavailable exception
 * here which is at 0xf20, thus in the middle of the
 * prolog code of the PerformanceMonitor one.  A little
 * trickery is thus necessary.
 */
b performance_monitor_pSeries

STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
/*** pSeries interrupt support ***/

/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., performance_monitor)

_GLOBAL(do_stab_bolted_pSeries)
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code reasonably far away from slb_miss_user_common, to
 * avoid problems with rfid.
 *
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now but will once we re-implement
 * dynamic VSIDs for shared page tables.
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R3(r13)
mfspr r11,SPRN_SRR0 /* save SRR0 */
ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
ori r10,r10,MSR_IR|MSR_DR|MSR_RI
mfspr r12,SPRN_SRR1 /* and SRR1 */
b . /* prevent spec. execution */
#endif /* __DISABLED__ */
/*
 * Vectors for the FWNMI option.  Share common code.
 */
.globl system_reset_fwnmi
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

.globl machine_check_fwnmi
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
#ifdef CONFIG_PPC_ISERIES
/*** iSeries-LPAR interrupt handlers ***/

STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)

.globl data_access_iSeries
rlwimi r13,r12,16,0x20
beq .do_stab_bolted_iSeries
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
EXCEPTION_PROLOG_ISERIES_2

.do_stab_bolted_iSeries:
EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
EXCEPTION_PROLOG_ISERIES_2
.globl data_access_slb_iSeries
data_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
std r9,PACA_EXSLB+EX_R9(r13)
bge slb_miss_user_iseries
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
std r10,PACA_EXSLB+EX_R13(r13)
ld r12,PACALPPACA+LPPACASRR1(r13);
STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)

.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
std r9,PACA_EXSLB+EX_R9(r13)
bge slb_miss_user_iseries
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
std r10,PACA_EXSLB+EX_R13(r13)
ld r12,PACALPPACA+LPPACASRR1(r13);
slb_miss_user_iseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R3(r13)
EXCEPTION_PROLOG_ISERIES_2
b slb_miss_user_common
MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)

.globl system_call_iSeries
EXCEPTION_PROLOG_ISERIES_2

STD_EXCEPTION_ISERIES(0xd00, single_step, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0xe00, trap_0e, PACA_EXGEN)
STD_EXCEPTION_ISERIES(0xf00, performance_monitor, PACA_EXGEN)
.globl system_reset_iSeries
system_reset_iSeries:
mfspr r13,SPRN_SPRG3 /* Get paca address */
mtmsrd r24 /* RI on */
lhz r24,PACAPACAINDEX(r13) /* Get processor # */
cmpwi 0,r24,0 /* Are we processor 0? */
beq .__start_initialization_iSeries /* Start up the first processor */
li r5,CTRL_RUNLATCH /* Turn off the run light */

lbz r23,PACAPROCSTART(r13) /* Test if this processor should start */
LOADADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
addi r1,r3,THREAD_SIZE
subi r1,r1,STACK_FRAME_OVERHEAD

beq iSeries_secondary_smp_loop /* Loop until told to go */
bne .__secondary_start /* Told to go: start this processor */
iSeries_secondary_smp_loop:
/* Let the Hypervisor know we are alive */
/* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
#else /* CONFIG_SMP */
/* Yield the processor.  This is required for non-SMP kernels
 * which are running on multi-threaded machines. */
rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
li r4,0 /* "yield timed" */
li r5,-1 /* "yield forever" */
#endif /* CONFIG_SMP */
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
b 1b /* If SMP not configured, secondaries loop forever */
.globl decrementer_iSeries_masked
decrementer_iSeries_masked:
stb r11,PACALPPACA+LPPACADECRINT(r13)
lwz r12,PACADEFAULTDECR(r13)

.globl hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked:
mtcrf 0x80,r9 /* Restore regs */
ld r11,PACALPPACA+LPPACASRR0(r13)
ld r12,PACALPPACA+LPPACASRR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
b . /* prevent speculative execution */
#endif /* CONFIG_PPC_ISERIES */
/*** Common interrupt handlers ***/

STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

/*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
.globl machine_check_common
machine_check_common:
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .machine_check_exception

STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
ld r1,PACAEMERGSP(r13)
subi r1,r1,64+INT_FRAME_SIZE
addi r11,r1,INT_FRAME_SIZE
1: addi r3,r1,STACK_FRAME_OVERHEAD
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
.globl fast_exception_return
fast_exception_return:
andi. r3,r12,MSR_RI /* check if RI is set */
clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
b . /* prevent speculative execution */

1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
.globl data_access_common
RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
std r10,PACA_EXGEN+EX_DAR(r13)
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
b .do_hash_page /* Try to handle as hpte fault */
.globl instruction_access_common
instruction_access_common:
EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
b .do_hash_page /* Try to handle as hpte fault */
/*
 * This is the common SLB miss handler that is used when going to
 * virtual mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
.globl slb_miss_user_common
slb_miss_user_common:
std r3,PACA_EXGEN+EX_DAR(r13)
stw r9,PACA_EXGEN+EX_CCR(r13)
std r10,PACA_EXGEN+EX_LR(r13)
std r11,PACA_EXGEN+EX_SRR0(r13)
bl .slb_allocate_user
ld r10,PACA_EXGEN+EX_LR(r13)
ld r3,PACA_EXGEN+EX_R3(r13)
lwz r9,PACA_EXGEN+EX_CCR(r13)
ld r11,PACA_EXGEN+EX_SRR0(r13)
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- unrecov_user_slb
clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
ld r4,PACA_EXGEN+EX_DAR(r13)
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
#endif /* __DISABLED__ */
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode

/* All done -- return from exception. */

ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
#ifdef CONFIG_PPC_ISERIES
ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
#endif /* CONFIG_PPC_ISERIES */
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
#ifdef CONFIG_PPC_ISERIES
#endif /* CONFIG_PPC_ISERIES */
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
b . /* prevent speculative execution */
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
.globl hardware_interrupt_common
.globl hardware_interrupt_entry
hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
addi r3,r1,STACK_FRAME_OVERHEAD
b .ret_from_except_lite
.globl alignment_common
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_DSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .alignment_exception
.globl program_check_common
program_check_common:
EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .program_check_exception

.globl fp_unavailable_common
fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
bne .load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_fp_unavailable_exception
.globl altivec_unavailable_common
altivec_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
bne .load_up_altivec /* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .altivec_unavailable_exception
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
mfmsr r5 /* grab the current MSR */
oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */
/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
ld r3,last_task_used_altivec@got(r2)

/* Save VMX state to last_task_used_altivec's THREAD struct */

/* Disable VMX for last_task_used_altivec */
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#endif /* CONFIG_SMP */
/* Hack: if we get an altivec unavailable trap with VRSAVE
 * set to all zeros, we assume this is a broken application
 * that fails to set it properly, and thus we switch it to
 * all 1's
 */
mfspr r4,SPRN_VRSAVE
mtspr SPRN_VRSAVE,r4

/* enable use of VMX after return */
ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */
oris r12,r12,MSR_VEC@h
stw r4,THREAD_USED_VR(r5)

/* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
#endif /* CONFIG_SMP */
/* restore registers and return */
b fast_exception_return
#endif /* CONFIG_ALTIVEC */
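/*
 * Sketch (an assumption): the UP lazy-switch logic of load_up_altivec
 * in C.  save_vr_state() is an illustrative name for the VREG save
 * sequence, not a function this file defines.
 *
 *	if (last_task_used_altivec) {
 *		struct thread_struct *t = &last_task_used_altivec->thread;
 *		save_vr_state(t);		// dump the vector registers
 *		t->regs->msr &= ~MSR_VEC;	// old owner faults next use
 *	}
 *	current->thread.regs->msr |= MSR_VEC;	// we own VMX on return
 *	last_task_used_altivec = current;
 */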
_GLOBAL(do_hash_page)
andis. r0,r4,0xa450 /* weird error? */
bne- .handle_page_fault /* if not, try to insert a HPTE */
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- .do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

/*
 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 * accessing a userspace segment (even from the kernel).  We assume
 * kernel addresses always have the high bit set.
 */
rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
orc r0,r12,r0 /* MSR_PR | ~high_bit */
rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
ori r4,r4,1 /* add _PAGE_PRESENT */
rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
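/*
 * Sketch (an assumption, following the instruction comments above):
 * the access-flag computation in C.  DSISR_STORE is an illustrative
 * constant name.
 *
 *	access  = (dsisr & DSISR_STORE) ? _PAGE_RW : 0;
 *	if ((msr & MSR_PR) || !(ea >> 63))	// user mode, or user segment
 *		access |= _PAGE_USER;
 *	access |= _PAGE_PRESENT;
 *	if (trap == 0x400)			// instruction access fault
 *		access |= _PAGE_EXEC;
 */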
/*
 * On iSeries, we soft-disable interrupts here, then
 * hard-enable interrupts so that the hash_page code can spin on
 * the hash_table_lock without problems on a shared processor.
 */

/*
 * r3 contains the faulting address
 * r4 contains the required access permissions
 * r5 contains the trap number
 *
 * at return r3 = 0 for success
 */
bl .hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
/*
 * If we had interrupts soft-enabled at the point where the
 * DSI/ISI occurred, and an interrupt came in during hash_page,
 * we jump to ret_from_except_lite rather than fast_exception_return
 * because ret_from_except_lite will check for and handle pending
 * interrupts if necessary.
 */
beq .ret_from_except_lite
/* For a hash failure, we don't bother re-enabling interrupts */

/*
 * hash_page couldn't handle it, set soft interrupt enable back
 * to what it was before the trap.  Note that .local_irq_restore
 * handles any interrupts pending at this point.
 */
bl .local_irq_restore
beq fast_exception_return /* Return from exception on success */
ble- 12f /* Failure return from hash_page */
/* Here we have a page fault that hash_page can't handle. */
_GLOBAL(handle_page_fault)
addi r3,r1,STACK_FRAME_OVERHEAD
beq+ .ret_from_except_lite
addi r3,r1,STACK_FRAME_OVERHEAD

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
addi r3,r1,STACK_FRAME_OVERHEAD

/* here we have a segment miss */
_GLOBAL(do_ste_alloc)
bl .ste_allocate /* try to insert stab entry */
beq+ fast_exception_return
b .handle_page_fault
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
rldimi r10,r11,7,52 /* r10 = first ste of the group */

/* Calculate VSID */
/* This is a kernel address, so protovsid = ESID */
ASM_VSID_SCRAMBLE(r11, r9)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
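/*
 * Sketch (an assumption; constant names are quoted from memory and may
 * not match this tree exactly): ASM_VSID_SCRAMBLE computes a
 * multiplicative hash of the protovsid modulo 2^VSID_BITS - 1, roughly:
 *
 *	x = protovsid * VSID_MULTIPLIER;
 *	x = (x >> VSID_BITS) + (x & VSID_MODULUS);	// fold high bits in
 *	x += (x + 1) >> VSID_BITS;			// final carry fold
 */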
/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */

/* Stick to only searching the primary group for now. */
/* At least for now, we use a very simple random castout scheme */
/* Use the TB as a random number; OR in 1 to avoid entry 0 */
rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */

/* r10 currently points to an ste one past the group of interest */
/* make it point to the randomly selected entry */
or r10,r10,r11 /* r10 is the entry to invalidate */

isync /* mark the entry invalid */
rldicl r11,r11,56,1 /* clear the valid bit */
clrrdi r11,r11,28 /* Get the esid part of the ste */
2: std r9,8(r10) /* Store the vsid part of the ste */

mfspr r11,SPRN_DAR /* Get the new esid */
clrrdi r11,r11,28 /* Permits a full 32b of ESID */
ori r11,r11,0x90 /* Turn on valid and kp */
std r11,0(r10) /* Put new entry back into the stab */
/* All done -- return from exception. */
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
andi. r10,r12,MSR_RI
mtcrf 0x80,r9 /* restore CR */
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
b . /* prevent speculative execution */
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
. = STAB0_PHYS_ADDR /* 0x6000 */

/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
.globl fwnmi_data_area

/* iSeries does not use the FWNMI stuff, so it is safe to put
 * this here, even if we later allow kernels that will boot on
 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
#endif /* CONFIG_PPC_ISERIES */
/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(pSeries_secondary_smp_init)

/* turn on 64-bit mode */

/* Copy some CPU settings from CPU 0 */
bl .__restore_cpu_setup

/* Set up a paca value for this processor.  Since we have the
 * physical cpu id in r24, we need to search the pacas to find
 * which logical id maps to our physical one.
 */
LOADADDR(r13, paca) /* Get base vaddr of paca array */
li r5,0 /* logical cpu id */
1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
cmpw r6,r24 /* Compare to our id */
addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
mr r3,r24 /* not found, copy phys to r3 */
b .kexec_wait /* next kernel might do better */

2: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
/* From now on, r24 is expected to be logical cpuid */
lbz r23,PACAPROCSTART(r13) /* Test if this processor should start */

/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD

bne .__secondary_start
b 3b /* Loop until told to go */
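/*
 * Sketch (an assumption, with illustrative field names): the paca
 * search above in C.
 *
 *	for (i = 0; i < NR_CPUS; i++, pacap++)
 *		if (pacap->hw_cpu_id == phys_id)
 *			break;		// r13 = &paca[i], r24 = i
 *	if (i >= NR_CPUS)
 *		kexec_wait(phys_id);	// unknown cpu: park it for kexec
 */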
#ifdef CONFIG_PPC_ISERIES
_STATIC(__start_initialization_iSeries)
/* Clear out the BSS */
LOADADDR(r11,__bss_stop)
LOADADDR(r8,__bss_start)
sub r11,r11,r8 /* bss size */
addi r11,r11,7 /* round up to an even double word */
rldicl. r11,r11,61,3 /* shift right by 3 */
mtctr r11 /* zero this many doublewords */
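/*
 * Sketch (an assumption): the BSS-clearing loop above, in C.
 *
 *	extern char __bss_start[], __bss_stop[];
 *	unsigned long *p = (unsigned long *)__bss_start;
 *	long n = (__bss_stop - __bss_start + 7) / 8;	// round up to doublewords
 *	while (n-- > 0)
 *		*p++ = 0;
 */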
LOADADDR(r1,init_thread_union)
addi r1,r1,THREAD_SIZE
stdu r0,-STACK_FRAME_OVERHEAD(r1)

LOADADDR(r3,cpu_specs)
LOADADDR(r4,cur_cpu_spec)
LOADADDR(r2,__toc_start)

bl .iSeries_early_setup

/* relocation is on at this point */
b .start_here_common
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_MULTIPLATFORM
andi. r0,r3,MSR_IR|MSR_DR
b . /* prevent speculative execution */
/*
 * Here is our main kernel entry point.  We currently support two kinds
 * of entry, depending on the value of r5.
 *
 * r5 != NULL -> OF entry: we go to prom_init, "legacy" parameter content
 *
 * r5 == NULL -> kexec-style entry: r3 is a physical pointer to the
 *   DT block, r4 is a physical pointer to the kernel itself
 */
_GLOBAL(__start_initialization_multiplatform)
/*
 * Are we booted from a PROM OF-type client interface?
 */
bne .__boot_from_prom /* yes -> prom */

/* Save parameters */

/* Make sure we are running in 64-bit mode */

/* Setup some critical 970 SPRs before switching MMU off */
bl .__970_cpu_preinit

/* Switch off MMU if not already */
LOADADDR(r4, .__after_prom_start - KERNELBASE)
b .__after_prom_start
_STATIC(__boot_from_prom)
/* Save parameters */

/* Make sure we are running in 64-bit mode */

/* put a relocation offset into r3 */
LOADADDR(r2,__toc_start)

/* Relocate the TOC from a virt addr to a real addr */

/* Restore parameters */

/* Do all of the interaction with OF client interface */

/* We never return */
/*
 * At this point, r3 contains the physical address we are running at,
 * returned by prom_init()
 */
_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 * r26 == relocation offset
 */
SET_REG_TO_CONST(r27,KERNELBASE)

li r3,0 /* target addr */

// XXX FIXME: Use phys returned by OF (r30)
add r4,r27,r26 /* source addr */
/* current address of _start */
/* i.e. where we are running */
/* the source addr */

LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */

li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */

bl .copy_and_flush /* copy the first n bytes */
/* this includes the code being */
/* executed here. */

LOADADDR(r0, 4f) /* Jump to the copy of this code */
mtctr r0 /* that we just made/relocated */

4: LOADADDR(r5,klimit)
ld r5,0(r5) /* get the value of klimit */
bl .copy_and_flush /* copy the rest */
b .start_here_multiplatform
#endif /* CONFIG_PPC_MULTIPLATFORM */

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
4: li r0,16 /* Use the least common */
/* denominator cache line */
/* size.  This results in */
/* extra cache line flushes */
/* but operation is correct. */
/* Can't get cache line size */
/* from NACA as it is being */
mtctr r0 /* put # words/line in ctr */
3: addi r6,r6,8 /* copy a cache line */
dcbst r6,r3 /* write it to memory */
icbi r6,r3 /* flush the icache line */
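/*
 * Sketch (an assumption; helper names are illustrative): copy_and_flush
 * in C.  The 128-byte stride follows from the li r0,16 / mtctr pair
 * above (16 doublewords per assumed "cache line").
 *
 *	unsigned long copy_and_flush(char *dest, const char *src,
 *				     unsigned long limit, unsigned long off)
 *	{
 *		for (; off < limit; off += 128) {
 *			memcpy(dest + off, src + off, 128);
 *			dcbst(dest + off);	// push the line to memory
 *			icbi(dest + off);	// invalidate stale icache
 *		}
 *		return off;			// >= limit on exit, like r6
 *	}
 */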
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
.globl __secondary_start_pmac_0
__secondary_start_pmac_0:
/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */

_GLOBAL(pmac_secondary_start)
/* turn on 64-bit mode */

/* Copy some CPU settings from CPU 0 */
bl .__restore_cpu_setup

/* pSeries does that early, though I don't think we really need it */
mtmsrd r3 /* RI on */

/* Set up a paca value for this processor. */
LOADADDR(r4, paca) /* Get base vaddr of paca array */
mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r4 /* for this processor. */
mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */

/* Create a temp kernel stack for use before relocation is on. */
ld r1,PACAEMERGSP(r13)
subi r1,r1,STACK_FRAME_OVERHEAD

b .__secondary_start
#endif /* CONFIG_PPC_PMAC */
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1    = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL(__secondary_start)
/* Set thread priority to MEDIUM */

/* Do early setup for that CPU (stab, slb, hash table pointer) */
bl .early_setup_secondary

/* Initialize the kernel stack.  Just a repeat for iSeries. */
LOADADDR(r3,current_set)
sldi r28,r24,3 /* get current_set[cpu#] */
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
std r1,PACAKSAVE(r13)

/* Clear backchain so we get nice backtraces */

/* enable MMU and jump to start_secondary */
LOADADDR(r3,.start_secondary_prolog)
SET_REG_TO_CONST(r4, MSR_KERNEL)
#ifdef DO_SOFT_DISABLE
b . /* prevent speculative execution */
/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
std r3,0(r1) /* Zero the stack frame pointer */

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
mfmsr r11 /* grab the current MSR */
rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
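/*
 * Sketch (an assumption): the MSR manipulation above, in C.
 *
 *	unsigned long msr = mfmsr();
 *	msr |= 1UL << MSR_SF_LG;	// 64-bit mode
 *	msr |= 1UL << MSR_ISF_LG;	// 64-bit interrupt entry
 *	mtmsrd(msr);
 */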
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
/* get a new offset, now that the kernel has moved. */

/* Clear out the BSS.  It may have been done in prom_init already,
 * but that's irrelevant since prom_init will soon be detached from
 * the kernel completely.  Besides, we need to clear it now for
 * kexec-style entry.
 */
LOADADDR(r11,__bss_stop)
LOADADDR(r8,__bss_start)
sub r11,r11,r8 /* bss size */
addi r11,r11,7 /* round up to an even double word */
rldicl. r11,r11,61,3 /* shift right by 3 */
mtctr r11 /* zero this many doublewords */

mtmsrd r6 /* RI on */
/* Start up the second thread on cpu 0 */
cmpwi r3,0x34 /* Pulsar */
cmpwi r3,0x36 /* Icestar */
cmpwi r3,0x37 /* SStar */
b 91f /* HMT not supported */
bl .hmt_start_secondary
/* The following gets the stack and TOC set up with the regs */
/* pointing to the real addr of the kernel stack.  This is */
/* all done to support the C function call below which sets */
/* up the htab.  This is done because we have relocated the */
/* kernel but are still running in real mode. */

LOADADDR(r3,init_thread_union)

/* set up a stack pointer (physical address) */
addi r1,r3,THREAD_SIZE
stdu r0,-STACK_FRAME_OVERHEAD(r1)

/* set up the TOC (physical address) */
LOADADDR(r2,__toc_start)

LOADADDR(r3,cpu_specs)
LOADADDR(r4,cur_cpu_spec)

/* Save some low level config HIDs of CPU0 to be copied to
 * other CPUs later on, or used for suspend/resume
 */
bl .__save_cpu_setup

/* Setup a valid physical PACA pointer in SPRG3 for early_setup;
 * note that boot_cpuid can always be 0 nowadays since there is
 * nowhere it can be initialized differently before we reach this
 * code
 */
LOADADDR(r27, boot_cpuid)

LOADADDR(r24, paca) /* Get base vaddr of paca array */
mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r24 /* for this processor. */
add r13,r13,r26 /* convert to physical addr */
mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */

/* Do very early kernel initializations, including initial hash table,
 * stab and slb setup before we turn on relocation. */

/* Restore parameters passed from prom_init/kexec */

LOADADDR(r3,.start_here_common)
SET_REG_TO_CONST(r4, MSR_KERNEL)
b . /* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */

/* This is where all platforms converge execution */
_STATIC(start_here_common)
/* relocation is on at this point */

/* The following code sets up the SP and TOC now that we are */
/* running with translation enabled. */
LOADADDR(r3,init_thread_union)

/* set up the stack */
addi r1,r3,THREAD_SIZE
stdu r0,-STACK_FRAME_OVERHEAD(r1)

/* Apply the CPU-specific fixups (nop out sections not relevant
 * to this cpu)
 */
bl .do_cpu_ftr_fixups

LOADADDR(r26, boot_cpuid)
LOADADDR(r24, paca) /* Get base vaddr of paca array */
mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
add r13,r13,r24 /* for this processor. */
mtspr SPRN_SPRG3,r13

/* ptr to current */
LOADADDR(r4,init_task)
std r4,PACACURRENT(r13)

std r1,PACAKSAVE(r13)

/* Load up the kernel context */
#ifdef DO_SOFT_DISABLE
stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
ori r5,r5,MSR_EE /* Hard Enabled */
LOADADDR(r5, hmt_thread_data)
cmpwi r7,0x34 /* Pulsar */
cmpwi r7,0x36 /* Icestar */
cmpwi r7,0x37 /* SStar */
90: mfspr r6,SPRN_PIR
91: mfspr r6,SPRN_PIR
bl .hmt_start_secondary

__hmt_secondary_hold:
LOADADDR(r5, hmt_thread_data)
93: andi. r6,r6,0x3f
b .pSeries_secondary_smp_init

_GLOBAL(hmt_start_secondary)
LOADADDR(r4,__hmt_secondary_hold)
mtspr SPRN_NIADORM, r4
mfspr r4, SPRN_MSRDORM
mtspr SPRN_MSRDORM, r4
mfspr r4, SPRN_CTRLF
mtspr SPRN_CTRLT, r4
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
.globl empty_zero_page

.globl swapper_pg_dir

/*
 * This space gets a copy of optional info passed to us by the bootstrap.
 * It is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
.space COMMAND_LINE_SIZE