 * linux/arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (c) 2003 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Kernel startup code for all 32-bit CPUs
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/mach-types.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/constants.h>
#include <asm/thread_info.h>
#include <asm/system.h>
#define PROCINFO_MMUFLAGS	8
#define PROCINFO_INITFUNC	12

#define MACHINFO_TYPE		0
#define MACHINFO_PHYSRAM	4
#define MACHINFO_PHYSIO		8
#define MACHINFO_PGOFFIO	12
#define MACHINFO_NAME		16
#ifndef CONFIG_XIP_KERNEL

/*
 * We place the page tables 16K below TEXTADDR.  Therefore, we must make sure
 * that TEXTADDR is correctly set.  Currently, we expect the least significant
 * 16 bits to be 0x8000, but we could probably relax this restriction to
 * TEXTADDR >= PAGE_OFFSET + 0x4000
 *
 * Note that swapper_pg_dir is the virtual address of the page tables, and
 * pgtbl gives us a position-independent reference to these tables.  We can
 * do this because stext == TEXTADDR
 */
#if (TEXTADDR & 0xffff) != 0x8000
#error TEXTADDR must start at 0xXXXX8000
#endif

	.equ	swapper_pg_dir, TEXTADDR - 0x4000
	.macro	pgtbl, rd, phys
	adr	\rd, stext
	sub	\rd, \rd, #0x4000
	.endm
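/*
 * For example, with TEXTADDR = 0xc0008000 (a typical but not required
 * value), swapper_pg_dir is the virtual address 0xc0004000, and pgtbl
 * computes the corresponding physical address as "address of stext"
 * minus 16K, without needing to know where the kernel was loaded.
 */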
#else

/*
 * We place the page tables 16K below DATAADDR.  Therefore, we must make sure
 * that DATAADDR is correctly set.  Currently, we expect the least significant
 * 16 bits to be 0x8000, but we could probably relax this restriction to
 * DATAADDR >= PAGE_OFFSET + 0x4000
 *
 * Note that pgtbl is meant to return the physical address of swapper_pg_dir.
 * We can't make it relative to the kernel position in this case since
 * the kernel can physically be anywhere.
 */
#if (DATAADDR & 0xffff) != 0x8000
#error DATAADDR must start at 0xXXXX8000
#endif

	.equ	swapper_pg_dir, DATAADDR - 0x4000
	.macro	pgtbl, rd, phys
	ldr	\rd, =((DATAADDR - 0x4000) - VIRT_OFFSET)
	add	\rd, \rd, \phys
	.endm
#endif
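/*
 * Either way, pgtbl leaves \rd holding the physical address of
 * swapper_pg_dir: position-independently via stext in the normal case,
 * or, for XIP, as \phys (the physical RAM base, MACHINFO_PHYSRAM) plus
 * the offset of the tables within RAM (DATAADDR - 0x4000 - VIRT_OFFSET).
 * __create_page_tables below uses it as "pgtbl r4, r5".
 */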
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
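/*
 * Illustrative hand-over state from a typical boot loader or the
 * decompressor (the concrete values are examples, not requirements
 * beyond the rules stated above):
 *
 *   r0 = 0
 *   r1 = machine number, e.g. MACH_TYPE_NETWINDER from mach-types
 *   pc = physical address of stext, i.e. __pa(TEXTADDR)
 *   MMU off, D-cache off, I-cache state irrelevant
 */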
	.type	stext, %function
ENTRY(stext)
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
						@ and irqs disabled
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__create_page_tables
	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, __enable_mmu		@ return (PIC) address
	add	pc, r10, #PROCINFO_INITFUNC
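	/*
	 * The "add pc" above jumps to offset PROCINFO_INITFUNC (12) within
	 * the proc_info entry found by __lookup_processor_type, i.e. into
	 * the CPU specific setup code described in arch/arm/mm/proc-*.S.
	 * Because lr was pointed at __enable_mmu, that code "returns" into
	 * __enable_mmu with the desired control register value in r0; once
	 * the MMU is on, control finally reaches the address loaded into
	 * r13 above - the first word of __switch_data, i.e. __mmap_switched.
	 */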
	.type	__switch_data, %object
__switch_data:
	.long	__mmap_switched
	.long	__data_loc			@ r4
	.long	__data_start			@ r5
	.long	__bss_start			@ r6
	.long	_end				@ r7
	.long	processor_id			@ r4
	.long	__machine_arch_type		@ r5
	.long	cr_alignment			@ r6
	.long	init_thread_union + THREAD_START_SP @ sp
/*
 * The following fragment of code is executed with the MMU on, and uses
 * absolute addresses; this is not position independent.
 *
 *  r0  = cp#15 control register
 */
	.type	__mmap_switched, %function
__mmap_switched:
	adr	r3, __switch_data + 4

	ldmia	r3!, {r4, r5, r6, r7}
	cmp	r4, r5				@ Copy data segment if needed

	mov	fp, #0				@ Clear BSS (and zero fp)

	ldmia	r3, {r4, r5, r6, sp}
	str	r9, [r4]			@ Save processor ID
	str	r1, [r5]			@ Save machine type
	bic	r4, r0, #CR_A			@ Clear 'A' bit
	stmia	r6, {r0, r4}			@ Save control register values
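	/*
	 * The stmia stores two consecutive words starting at cr_alignment:
	 * r0 (the control register value as configured) and r4 (the same
	 * value with the alignment fault 'A' bit cleared).  This assumes the
	 * usual layout in which cr_no_alignment immediately follows
	 * cr_alignment in memory.
	 */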
#if defined(CONFIG_SMP)
	.type	secondary_startup, %function
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	/*
	 * Use the page tables supplied from __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r6, r13}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r6, r4]			@ get secondary_data.pgdir
	adr	lr, __enable_mmu		@ return address
	add	pc, r10, #PROCINFO_INITFUNC	@ initialise processor
						@ (return control reg)
	/*
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r6, #4]			@ get secondary_data.stack
	b	secondary_start_kernel
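	/*
	 * The loads above assume the layout of struct secondary_data set up
	 * by __cpu_up: the new CPU's page directory (pgdir) at offset 0 and
	 * its stack pointer at offset 4.  That is why the pgdir is fetched
	 * via [r6, r4] (the physical view, before the MMU is on) and the
	 * stack via [r6, #4] afterwards.
	 */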
	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */
/*
 * Set up common bits before finally enabling the MMU.  Essentially this is
 * just loading the page table pointer and domain access registers.
 */
	.type	__enable_mmu, %function
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
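	/*
	 * Per the ARM domain access control encoding (two bits per domain,
	 * 01 = client, 11 = manager), the value built in r5 above grants
	 * manager access (no permission checks) to the USER, KERNEL and
	 * TABLE domains, and client access (checked against the page table
	 * permissions) to the IO domain; domain_val(dom, type) is simply
	 * "type << (2 * dom)".
	 */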
/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.type	__turn_mmu_on, %function
__turn_mmu_on:
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
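	/*
	 * Reading back the ID register immediately after the control
	 * register write is the usual idiom here for making sure the write
	 * has taken effect before the jump through r13 switches execution
	 * to the new virtual mapping; these cores have no dedicated barrier
	 * instruction for this.
	 */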
/*
 * Set up the initial page tables.  We only set up the barest
 * amount which is required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * Returns:
 *  r0, r3, r5, r6, r7 corrupted
 *  r4 = physical page table address
 */
	.type	__create_page_tables, %function
__create_page_tables:
	ldr	r5, [r8, #MACHINFO_PHYSRAM]	@ physram
	pgtbl	r4, r5				@ page table address
	/*
	 * Clear the 16K level 1 swapper page table
	 */

	ldr	r7, [r10, #PROCINFO_MMUFLAGS]	@ mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine the corresponding section base address.
	 */
	mov	r6, pc, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping
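	/*
	 * Worked example (address purely illustrative): if this code happens
	 * to be executing at physical 0x10008000, then r6 = pc >> 20 = 0x100,
	 * the entry written sits at byte offset 0x100 * 4 = 0x400 into the
	 * page table, and its value is 0x10000000 | mmuflags - a 1MB section
	 * descriptor mapping virtual 0x10000000-0x100fffff onto the identical
	 * physical addresses.
	 */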
	/*
	 * Now set up the page tables for our kernel direct
	 * mapped region.  We round TEXTADDR down to the
	 * nearest megabyte boundary.  It is assumed that
	 * the kernel fits within 4 contiguous 1MB sections.
	 */
	add	r0, r4, #(TEXTADDR & 0xff000000) >> 18	@ start of kernel
	str	r3, [r0, #(TEXTADDR & 0x00f00000) >> 18]!

	str	r3, [r0, #4]!			@ KERNEL + 1MB
	str	r3, [r0, #4]!			@ KERNEL + 2MB
	str	r3, [r0, #4]			@ KERNEL + 3MB
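	/*
	 * The pgd slot for TEXTADDR is (TEXTADDR >> 20) entries into the
	 * table, i.e. byte offset (TEXTADDR >> 20) * 4 = TEXTADDR >> 18.
	 * That offset is added in two pieces - (TEXTADDR & 0xff000000) >> 18
	 * and (TEXTADDR & 0x00f00000) >> 18 - so that each piece can be
	 * encoded as an ARM immediate.  The three extra stores with
	 * write-back then fill the next three slots, which is why the kernel
	 * must fit in 4 contiguous 1MB sections.
	 */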
	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #VIRT_OFFSET >> 18

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 * Mapping 3MB should be plenty.
	 */
	add	r0, r0, r3, lsl #2
	add	r6, r6, r3, lsl #20

	add	r6, r6, #(1 << 20)

	add	r6, r6, #(1 << 20)
#endif
#ifdef CONFIG_DEBUG_LL
	bic	r7, r7, #0x0c			@ turn off cacheable
						@ and bufferable bits
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]

	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB

	ldr	r3, [r8, #MACHINFO_PHYSIO]
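	/*
	 * MACHINFO_PGOFFIO is the byte offset into the pgd of the first IO
	 * section entry, i.e. (IO virtual base >> 20) * 4.  Subtracting it
	 * from 0x4000 (PTRS_PER_PGD * sizeof(long)) gives the room left in
	 * the table, and the comparison with 0x0800 caps the mapping at 512
	 * entries, i.e. 512MB of 1MB sections starting at MACHINFO_PHYSIO.
	 */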
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder, we need to map in
	 * the 16550-type serial port for the debug messages
	 */
	teq	r1, #MACH_TYPE_NETWINDER
	teqne	r1, #MACH_TYPE_CATS

	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000

	add	r0, r4, #0xd8000000 >> 18
/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set we try to print out something about the error
 * and hope for the best (useful if the boot loader fails to pass a proper
 * machine ID, for example).
 */
	.type	__error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL

str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant.\n"
	.type	__error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
	mov	r4, r1				@ preserve machine ID

	ldmia	r3, {r4, r5, r6}		@ get machine desc list
	sub	r4, r3, r4			@ get offset between virt&phys
	add	r5, r5, r4			@ convert virt addresses to
	add	r6, r6, r4			@ physical address space
1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type

	ldr	r0, [r5, #MACHINFO_NAME]	@ get machine name

	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc

str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
	.type	__error, %function
__error:
#ifdef CONFIG_ARCH_RPC
	/*
	 * Turn the screen red on an error - RiscPC only.
	 */

	orr	r3, r3, r3, lsl #8
	orr	r3, r3, r3, lsl #16
/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 *  r3, r4, r6 corrupted
 *  r5 = proc_info pointer in physical address space
 */
	.type	__lookup_processor_type, %function
__lookup_processor_type:
	adr	r3, 3f
	ldmda	r3, {r5, r6, r9}
	sub	r3, r3, r9			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
	mrc	p15, 0, r9, c0, c0		@ get processor id
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits

	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)

	mov	r5, #0				@ unknown processor
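/*
 * Each proc_info_list entry begins with a { value, mask } pair: an entry
 * matches when (cpuid & mask) == value.  On a match, r5 is left pointing
 * at the entry (in the physical view computed above); if the end of the
 * list is reached, r5 is set to 0 and the caller reports error 'p'.
 */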
/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_processor_type)
	stmfd	sp!, {r4 - r6, r9, lr}
	bl	__lookup_processor_type

	ldmfd	sp!, {r4 - r6, r9, pc}
/*
 * Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
 * more information about the __proc_info and __arch_info structures.
 */
	.long	__proc_info_begin
	.long	__proc_info_end
3:	.long	.
	.long	__arch_info_begin
	.long	__arch_info_end
/*
 * Look up the machine architecture in the linker-built list of architectures.
 * Note that we can't use the absolute addresses for the __arch_info
 * lists since we aren't running with the MMU on (and therefore, we are
 * not in the correct address space).  We have to calculate the offset.
 *
 *  r1 = machine architecture number
 *  r3, r4, r6 corrupted
 *  r5 = mach_info pointer in physical address space
 */
	.type	__lookup_machine_type, %function
__lookup_machine_type:
	adr	r3, 3b
	ldmia	r3, {r4, r5, r6}
	sub	r3, r3, r4			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
	teq	r3, r1				@ matches loader number?

	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc

	mov	r5, #0				@ unknown machine
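/*
 * Each machine_desc starts with its machine number (MACHINFO_TYPE, offset
 * 0), which is compared against r1, the number handed over by the boot
 * loader (e.g. MACH_TYPE_NETWINDER above).  r5 ends up pointing at the
 * matching descriptor, or 0 if the ID is unknown, in which case stext
 * branches to __error_a.
 */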
/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_machine_type)
	stmfd	sp!, {r4 - r6, lr}
	mov	r1, r0
	bl	__lookup_machine_type

	ldmfd	sp!, {r4 - r6, pc}