/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/head.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * benedict.gaster@superh.com:	2nd May 2002
 *    Moved definition of empty_zero_page to its own section allowing
 *    it to be placed at an absolute address known at load time.
 *
 * lethal@linux-sh.org:		9th May 2003
 *    Kill off GLOBAL_NAME() usage.
 *
 * lethal@linux-sh.org:		8th May 2004
 *    Add early SCIF console DTLB mapping.
 */
23 #include <linux/config.h>
26 #include <asm/mmu_context.h>
27 #include <asm/cache.h>
29 #include <asm/processor.h>
30 #include <asm/registers.h>
31 #include <asm/thread_info.h>
/*
 * MMU defines: TLB boundaries.
 * FIRST/STEP/END describe the walk over the ITLB/DTLB configuration
 * registers; END is one TLB_STEP past the last variable entry.
 * The END expressions are parenthesised so they expand safely inside
 * larger expressions.
 */
#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	(ITLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	(DTLB_LAST_VAR_UNRESTRICTED + TLB_STEP)
#define MMUDR_STEP	TLB_STEP
/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
#endif
/*
 * MMU defines: Fixed TLBs.
 * Deal safely with the case where the base of RAM is not 512Mb aligned:
 * round the effective and physical addresses down to a 512Mb boundary
 * before building the PTEH/PTEL images.
 * All macro bodies are parenthesised (the original MMUDR_CACHED_H/L
 * were not, unlike MMUIR_TEXT_H/L) so they expand safely.
 */
#define ALIGN_512M_MASK		(0xffffffffe0000000)
#define ALIGNED_EFFECTIVE	((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL	(CONFIG_MEMORY_START & ALIGN_512M_MASK)

#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

#define MMUDR_CACHED_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L	(0x000000000000015a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
/*
 * Initial instruction-cache controller values, written to ICCR0/ICCR1
 * in the cache-setup code below. Exactly one ICCR0_INIT_VAL definition
 * is selected per build (the visible span previously had both arms
 * with no #else/#endif, i.e. an unbalanced conditional).
 */
#ifdef CONFIG_ICACHE_DISABLED
#define ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define ICCR0_INIT_VAL	(ICCR0_ON | ICCR0_ICI)		/* ICE + ICI */
#endif
#define ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */
/*
 * Initial data-cache (OCCR) controller values. Exactly one of the
 * CONFIG_DCACHE_* options must be configured; otherwise the build
 * fails here rather than booting with an undefined cache mode.
 */
#if defined (CONFIG_DCACHE_DISABLED)
#define OCCR0_INIT_VAL	OCCR0_OFF				/* D-cache: off */
#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
#define OCCR0_INIT_VAL	(OCCR0_ON | OCCR0_OCI | OCCR0_WT)	/* D-cache: on, WT, invalidate */
#elif defined (CONFIG_DCACHE_WRITE_BACK)
#define OCCR0_INIT_VAL	(OCCR0_ON | OCCR0_OCI | OCCR0_WB)	/* D-cache: on, WB, invalidate */
#else
#error preprocessor flag CONFIG_DCACHE_... not recognized!
#endif

#define OCCR1_INIT_VAL	OCCR1_NOLOCK				/* No locking */
/*
 * empty_zero_page lives in its own section so it can be placed at an
 * absolute address known at load time (see file header). Its first
 * words double as boot parameters; the offsets below are therefore
 * fixed — do not reorder.
 * (The defining label was missing from this span although the symbol
 * was declared .global; restored here.)
 */
	.section .empty_zero_page, "aw"
	.global empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
/*
 * Page-table / fallback-page symbols exported to C code.
 * NOTE(review): the backing storage for these symbols (.balign and
 * .space reservations) appears elided from this excerpt — confirm
 * against the full file before relying on layout.
 */
	.global	swapper_pg_dir
	.global	empty_bad_page
	.global	empty_bad_pte_table
.balign	L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . SR.FD    = 1		(FPU disabled)
 * . SR.BL    = 1		(Exceptions disabled)
 * . SR.MD    = 1		(Privileged Mode)
 * . SR.MMU   = 0		(MMU Disabled)
 * . SR.CD    = 0		(CTC User Visible)
 * . SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all to later Linux initialization steps.
 * . jump into start_kernel()
 * . be prepared to hopeless start_kernel() returns.
 */
/*
 * Prevent speculative fetch on device memory due to
 * uninitialized target registers.
 * NOTE(review): the target-register initialisation instructions are
 * elided from this excerpt.
 */

/*
 * Read/Set CPU state. After this block:
 * r20 = SR_HARMLESS — presumably a safe SR image to run with during
 * early boot; the getcon/putcon lines using it are elided here — TODO
 * confirm against the SR_HARMLESS definition.
 */
181 movi SR_HARMLESS, r20

/*
 * Initialize EMI/LMI. To Be Done.
 */

/*
 * CPU detection and/or endianness settings (?). To Be Done.
 * Pure PIC code here, please ! Just save state into r30.
 * After this block:
 * r30 = CPU type/Platform Endianness
 */

/*
 * Set initial TLB entries for cached and uncached regions.
 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
 */
/*
 * Invalidate the instruction TLB: walk the MMUIR config registers from
 * MMUIR_FIRST in MMUIR_STEP strides, clearing each entry's PTEH valid
 * bit. NOTE(review): the loop bound load (MMUIR_END) and the backward
 * branch are elided from this excerpt; only the loop body is visible.
 */
201 movi MMUIR_FIRST, r21
204 putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
205 addi r21, MMUIR_STEP, r21

/* Same walk over the data TLB (MMUDR_FIRST .. MMUDR_END). */
210 movi MMUDR_FIRST, r21
213 putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
214 addi r21, MMUDR_STEP, r21
/*
 * Install the fixed 512Mb translations. For each entry PTEL is written
 * before PTEH: PTEH holds the valid bit (cleared above), so the
 * mapping only becomes live once the entry is fully formed. The
 * add.l with r63 sign-extends the 32-bit immediates to 64 bits.
 */
/* Map one big (512Mb) page for ITLB */
218 movi MMUIR_FIRST, r21
219 movi MMUIR_TEXT_L, r22 /* PTEL first */
220 add.l r22, r63, r22 /* Sign extend */
221 putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
222 movi MMUIR_TEXT_H, r22 /* PTEH last */
223 add.l r22, r63, r22 /* Sign extend */
224 putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */

/* Map one big CACHED (512Mb) page for DTLB */
227 movi MMUDR_FIRST, r21
228 movi MMUDR_CACHED_L, r22 /* PTEL first */
229 add.l r22, r63, r22 /* Sign extend */
230 putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
231 movi MMUDR_CACHED_H, r22 /* PTEH last */
232 add.l r22, r63, r22 /* Sign extend */
233 putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
235 #ifdef CONFIG_EARLY_PRINTK
/*
 * Setup a DTLB translation for SCIF phys, so the early console works
 * before the full MMU setup. Uses the next DTLB slot after the
 * cached-RAM entry (r21 still indexes MMUDR config space).
 * NOTE(review): the shift/or instructions that build the full 32-bit
 * PTEL/PTEH values from the 0x0a03/0xfa03 immediates, and the matching
 * #endif, are elided from this excerpt.
 */
239 addi r21, MMUDR_STEP, r21
240 movi 0x0a03, r22 /* SCIF phys */
242 putcfg r21, 1, r22 /* PTEL first */
243 movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
245 putcfg r21, 0, r22 /* PTEH last */
/*
 * Set cache behaviours: write the configuration values selected by the
 * ICCR0/1 and OCCR0/1 *_INIT_VAL defines above into the cache control
 * registers via config-space stores.
 * NOTE(review): the movi loading the ICCR/OCCR config-space base into
 * r21 before each putcfg pair is elided from this excerpt.
 */
253 movi ICCR0_INIT_VAL, r22 /* I-cache on/off (+ invalidate) */
254 movi ICCR1_INIT_VAL, r23 /* I-cache locking: none */
255 putcfg r21, ICCR_REG0, r22
256 putcfg r21, ICCR_REG1, r23

260 movi OCCR0_INIT_VAL, r22 /* D-cache mode per CONFIG_DCACHE_* */
261 movi OCCR1_INIT_VAL, r23 /* D-cache locking: none */
262 putcfg r21, OCCR_REG0, r22
263 putcfg r21, OCCR_REG1, r23
/*
 * Enable Caches and MMU. Do the first non-PIC jump.
 * Now head.S global variables, constants and externs
 * can be used (the fixed translations installed above cover them).
 * NOTE(review): the putcon instructions loading SSR/SPC for the rte
 * are elided from this excerpt.
 */
272 movi SR_ENABLE_MMU, r22
276 ori r22, 1, r22 /* Make it SHmedia, not required but..*/
279 rte /* And now go into the hyperspace ... */
280 hyperspace: /* ... that's the next instruction ! */

/*
 * Set CPU to a consistent state.
 * r31 = FPU support flag
 * tr0/tr7 in use. Others give a chance to loop somewhere safe
 */
287 movi start_kernel, r32
290 ptabs r32, tr0 /* r32 = _start_kernel address */
298 gettr tr1, r28 /* r28 = hopeless address */

/* Set initial stack pointer: top of init_thread_union's THREAD_SIZE
   area (the add combining SP and r22 is elided from this excerpt). */
301 movi init_thread_union, SP
302 putcon SP, KCR0 /* Set current to init_task */
303 movi THREAD_SIZE, r22 /* Point to the end */
/*
 * Keep FPU flag in r31. After this block:
 * r31 appears to be a 0/1 FPU-availability flag (see "Supposedly 0/1"
 * below), mirrored into the fpu_in_use variable — TODO confirm.
 * NOTE(review): the getcon reading SR into r21 after the enable
 * attempt, and the branch selecting between the two st.q arms, are
 * elided from this excerpt (hence the apparent duplicate store).
 */
311 movi fpu_in_use, r31 /* Temporary */
315 movi SR_ENABLE_FPU, r22
317 putcon r22, SR /* Try to enable */
320 shlri r21, 15, r21 /* Supposedly 0/1 */
321 st.q r31, 0 , r21 /* Set fpu_in_use */
324 st.q r31, 0 , r21 /* Set fpu_in_use */
326 or r21, ZERO, r31 /* Set FPU flag at last */
328 #ifndef CONFIG_SH_NO_BSS_INIT
/*
 * Don't clear BSS if running on slow platforms such as an RTL
 * simulation, remote memory via SHdebug link, etc. For these the
 * memory can be guaranteed to be all zero on boot anyway.
 *
 * NOTE(review): the load of the BSS end address into r23, the store
 * loop body and the closing #endif are elided from this excerpt.
 */
336 movi __bss_start, r22
341 bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
	/* Say bye to head.S but be prepared to wrongly get back ... */

	/* If we ever get back here through LINK/tr1-tr7 */

/*
 * Something's badly wrong here. Loop endlessly,
 * there's nothing more we can do about it.
 *
 * Note on hopeless: it can be jumped into invariably
 * before or after jumping into hyperspace. The only
 * requirement is to be PIC called (PTA) before and
 * any way (PTA/PTABS) after. According to Virtual
 * to Physical mapping a simulator/emulator can easily
 * tell where we came here from just looking at hopeless
 * (PC) address.
 *
 * For debugging purposes:
 * (r28) hopeless/loop address
 * (r30) CPU type/Platform endianness
 * (r32) _start_kernel address
 */