/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>
/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */
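	/*
	 * On this kernel's address layout the top nibble of the EA
	 * selects the space: region 0xc is the linear mapping at
	 * PAGE_OFFSET, 0xd is vmalloc/ioremap space and (with
	 * CONFIG_SPARSEMEM_VMEMMAP) 0xf is the virtual memmap; anything
	 * below 0xc is treated as a user address.
	 */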
	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
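	/*
	 * slb_miss_kernel_load_linear is a boot-time patch site: the
	 * immediate of the li above is rewritten with the SLB_VSID
	 * flags (sllp encoding) chosen for the linear mapping.  The
	 * CPU_FTR_1T_SEGMENT feature section then selects between the
	 * 256M and 1T segment finish paths.
	 */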
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
	 * will be patched by the kernel at boot
	 */
	/* check whether this is in vmalloc or ioremap space */
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	lhz	r11,PACAVMALLOCSLLP(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
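	/*
	 * PACAVMALLOCSLLP caches the SLB_VSID/sllp encoding chosen for
	 * vmalloc space at boot; the immediate under
	 * slb_miss_kernel_load_io is likewise patched at boot with the
	 * encoding used for ioremap space, which may differ (e.g. 4K
	 * cache-inhibited pages) from the vmalloc one.
	 */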
0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */
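	/*
	 * The shift leaves only the EA bits above the USER_ESID_BITS
	 * worth of ESID in r9; if any of them are set the address lies
	 * outside the supported user range and we install the bad VSID
	 * via the 8: path below.
	 */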
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
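	/*
	 * Roughly, in C (an illustrative sketch only; the psize words
	 * are the 4-bit-per-slice fields cached in the PACA):
	 *
	 *	if (ea < SLICE_LOW_TOP)		... 4GB of 256MB low slices
	 *		psize = (low_slices_psize >> (4 * (ea >> SLICE_LOW_SHIFT))) & 0xf;
	 *	else				... 1TB high slices
	 *		psize = (high_slices_psize >> (4 * (ea >> SLICE_HIGH_SHIFT))) & 0xf;
	 *	vsid_flags = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */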
#ifdef CONFIG_PPC_MM_SLICES
	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)

5:	/* Extract the psize and multiply to get an array offset */
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,mmu_psize_defs@got(r11)
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */
	ld	r9,PACACONTEXTID(r13)
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
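	/*
	 * r10 now holds the proto-VSID:
	 *	proto_vsid = (context << USER_ESID_BITS) | esid
	 * which the scramble in slb_finish_load turns into the real VSID.
	 */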
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load
8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load
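	/*
	 * Proto-VSID 0 scrambles to VSID 0, the reserved "bad" VSID
	 * noted above, so the entry we install can never map any pages.
	 */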

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
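	/*
	 * The rlwinm/ldx pairs walk the PGD and then the PMD by hand:
	 * each rlwinm turns the relevant EA bits into a byte offset into
	 * the table, and the ldx fetches that entry so the PMD's
	 * page-size (LLP) bits can be folded into the VSID flags below.
	 */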
	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
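	/*
	 * ASM_VSID_SCRAMBLE hashes the proto-VSID into the final VSID
	 * (multiply by VSID_MULTIPLIER modulo VSID_MODULUS for the 256M
	 * segment size); the rldimi then places that VSID at
	 * SLB_VSID_SHIFT in r11, above the protection and page-size
	 * flag bits already there, forming the RS operand for slbmte.
	 */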
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
7:	ld	r10,PACASTABRR(r13)
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	li	r10,SLB_NUM_BOLTED
	std	r10,PACASTABRR(r13)
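	/*
	 * paca->stab_rr holds the round-robin victim index.
	 * slb_compare_rr_to_size is another boot-time patch site: its
	 * compare immediate is rewritten with this CPU's real SLB size,
	 * and once the index reaches it we wrap back to SLB_NUM_BOLTED
	 * so the bolted entries are never victimized.
	 */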
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r10 = ESID data, r11 = VSID data */
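	/*
	 * r10 is the RB operand for the slbmte below: the ESID in the
	 * upper bits, the valid bit, and the chosen entry index in the
	 * low bits.  r11 is the RS operand: the VSID plus the
	 * protection and page-size flags.
	 */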
	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7
	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
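	/*
	 * The cache update above is roughly (illustrative C sketch):
	 *
	 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
	 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
	 *	else
	 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	 *
	 * An out-of-range pointer tells the context-switch code that the
	 * cache overflowed, so it falls back to flushing the whole SLB
	 * instead of invalidating the cached ESIDs one by one.
	 */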

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */
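	/*
	 * Same idea as the 256M path above, but the proto-VSID is first
	 * reduced to 1T granularity and scrambled with the 1T constants,
	 * and the segment-size (B) field of the VSID word is set to
	 * MMU_SEGSIZE_1T.
	 */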
	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b			/* rejoin the common slot allocation */