#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */
#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12
/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */
/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
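/*
 * Illustrative sketch (not built): how the flag sets above are
 * combined into the VSID doubleword of an SLB entry, in the style of
 * mk_vsid_data() in arch/powerpc/mm/slb.c.  The helper name below is
 * hypothetical.
 */
#if 0
static inline unsigned long example_slb_vsid_data(unsigned long vsid,
						  unsigned long flags)
{
	/* flags is SLB_VSID_KERNEL or SLB_VSID_USER, possibly or'ed
	 * with SLB_VSID_L/SLB_VSID_LP bits for large pages */
	return (vsid << SLB_VSID_SHIFT) | flags;
}
#endif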
#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8
#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */
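/*
 * Illustrative sketch (not built): the PP values occupy the low
 * HPTE_R_PP bits of the second HPTE doubleword and are or'ed with
 * flags such as HPTE_R_N.  The helper name below is hypothetical.
 */
#if 0
static inline unsigned long example_user_ro_noexec_rflags(void)
{
	/* Supervisor read/write, user read-only, no-execute */
	return PP_RWRX | HPTE_R_N;
}
#endif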
#ifndef __ASSEMBLY__

struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
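/*
 * Illustrative only: typical entries, matching the defaults that
 * arch/powerpc/mm/hash_utils_64.c installs when the firmware provides
 * no page size information.  The real values are probed at boot and
 * may differ.
 */
#if 0
static struct mmu_psize_def example_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};
#endif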
#endif /* __ASSEMBLY__ */
/*
 * The kernel uses the constants below to index into the page sizes array.
 * The use of fixed constants for this purpose is better for the performance
 * of the low level hash refill handlers.
 *
 * An unsupported page size has a "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */
#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6
/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1
#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_io_psize;
/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;
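/*
 * Illustrative sketch (not built) of how the flag is consumed on the
 * hash fault path, loosely following hash_page() in hash_utils_64.c;
 * the demote_segment_4k() call is the (simplified) demotion step there.
 */
#if 0
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE))
		demote_segment_4k(mm, ea);	/* fall back to 4k pages */
#endif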
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}
/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
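/*
 * Illustrative sketch (not built): how the two encode helpers above
 * combine into a complete HPTE, in the style of the hash table
 * backends (cf. native_hpte_insert()).  'vflags'/'rflags' carry bits
 * such as HPTE_V_BOLTED and the PP_* values; the helper name is
 * hypothetical.
 */
#if 0
static inline void example_make_hpte(unsigned long va, unsigned long pa,
				     int psize, unsigned long vflags,
				     unsigned long rflags,
				     unsigned long *v, unsigned long *r)
{
	*v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
	*r = hpte_encode_r(pa, psize) | rflags;
}
#endif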
/*
 * This hashes a virtual address for a 256Mb segment only, for now.
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}
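/*
 * Illustrative sketch (not built): turning the hash into the index of
 * a primary PTE group, as the hash fault handlers do; a secondary
 * group lookup uses the ones-complement of the hash.  htab_hash_mask
 * is declared above.
 */
#if 0
	unsigned long hash = hpt_hash(va, mmu_psize_defs[psize].shift);
	unsigned long slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
#endif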
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);
extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);

#endif /* __ASSEMBLY__ */
/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 16) | (esid & 0xffff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER is a 28-bit prime (currently 200730139)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses, i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */
#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
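/*
 * Hypothetical self-check (not built): the divide-free mod-(2^36-1)
 * reduction described above agrees with a true modulo for any
 * proto-VSID below VSID_MODULUS.
 */
#if 0
static void example_vsid_scramble_check(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);	  /* fold high bits */
	x = (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;  /* final carry */
	BUG_ON(x != (protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
}
#endif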
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *		bits may contain other garbage, so you may need to mask the
 *		result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;			/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;		/* SLB page size encodings */
	u64 high_slices_psize;		/* 4 bits per slice for now */
#else
	u16 sllp;			/* SLB page size encoding */
#endif
	unsigned long vdso_base;
} mm_context_t;
static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}
/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}
/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

/* Physical address used by some IO functions */
typedef unsigned long phys_addr_t;
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */