/*
 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

	.text
/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock.
 */
#ifdef CONFIG_SMP
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
10:	lwarx	r0,0,r8			/* take the lock... */
	cmpwi	0,r0,0
	bne-	10b			/* ...spinning while it is held */
	stwcx.	r8,0,r8
	bne-	10b
	isync
	li	r0,0
	eieio
	stw	r0,0(r8)		/* ...and release it again */
	blr
#endif /* CONFIG_SMP */
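
/*
 * Roughly, in C (an illustrative sketch only):
 *
 *	spin_lock(&mmu_hash_lock);
 *	spin_unlock(&mmu_hash_lock);
 *
 * i.e. wait for any hash_page in progress on another CPU to finish.
 */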
/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
10:	lwarx	r6,0,r8			/* take mmu_hash_lock */
	cmpwi	0,r6,0
	bne-	10b
	stwcx.	r8,0,r8
	bne-	10b
	isync
#endif /* CONFIG_SMP */
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
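
	/*
	 * Roughly, in C, the walk above plus the rlwimi below is
	 * (illustrative only; phys() stands for the tophys() conversion
	 * and the helper names are not the kernel's):
	 *
	 *	pgd = (addr < KERNELBASE) ? current->thread.pgdir
	 *				  : swapper_pg_dir;
	 *	pmdval = *(u32 *)(phys(pgd) + ((addr >> 22) << 2));
	 *	pte_page = pmdval & 0xfffff000;
	 *	if (pte_page == 0)
	 *		goto out;
	 *	ptep = pte_page + (((addr >> 12) & 0x3ff) << 2);
	 */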
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 */
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */
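
	/*
	 * Equivalent C for the loop above (illustrative only;
	 * load_reserved/store_conditional stand for lwarx/stwcx. and
	 * are not real kernel helpers):
	 *
	 *	do {
	 *		old = load_reserved(ptep);
	 *		if (access & ~old)
	 *			goto out;
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));
	 */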
	mfsrin	r3,r4			/* get segment reg for segment */
	bl	create_hpte		/* add the hash table entry */
#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)	/* clear mmu_hash_lock */
#endif
	/* Return from the exception */
	b	fast_exception_return
#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)	/* clear mmu_hash_lock */
	blr
#endif /* CONFIG_SMP */
/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
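
	/*
	 * In C, the VSID computed above is roughly (only the low 24 bits
	 * of the result are used):
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) * 0x111);
	 */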
#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12		/* ensure the tag is non-zero */
#endif /* CONFIG_SMP */
	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync
	tophys(r7,0)
#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpwi	0,r0,0
	bne-	10b
	stwcx.	r8,0,r9			/* lock value = our cpu tag */
	bne-	10b
	isync
#endif /* CONFIG_SMP */
	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5			/* pmdval = base of pte page */
	rlwimi	r8,r4,22,20,29		/* r8 = address of the pte */
1:	lwarx	r6,0,r8			/* fetch the old pte */
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE	/* else set it... */
	stwcx.	r5,0,r8			/* ...atomically */
	bne-	1b			/* retry if we lost the reservation */
	bl	create_hpte		/* add the hash table entry */

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif
	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync
	blr
/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 */
/*
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31
#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
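
	/*
	 * Net effect of the PP computation above, as a truth table
	 * (illustrative):
	 *
	 *	!_PAGE_USER                           -> PP = 0 (kernel rw)
	 *	_PAGE_USER && _PAGE_RW && _PAGE_DIRTY -> PP = 2 (user rw)
	 *	_PAGE_USER, otherwise                 -> PP = 3 (user ro)
	 */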
	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */
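
	/*
	 * Roughly, in C (illustrative):
	 *
	 *	pte_hi = PTE_V | ((vsid & 0xffffff) << 7)
	 *		       | ((va >> 22) & 0x3f);
	 *
	 * where the last term is the API, the top 6 bits of the page index.
	 */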
	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */
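
	/*
	 * Once patched, the three instructions above compute, roughly
	 * (illustrative C, using the example Hash_base/Hash_bits values
	 * defined earlier):
	 *
	 *	hash = (vsid ^ ((va >> 12) & 0xffff)) & ((1 << Hash_bits) - 1);
	 *	pteg = Hash_base + hash * PTEG_SIZE;
	 */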
	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4			/* flush any old hw translation */
	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)
	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */
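
	/*
	 * The xoris/xori pair above flips exactly the bits covered by
	 * Hash_msk, i.e. roughly pteg2 = pteg1 ^ Hash_msk: the secondary
	 * hash is the ones-complement of the primary hash within the
	 * table.  The -PTEG_SIZE immediate works because the low 16 bits
	 * of Hash_msk are 0xffc0 for any Hash_bits >= 10 (i.e. assuming
	 * at least a 64kB hash table).
	 */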
	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty
	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */
	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */
1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE		/* search for candidate */
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6		/* address of the candidate slot */
	LDPTE	r0,PTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12		/* extract its RPN */
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b			/* skip slots that map kernel text */
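
	/*
	 * Roughly, in C (illustrative; word1 is the RPN/WIMG/PP word of
	 * a slot and phys() stands for the tophys() conversion):
	 *
	 *	do {
	 *		next_slot = (next_slot + PTE_SIZE) & (7 * PTE_SIZE);
	 *		slot = pteg + next_slot;
	 *	} while ((slot->word1 & ~0xfff) < phys(etext));
	 */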
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)		/* set first word (V, VSID, H, API) */
found_slot:
#ifndef CONFIG_SMP
	STPTE	r8,PTE_SIZE/2(r4)	/* set second word (RPN, WIMG, PP) */
#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)		/* invalidate the entry first */
	sync
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr
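
/*
 * Note on the SMP update protocol above: no CPU can ever observe V=1
 * together with a stale second word.  Roughly, with pte[0] the
 * V/VSID/API word and pte[1] the RPN/WIMG/PP word (illustrative):
 *
 *	pte[0] &= ~PTE_V;	wmb();
 *	pte[1] = new_lo;	wmb();
 *	pte[0] = new_hi | PTE_V;
 */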
/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync
	tophys(r7,0)
	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29		/* r5 = address of the linux pte */
1:	cmpwi	cr1,r6,1		/* r6 = count of ptes left to check */
	lwz	r0,0(r5)
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f			/* found one: go flush it */
	ble	cr1,19f			/* none in the range: just return */
	addi	r4,r4,0x1000		/* else step to the next page */
	addi	r5,r5,4
	addi	r6,r6,-1
	b	1b
	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */
	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */
#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpwi	0,r0,0
	bne-	10b
	stwcx.	r9,0,r9
	bne-	10b
	isync
#endif /* CONFIG_SMP */
	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b			/* retry if we lost the reservation */
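
	/*
	 * Equivalent C for the loop above (illustrative only;
	 * load_reserved/store_conditional stand for lwarx/stwcx.):
	 *
	 *	do {
	 *		pte = load_reserved(ptep);
	 *		if (!(pte & _PAGE_HASHPTE))
	 *			goto next;
	 *	} while (!store_conditional(ptep, pte & ~_PAGE_HASHPTE));
	 */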
	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */
	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f			/* found it: go invalidate */
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */
3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
	sync
	tlbie	r4			/* in hw tlb too */
	sync
4:
8:	ble	cr1,9f			/* if all ptes checked */
	addi	r6,r6,-1		/* decrement count of ptes left */
	addi	r5,r5,4			/* advance to next pte */
	cmpwi	cr1,r6,1
	addi	r4,r4,0x1000		/* advance va to match */
	lwz	r0,0(r5)		/* check next pte */
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b			/* flush this one too */
	b	8b

9:
#ifdef CONFIG_SMP
	li	r0,0
	eieio
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync
	blr