/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003, 2005
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */
#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
/*
 * Stackframe:
 *
 *         +-> Back chain			(SP + 256)
 *         |   General register save area	(SP + 112)
 *         |   Parameter save area		(SP + 48)
 *         |   TOC save area			(SP + 40)
 *         |   link editor doubleword		(SP + 32)
 *         |   compiler doubleword		(SP + 24)
 *         |   LR save area			(SP + 16)
 *         |   CR save area			(SP + 8)
 * SP ---> +-- Back chain			(SP + 0)
 */
#define STACKFRAMESIZE	256

/* Saved parameter offsets */
#define STK_PARM(i)	(STACKFRAMESIZE + 48 + ((i)-3)*8)

/* Saved non-volatile register offsets */
#define STK_REG(i)	(112 + ((i)-14)*8)
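/*
 * Worked example of the two macros above: after the "stdu r1,-STACKFRAMESIZE(r1)"
 * in the prologues below, STK_PARM(r3) = 256 + 48 + 0 = 304, i.e. the first
 * doubleword of the caller's parameter save area as seen from the new stack
 * pointer, while STK_REG(r14) = 112 and STK_REG(r31) = 248 address this
 * frame's own general register save area.
 */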
#ifndef CONFIG_PPC_64K_PAGES

/*****************************************************************************
 *                     4K SW & 4K HW pages implementation                   *
 *****************************************************************************/
/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local, int ssize)
 *
 * Adds a 4K page to the hash table in a segment of 4K pages only
 */
_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)
	std	r9,STK_PARM(r9)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
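	/*
	 * Note on the rlwinm above: it copies the _PAGE_RW bit of "access"
	 * into the _PAGE_DIRTY position of the new PTE, so a write access
	 * pre-dirties the page and can be given a writable HPTE right away,
	 * while a read access leaves DIRTY clear (see the PP bit handling
	 * further down).
	 */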
	/* Write the linux PTE atomically (setting busy) */

	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */
	cmpdi	r9,0			/* check segment size */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28
	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */

3:	/* Calc VA and hash in r29 and r28 for 1T segment */
	sldi	r29,r5,40		/* vsid << 40 */
	clrldi	r3,r3,24		/* ea & 0xffffffffff */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
	or	r29,r3,r29		/* VA */
	xor	r28,r28,r0		/* hash */
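	/*
	 * To summarize the hashing above: for a 256M segment the hash is
	 * roughly vsid XOR ((ea >> 12) & 0xffff); for a 1T segment it is
	 * roughly vsid XOR (vsid << 25) XOR ((ea >> 12) & 0xfffffff), with
	 * the masks shown in the per-instruction comments applied.  The
	 * primary group is (hash & htab_hash_mask) and the secondary group
	 * is (~hash & htab_hash_mask).
	 */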
	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
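	/*
	 * Net effect of the PP computation above (assuming the usual Linux
	 * PTE bit assignments): kernel pages get PP = 0 (supervisor r/w),
	 * user pages that are both writable and dirty get PP = 2 (user r/w),
	 * and all other user pages get PP = 3 (read-only), which is what
	 * makes the lazy DIRTY scheme work.
	 */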
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */
	/* Check if we may already be in the hash table; in this case,
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE

	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	ori	r30,r30,_PAGE_HASHPTE
	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* Patched by htab_finish_init() */
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
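	/*
	 * Return convention of the hpte_insert() backends, as relied upon
	 * here: a value >= 0 is the slot number within the group (with bit
	 * 3 typically set when the entry landed in a secondary group), -1
	 * means the group was full (fall through and try the other group),
	 * and -2 is an unrecoverable failure.
	 */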
	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* Patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* Patched by htab_finish_init() */

htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15
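	/*
	 * r3 still holds the value returned by hpte_insert(): the 3-bit
	 * slot index plus the secondary-group bit.  Storing it in PTE bits
	 * 12-15 lets a later updatepp or invalidate find the HPTE again.
	 */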
	/* Write out the PTE with a normal write
	 * (maybe an eieio would still be good here?)
	 */
	ld	r6,STK_PARM(r6)(r1)

	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_SECONDARY

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */
	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r9)(r1)	/* segment size */
	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* Patched by htab_finish_init() */

	/* If we failed, typically because the HPTE wasn't really here,
	 * we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */

	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#else /* CONFIG_PPC_64K_PAGES */

/*****************************************************************************
 *        64K SW & 4K or 64K HW in a 4K segment pages implementation        *
 *****************************************************************************/

/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 * pte_t *ptep, unsigned long trap, int local)
 *
 * For now, we do NOT implement Admixed pages
 */
_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)
	std	r9,STK_PARM(r9)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 * r26 is the hidx mask
	 * r25 is the index in combo page
	 */
	std	r25,STK_REG(r25)(r1)
	std	r26,STK_REG(r26)(r1)
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, ACCESSED, HASHPTE and COMBO)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	oris	r30,r30,_PAGE_COMBO@h
	/* Write the linux PTE atomically (setting busy) */

	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */
	/* Load the hidx index */
	rldicl	r25,r3,64-12,60
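	/*
	 * r25 = (ea >> 12) & 0xf: the index of the 4K hardware sub-page
	 * within the 64K Linux ("combo") page, used below to pick this
	 * sub-page's HPTE_SUB bit and its slot in the hidx array.
	 */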
	cmpdi	r9,0			/* check segment size */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
	or	r29,r3,r29		/* r29 = va */

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */

3:	/* Calc VA and hash in r29 and r28 for 1T segment */
	sldi	r29,r5,40		/* vsid << 40 */
	clrldi	r3,r3,24		/* ea & 0xffffffffff */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
	or	r29,r3,r29		/* VA */
	xor	r28,r28,r0		/* hash */

	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */
	/* Check if we may already be in the hash table; in this case,
	 * we go to out-of-line code to try to modify the HPTE. We look
	 * for this sub-page's HPTE_SUB bit in the PTE.
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	li	r26,0			/* Default hidx */
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	andis.	r0,r31,_PAGE_COMBO@h
	beq	htab_inval_old_hpte

	ld	r6,STK_PARM(r6)(r1)
	ori	r26,r6,0x8000		/* Load the hidx mask */
	addi	r5,r25,36		/* Check actual HPTE_SUB bit, this */
	rldcr.	r0,r31,r5,0		/* must match pgtable.h definition */
	/* Real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	bne-	htab_special_pfn
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT

htab_special_pfn:
	sldi	r5,r5,HW_PAGE_SHIFT
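	/*
	 * Roughly: in the normal case the PTE's RPN counts 64K pages, so
	 * the 4K hardware page number is the RPN scaled up by
	 * PAGE_SHIFT - HW_PAGE_SHIFT plus the sub-page index in r25; with
	 * _PAGE_4K_PFN set the RPN is already in 4K units and is used
	 * as-is.
	 */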
	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* Real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
3:	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */
	/*
	 * Call out to C code to invalidate a 64k HW HPTE that is
	 * useless now that the segment has been switched to 4k pages.
	 */
htab_inval_old_hpte:
	mr	r3,r29			/* virtual addr */
	mr	r4,r31			/* PTE.pte */
	li	r5,0			/* PTE.hidx */
	li	r6,MMU_PAGE_64K		/* psize */
	ld	r7,STK_PARM(r8)(r1)	/* local */

htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE second half,
	 * clear _PAGE_BUSY and set appropriate HPTE slot bit
	 */
	ld	r6,STK_PARM(r6)(r1)

	subfic	r5,r25,27		/* Must match bit position in */
	sld	r0,r0,r5		/* pgtable.h */
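	/*
	 * Sketch of the bookkeeping here (assuming this kernel's 64K
	 * pgtable layout): the "has an HPTE" bit for sub-page r25 sits at
	 * PTE bit (27 - r25), and the 4-bit slot value returned by
	 * hpte_insert() is written into the hidx array kept alongside the
	 * PTE (the second half of the PTE page, hence the 0x8000 offset
	 * used above), so each 4K sub-page can be invalidated on its own.
	 */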
	ld	r25,STK_REG(r25)(r1)
	ld	r26,STK_REG(r26)(r1)
	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r3,0x8		/* page secondary? */
1:	andi.	r3,r3,0x7		/* extract idx alone */

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r9)(r1)	/* segment size */
	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

	/* If we failed, typically because the HPTE wasn't really here,
	 * we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */
	ld	r6,STK_PARM(r6)(r1)

	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K

/*****************************************************************************
 *           64K SW & 64K HW in a 64K segment pages implementation          *
 *****************************************************************************/

_GLOBAL(__hash_page_64K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)
	std	r9,STK_PARM(r9)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	ht64_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Check if PTE has the cache-inhibit bit set */
	andi.	r0,r31,_PAGE_NO_CACHE
	/* If so, bail out and refault as a 4k page */
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
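	/*
	 * The feature section above is only left in place on CPUs that do
	 * not have CPU_FTR_CI_LARGE_PAGE: those cannot map cache-inhibited
	 * (e.g. I/O) memory with 64K HPTEs, so such pages are left to be
	 * hashed as 4K pages instead.
	 */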
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */

	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */
	cmpdi	r9,0			/* check segment size */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */

3:	/* Calc VA and hash in r29 and r28 for 1T segment */
	sldi	r29,r5,40		/* vsid << 40 */
	clrldi	r3,r3,24		/* ea & 0xffffffffff */
	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
	clrldi	r5,r5,40		/* vsid & 0xffffff */
	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
	or	r29,r3,r29		/* VA */
	xor	r28,r28,r0		/* hash */
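	/*
	 * Same hashing as the 4K case, except the page index within the
	 * segment is (ea >> 16) because each page is 64K: roughly,
	 * hash = vsid XOR ((ea >> 16) & 0xfff) for a 256M segment, with
	 * the extra (vsid << 25) term folded in for 1T segments.
	 */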
	/* Convert linux PTE bits into HW equivalents */
4:	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)

	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */
	/* Check if we may already be in the hash table; in this case,
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE

	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	ori	r30,r30,_PAGE_HASHPTE
	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(ht64_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure

	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	ld	r9,STK_PARM(r9)(r1)	/* segment size */
_GLOBAL(ht64_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure
	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(ht64_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */

ht64_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15
	/* Write out the PTE with a normal write
	 * (maybe an eieio would still be good here?)
	 */
	ld	r6,STK_PARM(r6)(r1)

	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE

	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_F_SECOND

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */
	/* Call ppc_md.hpte_updatepp */
	ld	r7,STK_PARM(r9)(r1)	/* segment size */
	ld	r8,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */

	/* If we failed, typically because the HPTE wasn't really here,
	 * we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */

	/* Bail out clearing reservation */

ht64_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#endif /* CONFIG_PPC_HAS_HASH_64K */


/*****************************************************************************
 *              Huge pages implementation is in hugetlbpage.c                *
 *****************************************************************************/