/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003, 2005
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */
#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
/*
 * Stack frame layout:
 *
 *      +-> Back chain			(SP + 256)
 *      |   General register save area	(SP + 112)
 *      |   Parameter save area		(SP + 48)
 *      |   TOC save area		(SP + 40)
 *      |   link editor doubleword	(SP + 32)
 *      |   compiler doubleword		(SP + 24)
 *      |   LR save area		(SP + 16)
 *      |   CR save area		(SP + 8)
 * SP ---> +-- Back chain		(SP + 0)
 */
#define STACKFRAMESIZE	256

/* Saved parameter offsets */
#define STK_PARM(i)	(STACKFRAMESIZE + 48 + ((i)-3)*8)

/* Saved non-volatile register offsets */
#define STK_REG(i)	(112 + ((i)-14)*8)
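/* Worked examples of the two macros above (the rN register symbols expand
 * to plain numbers in assembly, so the arithmetic is literal):
 *   STK_PARM(r4) = 256 + 48 + (4-3)*8 = 312, i.e. the home slot for the
 *   second argument in the caller-provided parameter save area, just above
 *   the frame we allocate below.
 *   STK_REG(r28) = 112 + (28-14)*8 = 224, a slot inside our own general
 *   register save area at SP + 112.
 */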
#ifndef CONFIG_PPC_64K_PAGES

/*****************************************************************************
 *                                                                           *
 *                     4K SW & 4K HW pages implementation                    *
 *                                                                           *
 *****************************************************************************/

/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local)
 *
 * Adds a 4K page to the hash table in a segment of 4K pages only
 */

_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT
	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is "va"
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */
	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
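	/* For reference: the primary hash kept in r28 is the XOR of the two
	 * quantities computed above, i.e.
	 * (vsid & 0x7fffffffff) ^ ((ea >> 12) & 0xffff). */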
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
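	/* Reading aid for the rlwinm/and/andc/rlwimi sequence above: the low
	 * PP bit ends up set for a user page unless that page is both RW and
	 * DIRTY, so user pages are mapped read-only in the HPTE until a write
	 * fault has set _PAGE_DIRTY. */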
	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */
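	/* r2 holds the kernel TOC pointer here: the @got(2) load fetches the
	 * address of htab_hash_mask from the GOT/TOC, and the second load
	 * dereferences it to get the mask value itself. */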
	/* Check if we may already be in the hash table; if so, go to
	 * out-of-line code to try to modify the HPTE.
	 */
	andi.	r0,r31,_PAGE_HASHPTE

	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	andc	r30,r30,r0
	ori	r30,r30,_PAGE_HASHPTE
	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
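	/* Each hash bucket holds 8 HPTEs, so (hash & mask) << 3 turns the
	 * bucket number into the slot-group offset the insert routine
	 * expects. */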
	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* Patched by htab_finish_init() */
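	/* The "bl ." above is a placeholder: at boot, htab_finish_init()
	 * rewrites the branch target to the platform's ppc_md.hpte_insert,
	 * so the hot path does not go through an indirect call. */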
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */
	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* Patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* Patched by htab_finish_init() */

	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15
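	/* r3 comes back from hpte_insert as the slot index within the group
	 * (with the secondary-hash flag above it); rldimi parks those bits in
	 * the PTE's slot/secondary field at bit positions 12-15 so that a
	 * later update or invalidate can find the HPTE again. */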
	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
	ld	r6,STK_PARM(r6)(r1)

	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_SECONDARY

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */
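	/* The slot argument handed to hpte_updatepp is therefore the group's
	 * base offset, (hash & mask) << 3 (or its inverted-hash counterpart
	 * for secondary groups), plus the 3-bit index saved in the PTE. */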
	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* Patched by htab_finish_init() */

	/* If we failed, typically because the HPTE wasn't really there,
	 * we try an insertion.
	 */

	/* Clear the BUSY bit and write out the PTE */

	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#else /* CONFIG_PPC_64K_PAGES */

/*****************************************************************************
 *                                                                           *
 *          64K SW & 4K or 64K HW in a 4K segment pages implementation       *
 *                                                                           *
 *****************************************************************************/

/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local)
 *
 * For now, we do NOT implement Admixed pages
 */

_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT
	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is "va"
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 * r26 is the hidx mask
	 * r25 is the index in combo page
	 */
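	/* A note on the "combo" case: the 64K linux page is backed by
	 * individual 4K hardware pages, so r25 selects which of the sixteen
	 * 4K sub-pages is being hashed and the per-sub-page slot information
	 * (hidx) lives in the second half of the PTE, with the exact layout
	 * defined in pgtable.h. */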
	std	r25,STK_REG(r25)(r1)
	std	r26,STK_REG(r26)(r1)
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	oris	r30,r30,_PAGE_COMBO@h
	/* Write the linux PTE atomically (setting busy) */
	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

	/* Load the hidx index */
	rldicl	r25,r3,64-12,60
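	/* r25 = (ea >> 12) & 0xf, i.e. which of the sixteen 4K sub-pages of
	 * the 64K linux page the faulting address falls into. */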
	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
	or	r29,r3,r29		/* r29 = va */

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hash table; if so, go to
	 * out-of-line code to try to modify the HPTE. We look for
	 * the bit at (1 >> (index + 32))
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	li	r26,0			/* Default hidx */
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	andis.	r0,r31,_PAGE_COMBO@h
	beq	htab_inval_old_hpte

	ld	r6,STK_PARM(r6)(r1)
	ori	r26,r6,0x8000		/* Load the hidx mask */
	addi	r5,r25,36		/* Check actual HPTE_SUB bit, this */
	rldcr.	r0,r31,r5,0		/* must match pgtable.h definition */
	/* real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	bne-	htab_special_pfn
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
	sldi	r5,r5,HW_PAGE_SHIFT
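	/* Assuming the usual combo layout: the RPN taken from the PTE names
	 * the 64K linux page; when _PAGE_4K_PFN is not set it is scaled up
	 * and combined with the sub-page index in r25 so that r5 ends up as
	 * the physical address of the 4K hardware page being mapped. */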
	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Now try secondary slot */

	/* real page number in r5, PTE RPN value + index */
	andis.	r0,r31,_PAGE_4K_PFN@h
	srdi	r5,r31,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
3:	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */
	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure

	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */
	/*
	 * Call out to C code to invalidate a 64k HW HPTE that is
	 * useless now that the segment has been switched to 4k pages.
	 */
	mr	r3,r29			/* virtual addr */
	mr	r4,r31			/* PTE.pte */
	li	r5,0			/* PTE.hidx */
	li	r6,MMU_PAGE_64K		/* psize */
	ld	r7,STK_PARM(r8)(r1)	/* local */
	/* Insert slot number & secondary bit in PTE second half,
	 * clear _PAGE_BUSY and set appropriate HPTE slot bit
	 */
	ld	r6,STK_PARM(r6)(r1)
	subfic	r5,r25,27		/* Must match bit position in */
	sld	r0,r0,r5		/* pgtable.h */
	ld	r25,STK_REG(r25)(r1)
	ld	r26,STK_REG(r26)(r1)
	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r3,0x8		/* page secondary ? */
1:	andi.	r3,r3,0x7		/* extract idx alone */

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */
	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */
	/* If we failed, typically because the HPTE wasn't really there,
	 * we try an insertion.
	 */

	/* Clear the BUSY bit and write out the PTE */
	ld	r6,STK_PARM(r6)(r1)

	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K

/*****************************************************************************
 *                                                                           *
 *              64K SW & 64K HW in a 64K segment pages implementation        *
 *                                                                           *
 *****************************************************************************/

_GLOBAL(__hash_page_64K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT
	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r30 is "new PTE"
	 * r29 is "va"
	 * r28 is a hash value
	 * r27 is hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	andc.	r0,r4,r31
	bne-	ht64_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
BEGIN_FTR_SECTION
	/* Check if PTE has the cache-inhibit bit set */
	andi.	r0,r31,_PAGE_NO_CACHE
	/* If so, bail out and refault as a 4k page */
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */
	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is re-usable, we use it for the new HPTE flags
	 */

	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
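	/* For reference: with 64K pages the page index is ea >> 16, so the
	 * primary hash kept in r28 is
	 * (vsid & 0x7fffffffff) ^ ((ea >> 16) & 0xfff). */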
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We eventually do the icache sync here (maybe inline that
	 * code rather than call a C function...)
	 */
BEGIN_FTR_SECTION
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hash table; if so, go to
	 * out-of-line code to try to modify the HPTE.
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	andc	r30,r30,r0
	ori	r30,r30,_PAGE_HASHPTE

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT
	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
_GLOBAL(ht64_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure

	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */
	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
_GLOBAL(ht64_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure

	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_remove */
_GLOBAL(ht64_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15

	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
	ld	r6,STK_PARM(r6)(r1)
	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31
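	/* This pulls the 3-bit slot index back out of the PTE field written
	 * by the earlier rldimi (bits 12-14); _PAGE_F_SECOND below supplies
	 * the primary/secondary information. */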
	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_F_SECOND

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */
	/* Call ppc_md.hpte_updatepp */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */
	/* If we failed, typically because the HPTE wasn't really there,
	 * we try an insertion.
	 */

	/* Clear the BUSY bit and write out the PTE */

	/* Bail out clearing reservation */

ht64_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#endif /* CONFIG_PPC_HAS_HASH_64K */


/*****************************************************************************
 *                                                                           *
 *               Huge pages implementation is in hugetlbpage.c               *
 *                                                                           *
 *****************************************************************************/