/*
 * ppc64 MMU hashtable management routines
 *
 * (c) Copyright IBM Corp. 2003, 2005
 *
 * Maintained by: Benjamin Herrenschmidt
 *                <benh@kernel.crashing.org>
 *
 * This file is covered by the GNU Public Licence v2 as
 * described in the kernel's COPYING file.
 */
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
/*
 * Stack frame layout:
 *
 *         +-> Back chain (SP + 256)
 *         |   General register save area (SP + 112)
 *         |   Parameter save area (SP + 48)
 *         |   TOC save area (SP + 40)
 *         |   link editor doubleword (SP + 32)
 *         |   compiler doubleword (SP + 24)
 *         |   LR save area (SP + 16)
 *         |   CR save area (SP + 8)
 * SP ---> +-- Back chain (SP + 0)
 */
#define STACKFRAMESIZE 256
/* Save parameters offsets */
#define STK_PARM(i)	(STACKFRAMESIZE + 48 + ((i)-3)*8)

/* Save non-volatile offsets */
#define STK_REG(i)	(112 + ((i)-14)*8)
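/*
 * Note on the macros above: STK_PARM(rN) indexes the parameter save area
 * of the caller's frame (past our STACKFRAMESIZE bytes, at offset 48 per
 * the ppc64 ELF ABI, one doubleword per argument register starting at r3),
 * so incoming arguments can be spilled across the calls made below.
 * STK_REG(rN) indexes the general register save area of our own frame
 * (SP + 112 in the diagram above) for the non-volatiles r14..r31.
 */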
#ifndef CONFIG_PPC_64K_PAGES

/*****************************************************************************
 *                    4K SW & 4K HW pages implementation                     *
 *****************************************************************************/
/*
 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local)
 *
 * Adds a 4K page to the hash table in a segment of 4K pages only
 */
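/*
 * Per the ppc64 ELF ABI the arguments arrive as: r3 = ea, r4 = access,
 * r5 = vsid, r6 = ptep, r7 = trap, r8 = local.
 */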
_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)
	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r27 is the hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */
	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */

	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
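	/*
	 * Note: r28 ends up holding the primary hash, i.e. the truncated
	 * VSID XORed with the 4K page index within the segment, as the two
	 * values extracted above are combined per the comment
	 * "store it in r28".
	 */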
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
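	/*
	 * Note: the sequence above derives the HPTE "pp" protection bits:
	 * HW write permission is granted only when the page is both
	 * Linux-RW and already dirty, so the first store to a clean page
	 * still faults and dirty state can be tracked.  HPTE_R_C is preset
	 * purely for performance, as the comment says, so hardware never
	 * has to update the changed bit later.
	 */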
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */
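	/*
	 * Note: the @got(2) form loads the address of htab_hash_mask from
	 * the GOT/TOC via r2 (the TOC pointer in the ppc64 ABI); the second
	 * load then fetches the mask value itself.
	 */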
	/* Check if we may already be in the hashtable; in that case, we
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	ori	r30,r30,_PAGE_HASHPTE

	/* physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT
	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* Patched by htab_finish_init() */
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
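	/*
	 * Return convention assumed by the branches above: hpte_insert
	 * returns the slot number (>= 0) on success; -2 signals an
	 * unrecoverable error, and any other negative value means the
	 * primary group was full, so we fall through and retry in the
	 * secondary group.
	 */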
	/* Now try secondary slot */

	/* physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* Patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* Patched by htab_finish_init() */
htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15
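	/*
	 * Note: the rldimi folds the value returned by hpte_insert (group
	 * slot index plus the secondary-hash flag) into the software bits
	 * of the Linux PTE (shifted up by 12), so a later update or
	 * invalidate can locate the HPTE without searching the group.
	 */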
	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
	ld	r6,STK_PARM(r6)(r1)

	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_SECONDARY

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* Patched by htab_finish_init() */
	/* If we failed, it is typically because the HPTE wasn't really
	 * there, so we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */

htab_wrong_access:
	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#else /* CONFIG_PPC_64K_PAGES */

/*****************************************************************************
 *         64K SW & 4K or 64K HW in a 4K segment pages implementation        *
 *****************************************************************************/

/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 *		 pte_t *ptep, unsigned long trap, int local)
 *
 * For now, we do NOT implement Admixed pages
 */
_GLOBAL(__hash_page_4K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r28 is a hash value
	 * r27 is the hashtab mask (maybe dynamically patched instead?)
	 * r26 is the hidx mask
	 * r25 is the index in combo page
	 */
	std	r25,STK_REG(r25)(r1)
	std	r26,STK_REG(r26)(r1)
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	htab_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, ACCESSED and HASHPTE)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	oris	r30,r30,_PAGE_COMBO@h
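	/*
	 * Note: _PAGE_COMBO marks this 64K Linux PTE as being backed by
	 * individual 4K hash entries, one per 4K sub-page, which is why the
	 * per-sub-page "hidx" slot tracking below is needed.
	 */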
	/* Write the linux PTE atomically (setting busy) */

	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */

	/* Load the hidx index */
	rldicl	r25,r3,64-12,60

	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
	or	r29,r3,r29		/* r29 = va */

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable; in that case, we
	 * go to out-of-line code to try to modify the HPTE. We look for
	 * the bit at (1 >> (index + 32))
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	li	r26,0			/* Default hidx */
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64K HW page, and invalidate the 64K HPTE if so.
	 */
	andis.	r0,r31,_PAGE_COMBO@h
	beq	htab_inval_old_hpte

	ld	r6,STK_PARM(r6)(r1)
	ori	r26,r6,0x8000		/* Load the hidx mask */
	addi	r5,r25,36		/* Check actual HPTE_SUB bit, this */
	rldcr.	r0,r31,r5,0		/* must match pgtable.h definition */
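	/*
	 * Note: the rldcr. appears to rotate the old PTE so that the
	 * HPTE_SUB bit belonging to the 4K sub-page index in r25 is the one
	 * tested (it must match the pgtable.h layout, per the comment); if
	 * it is already set, this sub-page has a hash entry to update
	 * rather than insert.
	 */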
	/* real page number in r5, PTE RPN value + index */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
	/* Now try secondary slot */

	/* real page number in r5, PTE RPN value + index */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
	sldi	r5,r5,HW_PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
	li	r8,MMU_PAGE_4K		/* page size */
_GLOBAL(htab_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	htab_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	htab_pte_insert_failure
	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */
	/*
	 * Call out to C code to invalidate a 64K HW HPTE that is
	 * useless now that the segment has been switched to 4K pages.
	 */
htab_inval_old_hpte:
	mr	r3,r29			/* virtual addr */
	mr	r4,r31			/* PTE.pte */
	li	r5,0			/* PTE.hidx */
	li	r6,MMU_PAGE_64K		/* psize */
	ld	r7,STK_PARM(r8)(r1)	/* local */
htab_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE second half,
	 * clear _PAGE_BUSY and set appropriate HPTE slot bit
	 */
	ld	r6,STK_PARM(r6)(r1)
	subfic	r5,r25,27		/* Must match bit position in */
	sld	r0,r0,r5		/* pgtable.h */
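	/*
	 * Note: the subfic/sld pair computes the position of this
	 * sub-page's HPTE_SUB flag from the 4K sub-page index in r25; as
	 * the comment says, the resulting bit position must stay in sync
	 * with the layout defined in pgtable.h.
	 */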
	ld	r25,STK_REG(r25)(r1)
	ld	r26,STK_REG(r26)(r1)
	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r3,0x8		/* page secondary? */
1:	andi.	r3,r3,0x7		/* extract idx alone */

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	li	r6,MMU_PAGE_4K		/* page size */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */
	/* If we failed, it is typically because the HPTE wasn't really
	 * there, so we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */
	ld	r6,STK_PARM(r6)(r1)

htab_wrong_access:
	/* Bail out clearing reservation */

htab_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
/*****************************************************************************
 *             64K SW & 64K HW in a 64K segment pages implementation         *
 *****************************************************************************/

_GLOBAL(__hash_page_64K)
	stdu	r1,-STACKFRAMESIZE(r1)
	/* Save all params that we need after a function call */
	std	r6,STK_PARM(r6)(r1)
	std	r8,STK_PARM(r8)(r1)

	/* Add _PAGE_PRESENT to access */
	ori	r4,r4,_PAGE_PRESENT

	/* Save non-volatile registers.
	 * r31 will hold "old PTE"
	 * r28 is a hash value
	 * r27 is the hashtab mask (maybe dynamically patched instead?)
	 */
	std	r27,STK_REG(r27)(r1)
	std	r28,STK_REG(r28)(r1)
	std	r29,STK_REG(r29)(r1)
	std	r30,STK_REG(r30)(r1)
	std	r31,STK_REG(r31)(r1)
	/*
	 * Check permissions, atomically mark the linux PTE busy
	 */
	/* Check access rights (access & ~(pte_val(*ptep))) */
	bne-	ht64_wrong_access
	/* Check if PTE is busy */
	andi.	r0,r31,_PAGE_BUSY
	/* If so, just bail out and refault if needed. Someone else
	 * is changing this PTE anyway and might hash it.
	 */
	/* Check if PTE has the cache-inhibit bit set */
	andi.	r0,r31,_PAGE_NO_CACHE
	/* If so, bail out and refault as a 4K page */
END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
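	/*
	 * Note: the feature section above is only kept on CPUs without
	 * cache-inhibited large page support (CPU_FTR_CI_LARGE_PAGE clear);
	 * on those, a non-cacheable mapping cannot be hashed with a 64K
	 * HPTE, so it is refused here and gets refaulted and serviced as
	 * 4K pages instead.
	 */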
	/* Prepare new PTE value (turn access RW into DIRTY, then
	 * add BUSY, HASHPTE and ACCESSED)
	 */
	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
	/* Write the linux PTE atomically (setting busy) */

	/*
	 * Insert/Update the HPTE in the hash table. At this point,
	 * r4 (access) is reusable, we use it for the new HPTE flags
	 */

	/* Calc va and put it in r29 */
	rldicr	r29,r5,28,63-28

	/* Calculate hash value for primary slot and store it in r28 */
	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
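	/*
	 * Note: unlike the 4K cases above, the page index folded into the
	 * hash here is the 64K page number within the segment
	 * ((ea >> 16) & 0xfff), matching the 64K base page size of this
	 * mapping.
	 */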
	/* Convert linux PTE bits into HW equivalents */
	andi.	r3,r30,0x1fe		/* Get basic set of flags */
	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
	andc	r0,r30,r0		/* r0 = pte & ~r0 */
	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
	/* We do the icache sync here if needed (maybe inline that
	 * code rather than call a C function...)
	 */
	bl	.hash_page_do_lazy_icache
END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)

	/* At this point, r3 contains new PP bits, save them in
	 * place of "access" in the param area (sic)
	 */
	std	r3,STK_PARM(r4)(r1)
	/* Get htab_hash_mask */
	ld	r4,htab_hash_mask@got(2)
	ld	r27,0(r4)		/* htab_hash_mask -> r27 */

	/* Check if we may already be in the hashtable; in that case, we
	 * go to out-of-line code to try to modify the HPTE
	 */
	andi.	r0,r31,_PAGE_HASHPTE
	/* Clear hpte bits in new pte (we also clear BUSY btw) and
	 * add _PAGE_HASHPTE
	 */
	lis	r0,_PAGE_HPTEFLAGS@h
	ori	r0,r0,_PAGE_HPTEFLAGS@l
	ori	r30,r30,_PAGE_HASHPTE

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT
	/* Calculate primary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,0			/* !bolted, !secondary */
_GLOBAL(ht64_call_hpte_insert1)
	bl	.			/* patched by htab_finish_init() */
	bge	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure
	/* Now try secondary slot */

	/* Physical address in r5 */
	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
	sldi	r5,r5,PAGE_SHIFT

	/* Calculate secondary group hash */
	rldicr	r3,r0,3,63-3		/* r3 = (~hash & mask) << 3 */

	/* Call ppc_md.hpte_insert */
	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
	mr	r4,r29			/* Retrieve va */
	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
_GLOBAL(ht64_call_hpte_insert2)
	bl	.			/* patched by htab_finish_init() */
	bge+	ht64_pte_insert_ok	/* Insertion successful */
	cmpdi	0,r3,-2			/* Critical failure */
	beq-	ht64_pte_insert_failure
	/* Both are full, we need to evict something */

	/* Pick a random group based on TB */
	rldicr	r3,r0,3,63-3		/* r3 = (hash & mask) << 3 */
	/* Call ppc_md.hpte_remove */
_GLOBAL(ht64_call_hpte_remove)
	bl	.			/* patched by htab_finish_init() */
ht64_pte_insert_ok:
	/* Insert slot number & secondary bit in PTE */
	rldimi	r30,r3,12,63-15

	/* Write out the PTE with a normal write
	 * (maybe adding an eieio would still be good?)
	 */
	ld	r6,STK_PARM(r6)(r1)

	ld	r27,STK_REG(r27)(r1)
	ld	r28,STK_REG(r28)(r1)
	ld	r29,STK_REG(r29)(r1)
	ld	r30,STK_REG(r30)(r1)
	ld	r31,STK_REG(r31)(r1)
	addi	r1,r1,STACKFRAMESIZE
	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
	rlwinm	r3,r31,32-12,29,31

	/* Secondary group? If yes, get an inverted hash value */
	andi.	r0,r31,_PAGE_F_SECOND

	/* Calculate proper slot value for ppc_md.hpte_updatepp */
	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
	add	r3,r0,r3		/* add slot idx */

	/* Call ppc_md.hpte_updatepp */
	ld	r7,STK_PARM(r8)(r1)	/* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp)
	bl	.			/* patched by htab_finish_init() */
	/* If we failed, it is typically because the HPTE wasn't really
	 * there, so we try an insertion instead.
	 */

	/* Clear the BUSY bit and write out the PTE */

ht64_wrong_access:
	/* Bail out clearing reservation */

ht64_pte_insert_failure:
	/* Bail out restoring old PTE */
	ld	r6,STK_PARM(r6)(r1)
#endif /* CONFIG_PPC_64K_PAGES */


/*****************************************************************************
 *               Huge pages implementation is in hugetlbpage.c               *
 *****************************************************************************/