/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *       should only use index and base registers that are not shadowed,
 *       so that the fast path emulation in the non-access miss handler
 *       can use the fast path.
 */
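/*
 * Illustration (added by the editor, not part of the original note): the
 * flush loops later in this file follow this rule, e.g.
 *
 *	fdc,m	%r23(%sr3, %r26)	- flush one line, then %r26 += %r23
 *
 * Both %r23 and %r26 are ordinary, non-shadowed general registers, so if
 * such an access misses, the non-access miss handler can emulate the
 * operation and restart it on its fast path.
 */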
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>
ENTRY(flush_tlb_all_local)
	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */
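	/*
	 * Added sketch (C-like pseudocode inferred from the loop structure
	 * below; not part of the original file): the ITLB and DTLB flush
	 * loops walk every space id and offset that PDC reported via
	 * cache_info, roughly:
	 *
	 *	for (s = 0, space = SID_BASE; s < SID_COUNT;
	 *	     s++, space += SID_STRIDE) {
	 *		load space into %sr1;
	 *		for (o = 0, addr = OFF_BASE; o < OFF_COUNT; o++) {
	 *			issue LOOP pitlbe (or pdtlbe) at addr;
	 *			addr += OFF_STRIDE;
	 *		}
	 *	}
	 *
	 * The "many"/"one" loop variants below are just the LOOP >= 2 and
	 * LOOP == 1 unrollings of this.
	 */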
	/* pcxt_ssm_bug	- relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm		PSW_SM_I, %r19		/* save I-bit state */
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
1:	load32		PA(cache_info), %r1

	/* Flush Instruction Tlb */

	LDREG		ITLB_SID_BASE(%r1), %r20
	LDREG		ITLB_SID_STRIDE(%r1), %r21
	LDREG		ITLB_SID_COUNT(%r1), %r22
	LDREG		ITLB_OFF_BASE(%r1), %arg0
	LDREG		ITLB_OFF_STRIDE(%r1), %arg1
	LDREG		ITLB_OFF_COUNT(%r1), %arg2
	LDREG		ITLB_LOOP(%r1), %arg3
	addib,COND(=)		-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fitmanyloop:					/* Loop if LOOP >= 2 */
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fitmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)		-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fitmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */

fitoneloop:					/* Loop if LOOP = 1 */
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fitonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)		-1, %r22, fitoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */
fitdone:

	/* Flush Data Tlb */

	LDREG		DTLB_SID_BASE(%r1), %r20
	LDREG		DTLB_SID_STRIDE(%r1), %r21
	LDREG		DTLB_SID_COUNT(%r1), %r22
	LDREG		DTLB_OFF_BASE(%r1), %arg0
	LDREG		DTLB_OFF_STRIDE(%r1), %arg1
	LDREG		DTLB_OFF_COUNT(%r1), %arg2
	LDREG		DTLB_LOOP(%r1), %arg3

	addib,COND(=)		-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy		%arg0, %r28		/* Init base addr */

fdtmanyloop:					/* Loop if LOOP >= 2 */
	add		%r21, %r20, %r20	/* increment space */
	copy		%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)		-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy		%arg3, %r31		/* Re-init inner loop count */

	movb,tr		%arg0, %r28, fdtmanyloop /* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */

fdtoneloop:					/* Loop if LOOP = 1 */
	copy		%arg0, %r28		/* init base addr */
	copy		%arg2, %r29		/* init middle loop count */

fdtonemiddle:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)		-1, %r22, fdtoneloop	/* Outer loop count decr */
	add		%r21, %r20, %r20	/* increment space */
fdtdone:

	/*
	 * Switch back to virtual mode
	 */
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
	or		%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl		%r1, %ipsw		/* restore I-bit (entire PSW) */
ENDPROC(flush_tlb_all_local)
	.import cache_info, data
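/*
 * Added descriptive note (inferred from the code below; not in the original
 * file): flush_instruction_cache_local walks the I-cache geometry reported
 * in cache_info, with the PSW I-bit turned off so no memory-management
 * operations can intervene.  Roughly, in C-like pseudocode:
 *
 *	for (i = 0, addr = ICACHE_BASE; i < ICACHE_COUNT; i++) {
 *		issue ICACHE_LOOP fice's at addr;
 *		addr += ICACHE_STRIDE;
 *	}
 *
 * flush_data_cache_local further below is the same loop with fdce and the
 * DCACHE_* parameters.
 */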
ENTRY(flush_instruction_cache_local)
	load32		cache_info, %r1

	/* Flush Instruction Cache */

	LDREG		ICACHE_BASE(%r1), %arg0
	LDREG		ICACHE_STRIDE(%r1), %arg1
	LDREG		ICACHE_COUNT(%r1), %arg2
	LDREG		ICACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	addib,COND(=)		-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice		%r0(%sr1, %arg0)
	fice,m		%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr		%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %arg2, fioneloop	/* Outer loop count decr */
	fice,m		%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm		%r22			/* restore I-bit */
ENDPROC(flush_instruction_cache_local)
	.import cache_info, data
ENTRY(flush_data_cache_local)
	load32		cache_info, %r1

	/* Flush Data Cache */

	LDREG		DCACHE_BASE(%r1), %arg0
	LDREG		DCACHE_STRIDE(%r1), %arg1
	LDREG		DCACHE_COUNT(%r1), %arg2
	LDREG		DCACHE_LOOP(%r1), %arg3
	rsm		PSW_SM_I, %r22		/* No mmgt ops during loop */
	addib,COND(=)		-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:					/* Loop if LOOP >= 2 */
	addib,COND(>)		-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce		%r0(%sr1, %arg0)
	fdce,m		%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr		%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:					/* Loop if LOOP = 1 */
	addib,COND(>)		-1, %arg2, fdoneloop	/* Outer loop count decr */
	fdce,m		%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm		%r22			/* restore I-bit */
ENDPROC(flush_data_cache_local)
280 ENTRY(copy_user_page_asm)
286 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
287 * Unroll the loop by hand and arrange insn appropriately.
288 * GCC probably can do this just as well.
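	/*
	 * Added note (inferred from the loop counter and the ldd/std pattern
	 * below; not part of the original comment): with the counter set to
	 * PAGE_SIZE / 128, each iteration of this 64-bit path is assumed to
	 * move 128 bytes, i.e. 16 ldd / 16 std pairs, interleaved so that a
	 * PA8x00 can keep issuing two loads or one store per cycle without
	 * stalling on a just-loaded value.
	 */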
	ldi		(PAGE_SIZE / 128), %r1

	ldw		64(%r25), %r0		/* prefetch 1 cacheline ahead */
	ldw		128(%r25), %r0		/* prefetch 2 */

	ldw		192(%r25), %r0		/* prefetch 3 */
	ldw		256(%r25), %r0		/* prefetch 4 */
	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
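	/*
	 * Added worked example (describes the two instructions just below;
	 * not in the original): "addib,COND(>),n -1, %r1, 1b" is a backward
	 * branch, so its ,n completer nullifies the delay-slot ldd only when
	 * the branch is NOT taken.  While iterations remain the branch is
	 * taken, the ldd executes and starts the next iteration's loads; on
	 * the final pass the branch falls through and the ldd is nullified,
	 * so we never read past the end of the source page.
	 */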
	addib,COND(>),n	-1, %r1, 1b		/* bundle 10 */
	ldd		0(%r25), %r19		/* start next loads */
	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
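	/*
	 * Added note (inferred from the PAGE_SIZE / 64 counter below; not
	 * part of the original comment): each iteration of this 32-bit path
	 * is assumed to copy 64 bytes as 16 ldw / 16 stw, arranged in pairs
	 * so that PCXL/PCXL2 can bundle two consecutive ldw (or stw)
	 * instructions per cycle.
	 */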
	ldi		(PAGE_SIZE / 64), %r1

	addib,COND(>),n	-1, %r1, 1b
ENDPROC(copy_user_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	 maximum alias boundary being 4 Mb. We've been assured by the
 *	 parisc chip designers that there will not ever be a parisc
 *	 chip with a larger alias boundary (Never say never :-) ).
 *
 *	 Subtle: the dtlb miss handlers support the temp alias region by
 *	 "knowing" that if a dtlb miss happens within the temp alias
 *	 region it must have occurred while in clear_user_page. Since
 *	 this routine makes use of processor local translations, we
 *	 don't want to insert them into the kernel page table. Instead,
 *	 we load up some general registers (they need to be registers
 *	 which aren't shadowed) with the physical page numbers (preshifted
 *	 for tlb insertion) needed to insert the translations. When we
 *	 miss on the translation, the dtlb miss handler inserts the
 *	 translation into the tlb using these values:
 *
 *	     %r26 physical page (shifted for tlb insert) of "to" translation
 *	     %r23 physical page (shifted for tlb insert) of "from" translation
 */
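/*
 * Added sketch (hypothetical illustration of the note above; the real dtlb
 * miss handler lives elsewhere): for a faulting address inside the temp
 * alias region the handler can build the TLB entry directly from these
 * live registers instead of walking the page table, roughly:
 *
 *	if (fault address is within the temp alias region)
 *		pfn_bits = (address selects the "to" half) ? %r26 : %r23;
 *		insert pfn_bits plus fixed protection bits into the dtlb;
 *
 * which is why %r26 and %r23 must be non-shadowed registers and must stay
 * live for the whole copy/clear loop.
 */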
#if 0

	/*
	 * We can't do this since copy_user_page is used to bring in
	 * file data that might have instructions. Since the data would
	 * then need to be flushed out so the i-fetch can see it, it
	 * makes more sense to just copy through the kernel translation
	 * to do the copy.
	 *
	 * I'm still keeping this around because it may be possible to
	 * use it if more information is passed into copy_user_page().
	 * Have to do some measurements to see if it is worthwhile to
	 * lobby for such a change.
	 */
ENTRY(copy_user_page_asm)
	ldil		L%(__PAGE_OFFSET), %r1
	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */

	ldil		L%(TMPALIAS_MAP_START), %r28
	/* FIXME for different page sizes != 4k */
#ifdef CONFIG_64BIT
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	extrd,u		%r23, 56,32, %r23	/* convert phys addr to tlb insert format */
	depd		%r24, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
	depdi		1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u		%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw		%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
	depwi		1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif
	/* Purge any old translations */

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
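	/*
	 * Added note (assumption, not in the original): this disabled
	 * variant copies the page between the two temp-alias mappings
	 * formed above, i.e. roughly memcpy('to' alias, 'from' alias,
	 * PAGE_SIZE) in 64-byte chunks, so the copy runs at virtual
	 * addresses congruent with the user mapping rather than through
	 * the kernel mapping.
	 */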
	addib,COND(>)		-1, %r1, 1b

ENDPROC(copy_user_page_asm)
#endif
ENTRY(__clear_user_page_asm)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi		0, 31,32, %r28		/* clear any sign extension */
	/* FIXME: page size dependent */
#endif
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

#ifdef CONFIG_64BIT
	ldi		(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define	PREFETCHW_OP	ldd	256(%0), %r0 */
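	/*
	 * Added note (assumption, not in the original): the commented-out
	 * PREFETCHW_OP above would work like the ldw prefetches in
	 * copy_user_page_asm - a load whose target is %r0 discards the
	 * data but still pulls the line toward the cache, here 256 bytes
	 * ahead of the stores.
	 */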
	addib,COND(>)		-1, %r1, 1b

#else	/* ! CONFIG_64BIT */
	ldi		(PAGE_SIZE / 64), %r1

	addib,COND(>)		-1, %r1, 1b

#endif	/* CONFIG_64BIT */
ENDPROC(__clear_user_page_asm)
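/*
 * Added descriptive note (inferred from the code below; not in the original
 * file): flush_kernel_dcache_page_asm flushes one page's worth of data
 * cache lines through the kernel mapping.  %r26 is assumed to hold the page
 * address (first argument); %r25 is built up to PAGE_SIZE to form the end
 * address, and the loop steps by dcache_stride issuing fdc until the end of
 * the page is reached.
 */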
ENTRY(flush_kernel_dcache_page_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif

	cmpb,COND(<<)		%r26, %r25, 1b

ENDPROC(flush_kernel_dcache_page_asm)
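/*
 * Added descriptive note (inferred from the code below; not in the original
 * file): flush_user_dcache_page performs the same page-sized walk but
 * issues fdc through %sr3, i.e. against the current user address space,
 * with the fdc sequence unrolled 16 times per compare to reduce loop
 * overhead.
 */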
ENTRY(flush_user_dcache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1,63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1,31-PAGE_SHIFT,1, %r25
#endif

1:	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	fdc,m		%r23(%sr3, %r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)
ENDPROC(flush_user_dcache_page)
ENTRY(flush_user_icache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif

1:	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	fic,m		%r23(%sr3, %r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)
ENDPROC(flush_user_icache_page)
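/*
 * Added descriptive note (assumption, not in the original file):
 * purge_kernel_dcache_page walks a page the same way as the flush routines
 * above, but presumably uses pdc, the purge variant that invalidates data
 * cache lines without writing dirty data back to memory.
 */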
ENTRY(purge_kernel_dcache_page)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif

	cmpb,COND(<<)		%r26, %r25, 1b

ENDPROC(purge_kernel_dcache_page)
	/* Currently not used, but it still is a possible alternate
	 * solution.
	 */

ENTRY(flush_alias_page)
	ldil		L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi		0, 63,12, %r28		/* Clear any offset bits */
#else
	extrw,u		%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw		%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi		0, 31,12, %r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r29
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r29
#endif

	cmpb,COND(<<)		%r28, %r29, 1b
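/*
 * Added descriptive note (inferred from the code below; not in the original
 * file): the *_range_asm routines below take a start address in %r26 and an
 * end address in %r25 (the first two arguments).  The start is first
 * rounded down to a cache-line (stride) boundary with ANDCM, then one line
 * is flushed per iteration until the address reaches the end.
 */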
ENTRY(flush_user_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m		%r23(%sr3, %r26)
ENDPROC(flush_user_dcache_range_asm)
ENTRY(flush_kernel_dcache_range_asm)
	ldil		L%dcache_stride, %r1
	ldw		R%dcache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b

ENDPROC(flush_kernel_dcache_range_asm)
ENTRY(flush_user_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr3, %r26)
ENDPROC(flush_user_icache_range_asm)
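/*
 * Added descriptive note (inferred from the code below; not in the original
 * file): flush_kernel_icache_page mirrors flush_user_icache_page, but
 * issues fic through %sr4 rather than the user space register %sr3, again
 * unrolled 16 times per compare.
 */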
ENTRY(flush_kernel_icache_page)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23

#ifdef CONFIG_64BIT
	depdi,z		1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z		1, 31-PAGE_SHIFT,1, %r25
#endif

1:	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	fic,m		%r23(%sr4, %r26)
	cmpb,COND(<<)		%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)
ENDPROC(flush_kernel_icache_page)
ENTRY(flush_kernel_icache_range_asm)
	ldil		L%icache_stride, %r1
	ldw		R%icache_stride(%r1), %r23
	ldo		-1(%r23), %r21
	ANDCM		%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m		%r23(%sr4, %r26)
ENDPROC(flush_kernel_icache_range_asm)
	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY(disable_sr_hashing_asm)
	/*
	 * Switch to real mode
	 */
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		REAL_MODE_PSW, %r1
1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
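	/*
	 * Added note (assumption, not in the original): the .word values in
	 * the blocks below hand-encode moves to and from diagnose registers
	 * (mfdiag/mtdiag), which the assembler does not accept as mnemonics;
	 * the intended instruction is spelled out in each comment.
	 */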
srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word		0x141c1a00		/* mfdiag %dr0, %r28 */
	.word		0x141c1a00		/* must issue twice */
	depwi		0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi		0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word		0x141c1600		/* mtdiag %r28, %dr0 */
	.word		0x141c1600		/* must issue twice */
srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word		0x141c0600		/* mfdiag %dr0, %r28 */
	depwi		0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word		0x141c0240		/* mtdiag %r28, %dr0 */
srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word		0x144008bc		/* mfdiag %dr2, %r28 */
	depdi		0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word		0x145c1840		/* mtdiag %r28, %dr2 */
srdis_done:

	/* Switch back to virtual mode */
	rsm		PSW_SM_I, %r0		/* prep to load iia queue */
	rsm		PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl		%r0, %cr17		/* Clear IIASQ tail */
	mtctl		%r0, %cr17		/* Clear IIASQ head */
	mtctl		%r1, %cr18		/* IIAOQ head */
	mtctl		%r1, %cr18		/* IIAOQ tail */
	load32		KERNEL_PSW, %r1
ENDPROC(disable_sr_hashing_asm)