/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register. Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values. Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/* Handle labels (which must be positive integers). */
	label_second_part = 1,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,

UASM_L_LA(_second_part)
UASM_L_LA(_module_alloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p = tlb_handler;

	memset(tlb_handler, 0, sizeof(tlb_handler));

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
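/*
 * For orientation, the sequence built above corresponds roughly to the
 * following assembly (a sketch, not a verbatim disassembly; comments
 * are illustrative and assume 4K pages with a two-level table):
 *
 *	mfc0	k0, c0_badvaddr		# faulting virtual address
 *	lui	k1, %hi(pgd_current)
 *	lw	k1, %lo(pgd_current)(k1)
 *	srl	k0, k0, 22		# pgd index (one entry per 4MB)
 *	sll	k0, k0, 2		# scale to 4-byte pgd entries
 *	addu	k1, k1, k0
 *	mfc0	k0, c0_context		# BadVPN in bits 2..
 *	lw	k1, 0(k1)		# pgd entry -> pte page
 *	andi	k0, k0, 0xffc		# pte byte offset within the page
 *	addu	k1, k1, k0
 *	lw	k0, 0(k1)		# the pte itself
 *	nop
 *	mtc0	k0, c0_entrylo0
 *	mfc0	k1, c0_epc		# return address
 *	tlbwr				# write a random TLB slot
 *	jr	k1
 *	rfe				# branch delay: restore mode bits
 */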
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENTRYHI and thus report the wrong results.
 *
 * The software workaround is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This erratum also exists on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
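	/*
	 * A sketch of the elided case list, assuming the cores named in
	 * the errata discussion above: the affected CPUs get an extra
	 * nop emitted ahead of the probe, everything else a bare tlbp.
	 */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}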
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}
	switch (current_cpu_type()) {
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);

		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
	}
}
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	uasm_il_bltz(p, r, tmp, label_module_alloc);
#else
	uasm_il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp);	/* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
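/*
 * Rough C equivalent of the walk above (a sketch for orientation only,
 * eliding the per-CPU pgd_current indexing done on SMP):
 *
 *	pgd = pgd_current[cpu];
 *	pmd = (pmd_t *)pgd[(badvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)];
 *	ptr = &pmd[(badvaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];
 */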
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	uasm_l_module_alloc(l, *p);
	/*
	 * Assumption:
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START >= 0xe000000000000000UL
	 */
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);

	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
	}
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
	} else {
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
	}
	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */
	else
		UASM_i_LA(p, ptr, VMALLOC_START);
#else
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
#endif
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
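/*
 * Rough C equivalent of the sequence above (sketch only):
 *
 *	ptr = (long)pgd_current[cpu];	(just pgd_current on UP)
 *	ptr += (badvaddr >> PGDIR_SHIFT) << PGD_T_LOG2;
 *
 * i.e. ptr ends up pointing at the pgd entry covering the fault.
 */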
#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	}

	UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
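/*
 * Worked example for the shift/mask above, assuming the default
 * configuration of 4K pages (PAGE_SHIFT == 12) and 4-byte ptes
 * (PTE_T_LOG2 == 2): shift == 1 and mask == 0xff8.  CP0 Context holds
 * VPN2 << 4, so after (ctx >> 1) & 0xff8 the register holds VPN2 << 3,
 * the byte offset of the even/odd pte pair within the page table page.
 */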
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in different cachelines, or when a load instruction (probably
	 * any memory reference) sits between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
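/*
 * The ">> 6" conversions above rely on the pte layout this file
 * assumes: the low six bits hold software-only flags, and the hardware
 * fields sit directly above them, so the shift drops the software bits
 * and lands G/V/D/C and the PFN exactly where the EntryLo registers
 * expect them.
 */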
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0. If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
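/*
 * Sketch of the vector layout this exploits (offsets per the MIPS64
 * architecture; a sketch, not authoritative for every SoC):
 *
 *	ebase + 0x000:	TLB refill vector, 32 instruction slots,
 *			unused on a 64-bit kernel while EXL == 0
 *	ebase + 0x080:	XTLB refill vector, 32 instruction slots,
 *			the entry point actually taken
 *
 * A handler longer than 32 instructions starts at the XTLB vector and
 * branches back into the spare slots below it for the remainder.
 */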
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));
	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, *p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#ifdef MODULE_START
		const enum label_id ls = label_module_alloc;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflowed one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end. One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
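/*
 * The andi/xori pair above computes (pte & (P|R)) ^ (P|R), which is
 * zero exactly when both _PAGE_PRESENT and _PAGE_READ are set, so a
 * single bnez catches every not-present or not-readable pte.
 */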
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
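	/*
	 * The mask below keeps only the 28-bit region-relative target
	 * that the MIPS j instruction can encode; the slowpath entry
	 * must live in the same 256MB segment as the handler.
	 */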
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
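	/*
	 * The ori/xori pair above rounds ptr down to the even pte of
	 * the even/odd pair: ori forces the sizeof(pte_t) bit on, xori
	 * then clears it again, regardless of its initial state.
	 */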
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R3000:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}