/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>

	.text
	.align		32
/*
 * On a second level vpte miss, check whether the original fault is to the OBP
 * range (note that this is only possible for instruction miss, data misses to
 * obp range do not use vpte). If so, go back directly to the faulting address.
 * This is because we want to read the tpc, otherwise we have no way of knowing
 * the 8k aligned faulting address if we are using >8k kernel pagesize. This
 * also ensures no vpte range addresses are dropped into tlb while obp is
 * executing (see inherit_locked_prom_mappings() rant).
 */
sparc64_vpte_nucleus:
	/* Note that kvmap below has verified that the address is
	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
	 * here we need only check if it is an OBP address or not.
	 */
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kern_vpte
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, vpte_insn_obp
	 nop
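
	/* Below the OBP window (< LOW_OBP_ADDRESS) we branch to kern_vpte;
	 * inside [LOW_OBP_ADDRESS, 4GB) we handle it as an OBP instruction
	 * fault; at or above 4GB we fall through into kern_vpte as well.
	 */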
	/* These two instructions are patched by paging_init().  */
kern_vpte:
	sethi		%hi(swapper_pgd_zero), %g5
	lduw		[%g5 + %lo(swapper_pgd_zero)], %g5

	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
	ba,pt		%xcc, sparc64_kpte_continue
	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
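
	/* sparc64_kpte_continue is the tail of the nucleus page table
	 * walk in dtlb_backend.S; it resumes with the kernel PGD we
	 * just loaded in %g5.
	 */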

vpte_noent:
	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
	 * skip over the trap instruction so that the top level
	 * TLB miss handler will think this %g5 value is just an
	 * invalid PTE, thus branching to full fault processing.
	 */
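	/* With %g1 == TLB_SFSR (0x18), %g1 + %g1 == 0x30, which is the
	 * TLB TAG_ACCESS register offset; one mov thus serves for the
	 * tag write below.
	 */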
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_DMMU
	done

vpte_insn_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5
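
	/* prom_pmd_phys holds the *physical* address of a PMD table built
	 * by inherit_prom_mappings() to describe OBP's 8k translations;
	 * the walk below uses physical loads (ASI_PHYS_USE_EC) so that it
	 * cannot itself take a TLB miss.
	 */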
	/* Behave as if we are at TL0.  */
	wrpr		%g0, 1, %tl
	rdpr		%tpc, %g4	/* Find original faulting iaddr */
	srlx		%g4, 13, %g4	/* Throw out context bits */
	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
	/* Restore previous TAG_ACCESS.  */
	mov		TLB_SFSR, %g1
	stxa		%g4, [%g1 + %g1] ASI_IMMU

	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6
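
	/* The OBP tables use 2048-entry PMDs of 4-byte entries, each
	 * covering 8MB (2^23 bytes) of virtual space: bits [33:23] of
	 * the vaddr index the PMD, scaled by 4 into a byte offset.
	 */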
	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5
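
	/* A non-zero PMD entry holds the PTE table's physical address
	 * shifted right by 11; the sllx in the delay slot rebuilds the
	 * physical pointer.
	 */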
	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop
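
	/* A valid TTE has bit 63 (_PAGE_VALID) set, so brgez (sign bit
	 * clear) catches invalid entries.
	 */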
	/* TLB load and return from trap.  */
	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry

kvmap_do_obp:
	sethi		%hi(prom_pmd_phys), %g5
	ldx		[%g5 + %lo(prom_pmd_phys)], %g5
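
	/* Same two-level OBP table walk as vpte_insn_obp above, but for
	 * a data access, so the result is loaded into the D-TLB.
	 */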
	/* Get PMD offset.  */
	srlx		%g4, 23, %g6
	and		%g6, 0x7ff, %g6
	sllx		%g6, 2, %g6
	/* Load PMD, is it valid?  */
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g5, 11, %g5
	/* Get PTE offset.  */
	srlx		%g4, 13, %g6
	and		%g6, 0x3ff, %g6
	sllx		%g6, 3, %g6

	/* Load PTE.  */
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop
	/* TLB load and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
	retry
/*
 * On a first level data miss, check whether this is to the OBP range (note
 * that such accesses can be made by prom, as well as by kernel using
 * prom_getproperty on "address"), and if so, do not use vpte access ...
 * rather, use information saved during inherit_prom_mappings() using 8k
 * pagesize.
 */
	.align		32
kvmap:
	brgez,pn	%g4, kvmap_nonlinear
	 nop
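
	/* Linear kernel addresses live at the top of the address space
	 * (PAGE_OFFSET has bit 63 set), so a non-negative %g4 means a
	 * non-linear (vmalloc/module/OBP) address.
	 */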
#ifdef CONFIG_DEBUG_PAGEALLOC
	.globl		kvmap_linear_patch
kvmap_linear_patch:
#endif
	ba,pt		%xcc, kvmap_load
	 xor		%g2, %g4, %g5
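
	/* The xor builds a 4MB TTE directly from the virtual address:
	 * %g2 was loaded with (KERN_HIGHBITS | KERN_LOWBITS) by the top
	 * level miss handler (see dtlb_base.S).  With DEBUG_PAGEALLOC,
	 * paging_init() patches the branch above to a nop (see mm/init.c)
	 * so execution falls through into the full page table walk below.
	 */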
#ifdef CONFIG_DEBUG_PAGEALLOC
	sethi		%hi(swapper_pg_dir), %g5
	or		%g5, %lo(swapper_pg_dir), %g5
	sllx		%g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	andn		%g6, 0x3, %g6
	lduw		[%g5 + %g6], %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x3, %g6
	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brz,pn		%g5, longpath
	 sllx		%g4, 64 - PMD_SHIFT, %g6
	srlx		%g6, 64 - PAGE_SHIFT, %g6
	sllx		%g5, 11, %g5
	andn		%g6, 0x7, %g6
	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
	brgez,pn	%g5, longpath
	 nop
	ba,a,pt		%xcc, kvmap_load
#endif

kvmap_nonlinear:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, longpath
	 mov		(VMALLOC_END >> 24), %g5
	sllx		%g5, 24, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, longpath
	 nop
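
	/* VMALLOC_END lies above 4GB and thus cannot be materialized
	 * with sethi, so it is built from a 13-bit immediate shifted
	 * up by 24 bits.
	 */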
kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_do_obp
	 nop

kvmap_vmalloc_addr:
	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
	ldxa		[%g3 + %g6] ASI_N, %g5
	brgez,pn	%g5, longpath
	 nop
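
	/* %g3 (VPTE base) and %g6 (VPTE offset) were set up by the top
	 * level D-TLB miss handler in dtlb_base.S.  This is a normal
	 * virtual load, so it may itself TLB miss; that nested miss is
	 * what lands in sparc64_vpte_nucleus at the top of this file.
	 */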

kvmap_load:
	/* PTE is valid, load into TLB and return from trap.  */
	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry