#ifndef _SPARC64_TSB_H
#define _SPARC64_TSB_H

/* The sparc64 TSB is similar to the powerpc hashtables.  It's a
 * power-of-2 sized table of TAG/PTE pairs.  The cpu precomputes
 * pointers into this table for 8K and 64K page sizes, and also a
 * comparison TAG based upon the virtual address and context which
 * faults.
 *
 * TLB miss trap handler software does the actual lookup via something
 * like:
 *
 *	ldxa		[%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
 *	ldxa		[%g0] ASI_{D,I}MMU, %g6
 *	ldda		[%g1] ASI_NUCLEUS_QUAD_LDD, %g4
 *	cmp		%g4, %g6
 *	bne,pn	%xcc, tsb_miss_{d,i}tlb
 *	 mov		FAULT_CODE_{D,I}TLB, %g3
 *	stxa		%g5, [%g0] ASI_{D,I}TLB_DATA_IN
 *	retry
 *
 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
 * PTE.  The TAG is of the same layout as the TLB TAG TARGET mmu
 * register which is:
 *
 * -------------------------------------------------
 * |  -  |  CONTEXT |  -  |    VADDR bits 63:22    |
 * -------------------------------------------------
 *  63 61 60      48 47 42 41                     0
 *
 * Like the powerpc hashtables we need to use locking in order to
 * synchronize while we update the entries.  PTE updates need locking
 * as well.
 *
 * We need to carefully choose a lock bit for the TSB entry.  We
 * choose to use bit 47 in the tag.  Also, since we never map anything
 * at page zero in context zero, we use zero as an invalid tag entry.
 * When the lock bit is set, this forces a tag comparison failure.
 */

#define TSB_TAG_LOCK_BIT	47
#define TSB_TAG_LOCK_HIGH	(1 << (TSB_TAG_LOCK_BIT - 32))
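
/* Illustrative only (not used by the kernel): given the TAG TARGET
 * layout above, the tag for a (context, vaddr) pair and its locked
 * form could be computed as:
 *
 *	tag    = ((unsigned long) ctx << 48) | (vaddr >> 22);
 *	locked = tag | (1UL << TSB_TAG_LOCK_BIT);
 *
 * Bit 47 sits in the unused field between CONTEXT and the VADDR
 * bits, so a locked tag can never match a real TAG TARGET value.
 */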

#define TSB_MEMBAR	membar	#StoreStore

/* Some cpus support physical address quad loads.  We want to use
 * those if possible so we don't need to hard-lock the TSB mapping
 * into the TLB.  We encode some instruction patching in order to
 * support this thing.
 *
 * The kernel TSB is locked into the TLB by virtue of being in the
 * kernel image, so we don't play these games for swapper_tsb access.
 */
#ifndef __ASSEMBLY__
struct tsb_phys_patch_entry {
	unsigned int	addr;
	unsigned int	insn;
};
extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
#endif
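
/* Sketch of how boot code might apply these patches (the real loop
 * lives elsewhere in the sparc64 tree; this is for illustration):
 *
 *	struct tsb_phys_patch_entry *p;
 *
 *	for (p = &__tsb_phys_patch; p < &__tsb_phys_patch_end; p++) {
 *		unsigned long addr = p->addr;
 *
 *		*(unsigned int *) addr = p->insn;
 *		__asm__ __volatile__("flush	%0" : : "r" (addr));
 *	}
 *
 * Each 661: label below records its address in .tsb_phys_patch so
 * the virtual-ASI instruction can be overwritten at boot with the
 * physical-ASI variant that follows it.
 */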

#define TSB_LOAD_QUAD(TSB, REG)	\
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
	.previous

#define TSB_LOAD_TAG_HIGH(TSB, REG) \
661:	lduwa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	lduwa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_LOAD_TAG(TSB, REG) \
661:	ldxa		[TSB] ASI_N, REG; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	ldxa		[TSB] ASI_PHYS_USE_EC, REG; \
	.previous

#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
661:	casa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_CAS_TAG(TSB, REG1, REG2) \
661:	casxa		[TSB] ASI_N, REG1, REG2; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	casxa		[TSB] ASI_PHYS_USE_EC, REG1, REG2; \
	.previous

#define TSB_STORE(ADDR, VAL) \
661:	stxa		VAL, [ADDR] ASI_N; \
	.section	.tsb_phys_patch, "ax"; \
	.word		661b; \
	stxa		VAL, [ADDR] ASI_PHYS_USE_EC; \
	.previous

#define TSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	TSB_LOAD_TAG_HIGH(TSB, REG1);	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_CAS_TAG_HIGH(TSB, REG1, REG2);	\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR
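
/* TSB_LOCK_TAG in C-like pseudocode (illustration only; cas32 stands
 * in for the casa instruction): spin on the upper 32 bits of the tag
 * until the lock bit clears, then try to install just the lock bit,
 * retrying if the word changed underneath us:
 *
 *	for (;;) {
 *		u32 old = tag_high;
 *		if (old & TSB_TAG_LOCK_HIGH)
 *			continue;
 *		if (cas32(&tag_high, old, TSB_TAG_LOCK_HIGH) == old)
 *			break;
 *	}
 *	membar #StoreStore;
 *
 * The winning cas leaves only the lock bit in the tag's upper half;
 * that is fine because TSB_WRITE rewrites the whole tag afterwards.
 */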

#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	TSB_STORE(TSB, TTE); \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	TSB_STORE(TSB, TAG);
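
/* Ordering in TSB_WRITE matters: the TTE at offset 0x8 is stored
 * first, then a #StoreStore barrier, and only then the tag.  Since a
 * matching tag is what makes the entry usable (and the tag store
 * also clears the lock bit set by TSB_LOCK_TAG), a concurrent miss
 * handler that sees the new tag is guaranteed to see the new TTE.
 */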

#define KTSB_LOAD_QUAD(TSB, REG) \
	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;

#define KTSB_STORE(ADDR, VAL) \
	stxa		VAL, [ADDR] ASI_N;

#define KTSB_LOCK_TAG(TSB, REG1, REG2)	\
99:	lduwa	[TSB] ASI_N, REG1;	\
	sethi	%hi(TSB_TAG_LOCK_HIGH), REG2;\
	andcc	REG1, REG2, %g0;	\
	bne,pn	%icc, 99b;		\
	 nop;				\
	casa	[TSB] ASI_N, REG1, REG2;\
	cmp	REG1, REG2;		\
	bne,pn	%icc, 99b;		\
	 nop;				\
	TSB_MEMBAR

#define KTSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	stxa	TTE, [TSB] ASI_N; \
	sub	TSB, 0x8, TSB; \
	TSB_MEMBAR; \
	stxa	TAG, [TSB] ASI_N;

/* Do a kernel page table walk.  Leaves physical PTE pointer in
 * REG1.  Jumps to FAIL_LABEL on early page table walk termination.
 * VADDR will not be clobbered, but REG2 will.
 */
#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL)	\
	sethi		%hi(swapper_pg_dir), REG1; \
	or		REG1, %lo(swapper_pg_dir), REG1; \
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduw		[REG1 + REG2], REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;
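
/* Rough C equivalent of the index arithmetic above (a sketch, not
 * kernel code): each sllx/srlx pair isolates one index field of
 * VADDR, already scaled to a byte offset into a page-sized table,
 * and "sllx REG1, 11" turns a 32-bit pgd/pmd entry back into the
 * physical address of the next-level table:
 *
 *	off = ((vaddr << (64 - (PGDIR_SHIFT + PGDIR_BITS)))
 *		>> (64 - PAGE_SHIFT)) & ~0x3UL;
 *	pmd_base = (unsigned long) *(u32 *)((char *)swapper_pg_dir + off) << 11;
 *
 * The pmd step repeats this with PMD_SHIFT/PMD_BITS, and the final
 * step masks with ~0x7 to index 8-byte PTE slots instead.
 */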

/* Do a user page table walk in MMU globals.  Leaves physical PTE
 * pointer in REG1.  Jumps to FAIL_LABEL on early page table walk
 * termination.  Physical base of page tables is in PHYS_PGD which
 * will not be modified.
 *
 * VADDR will not be clobbered, but REG1 and REG2 will.
 */
#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL)	\
	sllx		VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	andn		REG2, 0x3, REG2; \
	lduwa		[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x3, REG2; \
	lduwa		[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
	brz,pn		REG1, FAIL_LABEL; \
	 sllx		VADDR, 64 - PMD_SHIFT, REG2; \
	srlx		REG2, 64 - PAGE_SHIFT, REG2; \
	sllx		REG1, 11, REG1; \
	andn		REG2, 0x7, REG2; \
	add		REG1, REG2, REG1;
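
/* This is the same three-level walk as KERN_PGTABLE_WALK, except that
 * even the top-level load goes through ASI_PHYS_USE_EC: at TL>0 only
 * the pgd's physical base (PHYS_PGD) is at hand, so the walk never
 * relies on a virtually addressed load.
 */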

/* Lookup an OBP mapping on VADDR in the prom_trans[] table at TL>0.
 * If no entry is found, FAIL_LABEL will be branched to.  On success
 * the resulting PTE value will be left in REG1.  VADDR is preserved
 * by this routine.
 */
#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
	sethi		%hi(prom_trans), REG1; \
	or		REG1, %lo(prom_trans), REG1; \
97:	ldx		[REG1 + 0x00], REG2; \
	brz,pn		REG2, FAIL_LABEL; \
	 nop; \
	ldx		[REG1 + 0x08], REG3; \
	add		REG2, REG3, REG3; \
	cmp		REG2, VADDR; \
	bgu,pt		%xcc, 98f; \
	 cmp		VADDR, REG3; \
	bgeu,pt		%xcc, 98f; \
	 ldx		[REG1 + 0x10], REG3; \
	sub		VADDR, REG2, REG2; \
	ba,pt		%xcc, 99f; \
	 add		REG3, REG2, REG1; \
98:	ba,pt		%xcc, 97b; \
	 add		REG1, (3 * 8), REG1; \
99:
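
/* The 0x00/0x08/0x10 offsets and the (3 * 8) stride above assume a
 * prom_trans[] entry layout equivalent to this sketch:
 *
 *	struct {
 *		unsigned long virt;	base vaddr; zero terminates table
 *		unsigned long size;	length of the mapping in bytes
 *		unsigned long data;	PTE value for the base vaddr
 *	};
 *
 * On a hit, REG1 = data + (VADDR - virt), i.e. the entry's PTE
 * adjusted by the offset of VADDR within the mapping.
 */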

/* We use a 32K TSB for the whole kernel; this lets us handle
 * about 16MB of modules and vmalloc mappings without incurring
 * many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	\
	(KERNEL_TSB_SIZE_BYTES / 16)
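
/* Where the 16MB figure above comes from: 32K / 16 bytes per entry
 * gives 2048 entries, and at one 8K page per entry that covers
 * 2048 * 8K = 16MB before any two addresses must share a slot.
 */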

/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
 * and the found TTE will be left in REG1.  REG3 and REG4 must
 * be an even/odd pair of registers.
 *
 * VADDR and TAG will be preserved and not clobbered by this macro.
 */
#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
	sethi		%hi(swapper_tsb), REG1; \
	or		REG1, %lo(swapper_tsb), REG1; \
	srlx		VADDR, PAGE_SHIFT, REG2; \
	and		REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
	sllx		REG2, 4, REG2; \
	add		REG1, REG2, REG2; \
	KTSB_LOAD_QUAD(REG2, REG3); \
	cmp		REG3, TAG; \
	be,a,pt		%xcc, OK_LABEL; \
	 mov		REG4, REG1;
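
/* Rough C sketch of the lookup (illustration only; swapper_tsb is
 * treated as an array of { u64 tag; u64 tte; } pairs):
 *
 *	ent = &swapper_tsb[(vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1)];
 *	if (ent->tag == tag) {
 *		reg1 = ent->tte;
 *		goto OK_LABEL;
 *	}
 *
 * The quad load fills the even/odd pair REG3/REG4 with tag and TTE
 * in one access, and the annulled delay slot copies the TTE into
 * REG1 only when the branch is taken.
 */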

#endif /* !(_SPARC64_TSB_H) */