/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
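
/* Illustrative example, assuming 8K base pages and a 512-entry TSB:
 * for vaddr 0x02345678, tsb_hash() yields (0x02345678 >> 13) & 511 = 0x1a2,
 * and the tag that must match is 0x02345678 >> 22 = 0x8.  The low nine
 * index bits are implied by the bucket and need not be stored in the tag.
 */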

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long nentries = mm->context.tsb_nentries;
	unsigned long base;
	int i;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;
		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);
		tsb_flush(ent, tag);
	}
}

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.  The TSB size field encodes log2 of the
	 * number of 8K units, from 0 (an 8K TSB) up to 7 (a 1MB TSB).
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;
	case 8192 << 1: tsb_reg = 0x1UL; page_sz = 64 * 1024;       break;
	case 8192 << 2: tsb_reg = 0x2UL; page_sz = 64 * 1024;       break;
	case 8192 << 3: tsb_reg = 0x3UL; page_sz = 64 * 1024;       break;
	case 8192 << 4: tsb_reg = 0x4UL; page_sz = 512 * 1024;      break;
	case 8192 << 5: tsb_reg = 0x5UL; page_sz = 512 * 1024;      break;
	case 8192 << 6: tsb_reg = 0x6UL; page_sz = 512 * 1024;      break;
	case 8192 << 7: tsb_reg = 0x7UL; page_sz = 4 * 1024 * 1024; break;
	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}
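
	/* Worked example for the virtual-mapping branch above, with values
	 * chosen purely for illustration: a 16K TSB at physical address
	 * 0x1234c000 selects page_sz = 64K, so the locked TLB entry maps
	 * the 64K-aligned region 0x12340000 at 'base', and tsb_reg carries
	 * the 0xc000 offset of the TSB within that mapping.
	 */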

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:		hp->pgsz_idx = HV_PGSZ_IDX_8K;   break;
		case 64 * 1024:		hp->pgsz_idx = HV_PGSZ_IDX_64K;  break;
		case 512 * 1024:	hp->pgsz_idx = HV_PGSZ_IDX_512K; break;
		case 4 * 1024 * 1024:	hp->pgsz_idx = HV_PGSZ_IDX_4MB;  break;
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;	/* each struct tsb entry is 16 bytes */
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:		hp->pgsz_mask = HV_PGSZ_MASK_8K;   break;
		case 64 * 1024:		hp->pgsz_mask = HV_PGSZ_MASK_64K;  break;
		case 512 * 1024:	hp->pgsz_mask = HV_PGSZ_MASK_512K; break;
		case 4 * 1024 * 1024:	hp->pgsz_mask = HV_PGSZ_MASK_4MB;  break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v, hash;

		/* Read the (tag, pte) pair with one quad load, physically
		 * or virtually depending on the cpu type.
		 */
		if (tlb_type == hypervisor) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS_4V));
		} else if (tlb_type == cheetah_plus) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS));
		} else {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (&old_tsb[i]),
				  "i" (ASI_NUCLEUS_QUAD_LDD));
		}

		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
			   (1UL << TSB_TAG_INVALID_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs into there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);
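
		/* That is, the size field in the PTE must be the base (8K)
		 * page encoding; anything else would mean a larger page size
		 * PTE was inserted, which the rehash below does not handle.
		 */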

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;
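
		/* Worked example, with an address chosen purely for
		 * illustration: a tag whose low bits are 0x8 reconstructs to
		 * v = 0x8 << 22 = 0x02000000, and if it sat at index
		 * i = 0x1a2 the implied bits add 0x1a2 << 13 = 0x344000,
		 * giving v = 0x02344000, the page address that hashes back
		 * into bucket 0x1a2 of an 8K-page, 512-entry TSB.
		 */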

		hash = tsb_hash(v, new_nentries);
		if (tlb_type == cheetah_plus ||
		    tlb_type == hypervisor) {
			__asm__ __volatile__(
				"stxa	%0, [%1] %2\n\t"
				"stxa	%3, [%4] %2"
				: /* no outputs */
				: "r" (tag),
				  "r" (__pa(&new_tsb[hash].tag)),
				  "i" (ASI_PHYS_USE_EC),
				  "r" (pte),
				  "r" (__pa(&new_tsb[hash].pte)));
		} else {
			new_tsb[hash].tag = tag;
			new_tsb[hash].pte = pte;
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
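/* For illustration: an 8K TSB holds 8192 / 16 = 512 entries, so with 8K
 * base pages the 3/4 threshold is reached once the RSS exceeds 384 pages,
 * roughly 3MB of resident memory.
 */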
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags, get_order(size));
	if (unlikely(!page))
		return;

	/* Mark all tags as invalid.  */
	memset(page_address(page), 0x40, size);
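	/* A note on the 0x40 fill value: writing 0x40 into every byte makes
	 * each 64-bit tag read back as 0x4040404040404040, which has bit 46
	 * set; that is the TSB_TAG_INVALID_BIT position assumed here, so
	 * every entry starts out invalid.
	 */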

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;
	tsb_grow(mm, 0, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
	unsigned long flags;

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);

		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}