/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
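
/* Compute the TSB bucket index for a virtual address: drop the page
 * offset bits, then mask by the number of entries (a power of two).
 */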
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
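
/* Invalidate the entries of one user TSB that correspond to the virtual
 * addresses batched in the mmu_gather.  The low bit of each batched vaddr
 * (used as a flag by the TLB batching code) is masked off before hashing,
 * and tsb_flush() only clears an entry whose tag matches.
 */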
static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}
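
/* Flush the just-unmapped addresses out of this address space's TSB(s).
 * On cheetah_plus and hypervisor chips the TSB is referenced by physical
 * address, hence the __pa() conversion of the base before the walk.
 */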
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
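
/* Fill in the tsb_config for one TSB: the register value loaded at
 * context switch, the locked-TLB mapping used on pre-cheetah_plus chips,
 * and the hypervisor TSB descriptor when running under sun4v.
 */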
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
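
/* One kmem cache per possible TSB size (8K through 1MB).  Each TSB must
 * be size-aligned and physically contiguous, which the slab caches give us.
 */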
static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
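
/* For illustration only (a paraphrased sketch, not the exact fault-handler
 * code): do_sparc64_fault() does roughly
 *
 *	mm_rss = get_mm_rss(mm);
 *	if (unlikely(mm_rss >= mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
 *		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 */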
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
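
/* Called when a new address space is created (fork/exec).  Allocates the
 * initial TSB(s); the hardware MMU context number itself is assigned
 * later, at context-switch time.
 */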
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}
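
/* Free one TSB, deriving which size cache it came from out of the low
 * three bits of tsb_reg_val (the size field set by setup_tsb_params()).
 */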
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
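
/* Tear down an address space: free its TSBs and give the MMU context
 * number back to the allocation bitmap.
 */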
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}