[SPARC64]: Bulletproof MMU context locking.
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
        vaddr >>= PAGE_SHIFT;
        return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}
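
/* A worked example of the hashing above (a sketch, assuming the usual
 * sparc64 8K base page, i.e. PAGE_SHIFT == 13, and a 512-entry TSB):
 * for vaddr 0x802000, tsb_hash() gives (0x802000 >> 13) & 511 == 1,
 * and the tag stored for and compared against that slot is
 * vaddr >> 22 == 2.
 */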

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v)) {
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
                        membar_storeload_storestore();
                }
        }
}
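
/* A note on the invalidation above: a tag with TSB_TAG_INVALID_BIT set
 * is what marks the slot empty (assuming the TSB miss handlers test that
 * bit, as the name suggests), and the membar_storeload_storestore()
 * orders that tag store before subsequent loads and stores touching the
 * entry.
 */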

void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        struct tsb *tsb = mm->context.tsb;
        unsigned long nentries = mm->context.tsb_nentries;
        unsigned long base;
        int i;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(tsb);
        else
                base = (unsigned long) tsb;

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                unsigned long tag, ent, hash;

                v &= ~0x1UL;

                hash = tsb_hash(v, nentries);
                ent = base + (hash * sizeof(struct tsb));
                tag = (v >> 22UL);

                tsb_flush(ent, tag);
        }
}
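
/* The per-entry work above boils down to "invalidate the slot iff its
 * tag still matches this address".  A rough C sketch of what tsb_flush()
 * is handed (it is an out-of-line helper; its atomicity details are
 * assumed here, not shown):
 *
 *      struct tsb *e = &tsb[tsb_hash(v, nentries)];
 *      if (tag_compare(e->tag, v))
 *              e->tag = 1UL << TSB_TAG_INVALID_BIT;
 *
 * with the twist that ent is a physical address on cheetah_plus and
 * hypervisor chips, hence the __pa(tsb) base selection.
 */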

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

        base = TSBMAP_BASE;
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;

        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;

        default:
                BUG();
        };
        tte |= pte_sz_bits(page_sz);

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB.  */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = 0;
                mm->context.tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_reg_val = tsb_reg;
                mm->context.tsb_map_vaddr = base;
                mm->context.tsb_map_pte = tte;
        }

        /* Setup the Hypervisor TSB descriptor.  */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr;

                switch (PAGE_SIZE) {
                case 8192:
                default:
                        hp->pgsz_idx = HV_PGSZ_IDX_8K;
                        break;

                case 64 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_64K;
                        break;

                case 512 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_512K;
                        break;

                case 4 * 1024 * 1024:
                        hp->pgsz_idx = HV_PGSZ_IDX_4MB;
                        break;
                };
                hp->assoc = 1;
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (PAGE_SIZE) {
                case 8192:
                default:
                        hp->pgsz_mask = HV_PGSZ_MASK_8K;
                        break;

                case 64 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_64K;
                        break;

                case 512 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_512K;
                        break;

                case 4 * 1024 * 1024:
                        hp->pgsz_mask = HV_PGSZ_MASK_4MB;
                        break;
                };
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}
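
/* To make the register encoding above concrete: for a 64KB TSB
 * (8192 << 3), a cheetah_plus or hypervisor cpu ends up with
 * tsb_reg_val == tsb_paddr | 0x3UL, i.e. the physical base plus the
 * size code, and no locked TLB entry; older cpus instead map the TSB
 * at TSBMAP_BASE with the locked translation described by
 * tsb_map_vaddr/tsb_map_pte.
 */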

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
                     struct tsb *new_tsb, unsigned long new_size)
{
        unsigned long old_nentries = old_size / sizeof(struct tsb);
        unsigned long new_nentries = new_size / sizeof(struct tsb);
        unsigned long i;

        for (i = 0; i < old_nentries; i++) {
                register unsigned long tag asm("o4");
                register unsigned long pte asm("o5");
                unsigned long v, hash;

                if (tlb_type == hypervisor) {
                        __asm__ __volatile__(
                                "ldda [%2] %3, %0"
                                : "=r" (tag), "=r" (pte)
                                : "r" (__pa(&old_tsb[i])),
                                  "i" (ASI_QUAD_LDD_PHYS_4V));
                } else if (tlb_type == cheetah_plus) {
                        __asm__ __volatile__(
                                "ldda [%2] %3, %0"
                                : "=r" (tag), "=r" (pte)
                                : "r" (__pa(&old_tsb[i])),
                                  "i" (ASI_QUAD_LDD_PHYS));
                } else {
                        __asm__ __volatile__(
                                "ldda [%2] %3, %0"
                                : "=r" (tag), "=r" (pte)
                                : "r" (&old_tsb[i]),
                                  "i" (ASI_NUCLEUS_QUAD_LDD));
                }

                if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
                           (1UL << TSB_TAG_INVALID_BIT)))
                        continue;

                /* We only put base page size PTEs into the TSB,
                 * but that might change in the future.  This code
                 * would need to be changed if we start putting larger
                 * page size PTEs into there.
                 */
                WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

                /* The tag holds bits 22 to 63 of the virtual address
                 * and the context.  Clear out the context, and shift
                 * up to make a virtual address.
                 */
                v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

                /* The implied bits of the tag (bits 13 to 21) are
                 * determined by the TSB entry index, so fill that in.
                 */
                v |= (i & (512UL - 1UL)) << 13UL;

                hash = tsb_hash(v, new_nentries);
                if (tlb_type == cheetah_plus ||
                    tlb_type == hypervisor) {
                        __asm__ __volatile__(
                                "stxa   %0, [%1] %2\n\t"
                                "stxa   %3, [%4] %2"
                                : /* no outputs */
                                : "r" (tag),
                                  "r" (__pa(&new_tsb[hash].tag)),
                                  "i" (ASI_PHYS_USE_EC),
                                  "r" (pte),
                                  "r" (__pa(&new_tsb[hash].pte)));
                } else {
                        new_tsb[hash].tag = tag;
                        new_tsb[hash].pte = pte;
                }
        }
}
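
/* A reconstruction example for the rehash above: an old-TSB entry at
 * index 5 whose tag is 0x7 describes the virtual page
 * (0x7 << 22) | (5 << 13) == 0x1c0a000, which is then hashed into the
 * new TSB.  (The "i & (512UL - 1UL)" masking matches a 512-entry,
 * i.e. 8KB, old TSB; this sketch assumes that size.)
 */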

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try to grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
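/* For instance, assuming sizeof(struct tsb) is 16 bytes (a tag word plus
 * a pte word, which also matches the tsb_bytes / 16 used for num_ttes
 * above): an 8KB TSB holds 512 entries and is grown once the RSS passes
 * 512 * 3 / 4 == 384, a 16KB TSB at 768, and so on, until the 1MB cap is
 * reached and tsb_rss_limit is pinned at ~0UL.
 */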
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long size, old_size;
        struct page *page;
        struct tsb *old_tsb;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);

        for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
                unsigned long n_entries = size / sizeof(struct tsb);

                n_entries = (n_entries * 3) / 4;
                if (n_entries > rss)
                        break;
        }

        page = alloc_pages(gfp_flags, get_order(size));
        if (unlikely(!page))
                return;

        /* Mark all tags as invalid.  */
        memset(page_address(page), 0x40, size);

        if (size == max_tsb_size)
                mm->context.tsb_rss_limit = ~0UL;
        else
                mm->context.tsb_rss_limit =
                        ((size / sizeof(struct tsb)) * 3) / 4;

        old_tsb = mm->context.tsb;
        old_size = mm->context.tsb_nentries * sizeof(struct tsb);

        if (old_tsb)
                copy_tsb(old_tsb, old_size, page_address(page), size);

        mm->context.tsb = page_address(page);
        setup_tsb_params(mm, size);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Now force all other processors to reload the new
                 * TSB state.
                 */
                smp_tsb_sync(mm);

                /* Finally reload it on the local cpu.  No further
                 * references will remain to the old TSB and we can
                 * thus free it up.
                 */
                tsb_context_switch(mm);

                free_pages((unsigned long) old_tsb, get_order(old_size));
        }
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        mm->context.tsb = NULL;
        tsb_grow(mm, 0, GFP_KERNEL);

        if (unlikely(!mm->context.tsb))
                return -ENOMEM;

        return 0;
}

void destroy_context(struct mm_struct *mm)
{
        unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
        unsigned long flags;

        free_pages((unsigned long) mm->context.tsb, get_order(size));

        /* We can remove these later, but for now it's useful
         * to catch any bogus post-destroy_context() references
         * to the TSB.
         */
        mm->context.tsb = NULL;
        mm->context.tsb_reg_val = 0UL;

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}