arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
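/*
 * Worked example (assuming the common 4K base page size, PAGE_SHIFT
 * == 12): each EntryHi VPN2 names a pair of pages, hence the shift by
 * PAGE_SHIFT + 1.  Entry 3 gets VPN2 CKSEG0 + (3 << 13), entry 4 gets
 * CKSEG0 + (4 << 13), and so on -- all distinct, and all inside the
 * unmapped CKSEG0 segment, so a dummy entry can never match a real
 * translation.
 */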

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
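/*
 * Note the deliberately unbalanced braces above: on SMTC,
 * ENTER_CRITICAL opens a block (scoping mvpflags for the dvpe()/evpe()
 * pair) that only EXIT_CRITICAL closes, so the two macros must always
 * be used as a matched pair within one function.
 */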

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * The Loongson2 has a four-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not completely transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
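/*
 * (The value 4 written to the CP0 diagnostic register is a
 * Loongson2-specific ITLB-invalidate trigger, not an architecturally
 * defined interface.)
 */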

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
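
/*
 * The probe-and-invalidate idiom used below (and in the other
 * single-entry flush routines): load EntryHi with the VPN2 (plus ASID
 * for user mappings) to be killed, tlb_probe() for it, and if the
 * probe hit (index >= 0) overwrite that slot with a unique dummy VPN2
 * and a zeroed EntryLo pair so it can never match again.
 */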

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;	/* each TLB entry maps a page pair */
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			/* Round to the double-page granularity of EntryHi. */
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

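/*
 * Like local_flush_tlb_range(), but for kernel (global) mappings: the
 * probe uses the bare VPN2 with no ASID, and when the range is too big
 * we fall back to local_flush_tlb_all() instead of an ASID bump.
 */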
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed to
 * work around it.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
	/* This could be a huge page.  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		/* Shift the software PTE down into EntryLo format. */
		lo = pte_val(*ptep) >> 6;
		write_c0_entrylo0(lo);
		/*
		 * The second half of the huge page lies HPAGE_SIZE / 2
		 * further on physically, which is HPAGE_SIZE >> 7 in
		 * EntryLo units.
		 */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_val(*ptep++) >> 6);
		write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
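
/*
 * Minimal usage sketch (hypothetical, not part of this file): a board
 * setup routine could pin an uncached 16K mapping of a device through
 * a wired entry.  The physical and virtual addresses below are made
 * up for illustration; EntryLo here is G|V|D in bits 0-2, the cache
 * attribute in bits 3-5 (2 = uncached) and the PFN from bit 6 up.
 */
#if 0
static void __init map_example_device(void)
{
	unsigned long pa = 0x1f000000UL;	/* invented device base */
	unsigned long lo0 = ((pa >> 12) << 6) | (2 << 3) | 0x7;
	unsigned long lo1 = (((pa + 0x4000) >> 12) << 6) | (2 << 3) | 0x7;

	/* VPN2 must be aligned to twice the page size (32K for PM_16K). */
	add_wired_entry(lo0, lo1, 0xe0000000UL, PM_16K);
}
#endif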

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.  Temporary slots are handed out from the top
 * of the TLB downward; tlb_init() resets temp_tlb_entry to the highest
 * index.
 */

static int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}

static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config1
	 * register is not supported and we assume an R4k-style TLB.
	 * CPU probing has already figured out the number of TLB
	 * entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	/* The MT (MMU type) field of Config is zero if there is no TLB. */
	if (!((config >> 7) & 3))
		panic("No TLB present");

	/* The Config1 MMUSize field holds the number of entries - 1. */
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

static int __cpuinitdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}
__setup("ntlb=", set_ntlb);
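
/*
 * "ntlb=N" on the kernel command line restricts the kernel to N TLB
 * entries; see the wired-entry handling in tlb_init() below.
 */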

void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			/* Wire off the low entries so that only the
			   top ntlb slots remain in use. */
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}