arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
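/*
 * The shift by PAGE_SHIFT + 1 reflects that each TLB entry maps an even/odd
 * pair of pages: spacing the dummy VPN2 values one page pair apart in CKSEG0
 * keeps every invalidated entry distinct from all the others.
 */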

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

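/*
 * With SMTC the TLB may be shared by thread contexts on other VPEs, so
 * masking interrupts on the local TC is not enough; dvpe()/evpe() also
 * suspend and resume multi-VPE execution around the TLB update.
 */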
#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags);\
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

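/*
 * Invalidate every non-wired entry in the local CPU's TLB by pointing each
 * index at a unique, never-matched VPN2 in CKSEG0.
 */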
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

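/*
 * Flush the user mappings in [start, end) belonging to the vma's mm.  If the
 * range spans more than half the TLB it is cheaper to drop the whole context
 * (allocate a new ASID) than to probe page by page.
 */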
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long flags;
                int size;

                ENTER_CRITICAL(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
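                /* Each TLB entry covers a pair of pages, so count page pairs. */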
                size = (size + 1) >> 1;
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                BARRIER;
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                EXIT_CRITICAL(flags);
        }
}

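/*
 * Same as above, but for global kernel mappings: no ASID is involved and
 * large ranges fall back to local_flush_tlb_all().
 */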
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;
        int size;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        BARRIER;
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        EXIT_CRITICAL(flags);
}

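/*
 * Probe for a single (page, ASID) pair and invalidate the matching TLB
 * entry, if any.
 */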
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                BARRIER;
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);

        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);

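        /*
         * Load the even/odd EntryLo pair.  With 64-bit physical addresses on
         * MIPS32 R1 the hardware EntryLo value already lives in pte_high;
         * otherwise shifting the PTE right by 6 drops the software-only bits
         * so the PFN and the C/D/V/G fields line up with the EntryLo layout.
         */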
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        write_c0_entrylo0(ptep->pte_high);
        ptep++;
        write_c0_entrylo1(ptep->pte_high);
#else
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
        mtc0_tlbw_hazard();
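        /* On a probe miss (idx < 0) write a random slot, else replace the match. */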
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
                                       unsigned long address, pte_t pte)
{
        unsigned long flags;
        unsigned int asid;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx;

        ENTER_CRITICAL(flags);
        address &= (PAGE_MASK << 1);
        asid = read_c0_entryhi() & ASID_MASK;
        write_c0_entryhi(address | asid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        BARRIER;
        pmdp = pmd_offset(pgdp, address);
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
        write_c0_entrylo0(pte_val(*ptep++) >> 6);
        write_c0_entrylo1(pte_val(*ptep) >> 6);
        mtc0_tlbw_hazard();
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        EXIT_CRITICAL(flags);
}
#endif

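/*
 * Install a permanent wired entry: bump c0_wired and write the new
 * translation at the index just taken into the wired set, so it is never
 * selected for random replacement.
 */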
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save the old context and pagemask */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        BARRIER;
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        BARRIER;
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry that would remain throughout
 * the lifetime of the system.
 */

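/* Temporary entries are handed out from the top of the TLB, growing downward. */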
static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask)
{
        int ret = 0;
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save the old context and pagemask */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        if (--temp_tlb_entry < wired) {
                printk(KERN_WARNING
                       "No TLB space left for add_temporary_entry\n");
                ret = -ENOSPC;
                goto out;
        }

        write_c0_index(temp_tlb_entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
out:
        EXIT_CRITICAL(flags);
        return ret;
}

static void __init probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int reg;

        /*
         * If this isn't a MIPS32 / MIPS64 compliant CPU the Config1 register
         * is not implemented, so we assume an R4k-style TLB; CPU probing has
         * already determined the number of TLB entries in that case.
         */
        if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
                return;
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * If the TLB is shared in an SMTC system, the total size has
         * already been calculated and written into cpu_data's tlbsize.
         */
        if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
                return;
#endif /* CONFIG_MIPS_MT_SMTC */

        reg = read_c0_config1();
        if (!((config >> 7) & 3))
                panic("No TLB present");

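        /* The MMU Size field of Config1 (bits 30:25) holds the entry count minus one. */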
        c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

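/*
 * Optional "ntlb=" kernel command-line parameter: restrict the number of
 * TLB entries the kernel will use (handled at the end of tlb_init()).
 */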
static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

void __init tlb_init(void)
{
        unsigned int config = read_c0_config();

        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set for 4kb pages.
         */
        probe_tlb(config);
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        write_c0_framemask(0);
        temp_tlb_entry = current_cpu_data.tlbsize - 1;

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

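        /*
         * "ntlb=" handling: wiring off the bottom tlbsize - ntlb entries
         * leaves only the top ntlb slots available for random replacement.
         */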
        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}