/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
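
/*
 * Each TLB entry maps an even/odd page pair, hence the PAGE_SHIFT + 1
 * stride, and CKSEG0 is an unmapped region, so these dummy VPN2 values
 * differ for every index and can never match a real access.
 */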

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
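
/*
 * On SMTC the TLB may be shared by several thread contexts, so masking
 * local interrupts is not enough: ENTER_CRITICAL additionally halts the
 * other VPEs with dvpe() while the TLB is rewritten, and EXIT_CRITICAL
 * restarts them with evpe().
 */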

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of the dtlb;
 * unfortunately, the itlb is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
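
/*
 * local_flush_tlb_all(): rewrite every non-wired entry with a unique
 * unmapped VPN2 and zeroed EntryLo values so nothing can match afterwards.
 * Wired entries (indices below c0_wired) are left alone.
 */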

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
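
/*
 * local_flush_tlb_range(): flush the user range [start, end) of vma's mm.
 * If the range covers at most half the TLB, probe and invalidate the
 * affected double-page entries individually; otherwise just drop the
 * mm's ASID, which is cheaper.
 */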

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}
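
/*
 * local_flush_tlb_kernel_range(): same approach for kernel (global)
 * mappings.  There is no ASID to bump here, so when the range would cover
 * more than half the TLB we simply flush everything.
 */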

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
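
/*
 * local_flush_tlb_page(): evict the single (double-page) entry that maps
 * 'page' in vma's mm, if it is currently present in the TLB.
 */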

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needful.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_val(*ptep) >> 6;
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_val(*ptep++) >> 6);
		write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif
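
/*
 * add_wired_entry(): install a permanent TLB entry.  The wired register is
 * bumped so the new entry sits below the wired limit and is skipped by both
 * random replacement and local_flush_tlb_all().
 */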

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and pagemask */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and pagemask */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}
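
/*
 * probe_tlb(): read the TLB size from the MMUSize field of the Config1
 * register on MIPS32/MIPS64 CPUs; older cores are handled by generic CPU
 * probing.
 */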

static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config1 register
	 * is not supported and we assume R4k style.  CPU probing has already
	 * figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
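
/*
 * "ntlb=N" on the kernel command line restricts the TLB to N entries by
 * marking the remaining tlbsize - N entries as wired in tlb_init() below.
 */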

void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}