/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
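/*
 * bcops stays pointed at the no-op operations above unless a platform
 * with a real board-level (external) cache installs its own bcache_ops
 * during secondary cache setup.
 */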
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
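/*
 * R4600 Hit cacheop workaround: V2.x parts want an uncached load to
 * flush the write buffer before a Hit cache operation, while V1.x
 * parts need a few nops of padding.  The R4600_V?_HIT_CACHEOP_WAR
 * switches come from <asm/war.h>.
 */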
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}
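/*
 * The *_setup() routines below all follow the same pattern: probe the
 * cache line size once at boot and bind the matching unrolled blast
 * routine to a function pointer, so no per-call size checks are needed.
 */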
static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}
static void (* r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
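/*
 * TX49 index-invalidate workaround: the routine below is aligned so it
 * executes from a 1kB chunk of known parity and always invalidates the
 * chunks of the opposite parity first, so the code never index-
 * invalidates the cache lines it is currently executing from.
 */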
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
static void (* r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
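/*
 * Each flush operation below comes in two flavours: a local_r4k_*
 * routine that operates on the calling CPU only, and an r4k_* wrapper
 * that dispatches it to every CPU via r4k_on_each_cpu() where needed.
 */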
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches but R10000 and R12000 behave
	 * sanely ...  R4000SC and R4400SC indexed S-cache ops also
	 * invalidate primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		if (cpu_has_dc_aliases)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (cpu_has_dc_aliases)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
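/*
 * Note: the instruction_hazard() barrier above keeps the pipeline from
 * executing stale instructions that were fetched before the i-cache
 * flush completed.
 */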
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */
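/*
 * Of the two DMA maintenance operations above, _wback_inv is what a
 * driver needs before a device reads memory (writeback + invalidate),
 * while _inv suffices before the CPU reads data a device has written
 * (invalidate only).
 */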
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\t.set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
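/*
 * Only virtually tagged i-caches need a real flush here; physically
 * tagged i-caches are looked up by physical address and stay
 * consistent across an address space switch without it.
 */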
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit= __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz= lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so they would normally
	 * suffer from aliases; magic in the hardware deals with that for
	 * us so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}
#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif
984 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
986 cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
987 way_string[c->icache.ways], c->icache.linesz);
989 printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
990 dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
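/*
 * The sizing trick below: fill cache lines with valid tags at growing
 * power-of-two strides, store a zero (invalid) tag at the base address,
 * then read tags back at power-of-two offsets.  The first offset at
 * which the zero tag reappears is where the S-cache wraps around, i.e.
 * its size.
 */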
static int __init probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);
	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);
	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;		/* does not matter */

	return 1;
}
#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
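/*
 * These secondary cache initializers live in their own files under
 * arch/mips/mm (sc-r5k.c, sc-rm7k.c, sc-mips.c in contemporary trees).
 */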
static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit= 0;
		sc_present = 1;
		break;
	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif
	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}
	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
static void __init coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}
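/*
 * r4k_cache_init() is the entry point for this file: it probes the
 * primary and secondary caches, binds the blast routines, and installs
 * the r4k implementations behind the generic MIPS cache-flush hooks.
 */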
void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE-1;

	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}