/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h> /* for run_uncached() */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches.
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
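/*
 * Note on the tests above: in the c0_prid register, bits [15:8] hold the
 * implementation number (0x20 for the R4600) and bits [7:0] the revision.
 * Masking off the low nibble keeps only the major revision, so e.g. a prid
 * of 0x00002022 (an R4600 rev 2.2) still matches cpu_is_r4600_v2_x().
 */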
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
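/*
 * Roughly what the workaround above does, as far as the errata are commonly
 * described: on R4600 V2.x an uncached load from KSEG1 is presumably there
 * to force the CPU's write buffer to drain before the following Hit cache
 * operation, and on V1.x a handful of nops pads the pipeline for a similar
 * reason.  Treat the exact erratum details as folklore; the part that
 * matters for this file is that the macro must be placed immediately
 * before Hit_* cacheops.
 */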
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static inline void r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static inline void r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}
static void (* r4k_blast_dcache)(void);

static inline void r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
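/*
 * The pattern used throughout this file: the cache geometry is probed once
 * at boot, and each *_setup() routine binds a function pointer to the
 * variant specialized for the probed line size.  Hot paths then make an
 * indirect call with no per-call size checks.  A sketch of the idea:
 *
 *	r4k_blast_dcache_setup();  // boot time: picks 16- or 32-byte variant
 *	...
 *	r4k_blast_dcache();        // callers never need to know dc_lsize
 */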
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)

#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10)	/* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk; blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk; blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
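/*
 * Why the even/odd dance above: each cache32_unroll32() invalidates a 1kB
 * (0x400-byte) stretch of the icache by index.  The .align 11 places the
 * first loop at an even 1kB chunk boundary, so the code doing the
 * invalidation never indexes the icache chunks it is itself executing
 * from; the second loop, realigned into an odd chunk, then covers the
 * rest.  That is, presumably, the TX49XX_ICACHE_INDEX_INV_WAR being worked
 * around: index-invalidating the lines holding the executing code
 * misbehaves on those parts.
 */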
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk; blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk; blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static inline void r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static inline void r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
static void (* r4k_blast_icache)(void);

static inline void r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static inline void r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static inline void r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void);

static inline void r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
/*
 * This is the mm layer's former flush_cache_all(), which really should be
 * flush_cache_vunmap() these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely ...
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is too
	 * difficult since stupid R4k caches do a TLB translation for every
	 * cache flush operation.  So we do indexed flushes in that case,
	 * which doesn't flush much more of the cache than necessary.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do an indexed flush; it is too much work to get the (possible)
	 * TLB refills to work correctly.
	 */
	addr = INDEX_BASE + (addr & (dcache_size - 1));
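	/*
	 * Worked example of the line above: with dcache_size == 32kB and a
	 * target address of 0x004a3000, (addr & (dcache_size - 1)) is
	 * 0x3000, so the flush runs on INDEX_BASE + 0x3000.  INDEX_BASE is
	 * an unmapped (KSEG0) address, so the indexed cacheops below select
	 * lines purely by index bits and never touch the TLB.
	 */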
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page_indexed(addr);
	}
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;

	on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
	unsigned long __user start;
	unsigned long __user end;
};
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;
	unsigned long addr, aend;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}

		if (!cpu_icache_snoops_remote_store) {
			if (end - start > scache_size) {
				r4k_blast_scache();
			} else {
				addr = start & ~(sc_lsize - 1);
				aend = (end - 1) & ~(sc_lsize - 1);

				while (1) {
					/* Hit_Writeback_Inv_SD */
					protected_writeback_scache_line(addr);
					if (addr == aend)
						break;
					addr += sc_lsize;
				}
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);
		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}
static void r4k_flush_icache_range(unsigned long __user start,
	unsigned long __user end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
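/*
 * Typical use of the interface above (a sketch, not taken from this file):
 * whenever the kernel writes instructions into memory, for example when
 * installing a signal trampoline or loading a module, the new code has to
 * be pushed out of the D-cache and invalidated from the I-cache before it
 * may be executed:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */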
/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  Otoh we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */

struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};
static inline void local_r4k_flush_icache_page(void *args)
{
	struct flush_icache_page_args *fip_args = args;
	struct vm_area_struct *vma = fip_args->vma;
	struct page *page = fip_args->page;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary caches or
	 * just the page in the secondary cache.  With the subset enforcement
	 * on R4000SC, R4400SC, R10000 and R12000, invalidating the secondary
	 * cache will result in any matching entries in the primary caches
	 * also getting invalidated, which hopefully is a bit more
	 * economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);

		r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
		if (!cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
		ClearPageDcacheDirty(page);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}
static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	/*
	 * If there's no context yet, or the page isn't executable, no
	 * I-cache flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;

	on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
}
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}

		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property, so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = cpu_scache_line_size();

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}

		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = cpu_dcache_line_size();

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_DMA_NONCOHERENT */
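/*
 * How the two routines above get used (a sketch; the wiring happens in
 * r4k_cache_init() below, the driver-side caller is assumed): on a
 * noncoherent platform, mapping a buffer for DMA to the device goes
 * through _dma_cache_wback_inv() so that dirty D-cache lines reach memory
 * before the device reads it, while a buffer the device has written is run
 * through _dma_cache_inv() so stale cached copies are discarded before the
 * CPU looks at the new data.
 */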
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
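	/*
	 * Decode arithmetic for the group above, as an example: with the
	 * CONF_IC field (config bits [11:9]) reading 2, icache_size =
	 * 1 << (12 + 2) = 16kB; with CONF_IB set, the line size is
	 * 16 << 1 = 32 bytes.  waybit then marks the address bit selecting
	 * between the two 8kB ways: ffs(16384/2) - 1 = 13.
	 */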
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* doesn't matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~CONF_EB);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config &= ~0x00000030U;
			config |= 0x00410000U;
			write_c0_config(config);
		}
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* doesn't matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe the
		 * I-cache first ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
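	/*
	 * Example of the Config1 decode above: with IL = 4, the I-cache line
	 * size is 2 << 4 = 32 bytes; IS = 2 gives 64 << 2 = 256 sets and
	 * IA = 3 gives 4 ways, i.e. icache_size = 256 * 4 * 32 = 32kB.
	 * (The IL/IS/IA field names follow the MIPS32/MIPS64 architecture
	 * manuals; the code simply uses the shift offsets directly.)
	 */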
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
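	/*
	 * For instance, a 32kB, 4-way D-cache with 32-byte lines works out
	 * to waysize = 32768 / 4 = 8kB and sets = 32768 / (32 * 4) = 256.
	 */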
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so they normally would suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_SB1:
		break;
	case CPU_24K:
		if (!(read_c0_config7() & (1 << 16)))
	default:
			if (c->dcache.waysize > PAGE_SIZE)
				c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}
/*
 * If you even _breathe_ on this function, look at the gcc output and make
 * sure it does not pop things on and off the stack for the cache sizing
 * loop that executes in KSEG1 space or else you will crash and burn badly.
 * You have been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
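/*
 * What the wrap-around search in probe_scache() finds: once the valid tag
 * at `begin` has been overwritten with a zero tag, loading the SD tag at
 * begin + pow2 hits that very same cache index as soon as pow2 reaches the
 * S-cache size, because the index bits wrap.  If, say, the board has a
 * 512kB S-cache, the tag loads at +128kB and +256kB still read back
 * nonzero tags, while the one at +512kB reads back the zeroed tag, so the
 * loop exits with addr - begin == 512kB.
 */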
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;

	if ((c->isa_level == MIPS_CPU_ISA_M32 ||
	     c->isa_level == MIPS_CPU_ISA_M64) &&
	    !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_SUBSET_CACHES;
}
static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}
void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler (0x100, &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       c->dcache.sets * c->dcache.linesz - 1,
			       PAGE_SIZE - 1);
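	/*
	 * Rationale: sets * linesz is the size of one cache way, i.e. the
	 * stride at which two virtual addresses map to the same cache line.
	 * Making shm_align_mask at least that large lets the mmap layer
	 * place shared mappings at addresses that cannot alias in a
	 * virtually indexed D-cache; e.g. 256 sets of 32-byte lines yield
	 * an 8kB - 1 mask even when PAGE_SIZE is only 4kB.
	 */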
	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}