/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
16 #include <asm/cacheops.h>
18 #include <asm/pgtable.h>
19 #include <asm/mmu_context.h>
20 #include <asm/system.h>
21 #include <asm/isadep.h>
23 #include <asm/bootinfo.h>
26 /* For R3000 cores with R4000 style caches */
27 static unsigned long icache_size, dcache_size; /* Size in bytes */
29 #include <asm/r4kcache.h>
31 extern int r3k_have_wired_reg; /* in r3k-tlb.c */
/*
 * This sequence is required to ensure icache is disabled immediately:
 * a taken branch flushes the instruction streaming buffer, so the
 * write to CP0 Config (clearing ICE) takes effect before the next
 * cacheable instruction fetch.
 */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set    push\n\t" \
	".set    noreorder\n\t" \
	"b       1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)
44 /* TX39H-style cache flush routines. */
45 static void tx39h_flush_icache_all(void)
47 unsigned long flags, config;
49 /* disable icache (set ICE#) */
50 local_irq_save(flags);
51 config = read_c0_conf();
52 write_c0_conf(config & ~TX39_CONF_ICE);
53 TX39_STOP_STREAMING();
55 write_c0_conf(config);
56 local_irq_restore(flags);
/*
 * DMA writeback+invalidate for the TX39/H writethrough cache: since the
 * cache is writethrough there is nothing to write back, only invalidate.
 */
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();				/* order pending I/O before invalidation */
	blast_inv_dcache_range(addr, addr + size);
}
70 static inline void tx39_blast_dcache_page(unsigned long addr)
72 if (current_cpu_data.cputype != CPU_TX3912)
73 blast_dcache16_page(addr);
/* Writeback+invalidate one page worth of dcache lines by cache index. */
static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}
/* Writeback+invalidate the entire dcache. */
static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}
86 static inline void tx39_blast_icache_page(unsigned long addr)
88 unsigned long flags, config;
89 /* disable icache (set ICE#) */
90 local_irq_save(flags);
91 config = read_c0_conf();
92 write_c0_conf(config & ~TX39_CONF_ICE);
93 TX39_STOP_STREAMING();
94 blast_icache16_page(addr);
95 write_c0_conf(config);
96 local_irq_restore(flags);
99 static inline void tx39_blast_icache_page_indexed(unsigned long addr)
101 unsigned long flags, config;
102 /* disable icache (set ICE#) */
103 local_irq_save(flags);
104 config = read_c0_conf();
105 write_c0_conf(config & ~TX39_CONF_ICE);
106 TX39_STOP_STREAMING();
107 blast_icache16_page_indexed(addr);
108 write_c0_conf(config);
109 local_irq_restore(flags);
112 static inline void tx39_blast_icache(void)
114 unsigned long flags, config;
115 /* disable icache (set ICE#) */
116 local_irq_save(flags);
117 config = read_c0_conf();
118 write_c0_conf(config & ~TX39_CONF_ICE);
119 TX39_STOP_STREAMING();
121 write_c0_conf(config);
122 local_irq_restore(flags);
125 static inline void tx39_flush_cache_all(void)
127 if (!cpu_has_dc_aliases)
/* Unconditionally flush both caches (kernel-internal __flush_cache_all). */
static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}
140 static void tx39_flush_cache_mm(struct mm_struct *mm)
142 if (!cpu_has_dc_aliases)
145 if (cpu_context(smp_processor_id(), mm) != 0) {
146 tx39_flush_cache_all();
150 static void tx39_flush_cache_range(struct vm_area_struct *vma,
151 unsigned long start, unsigned long end)
155 if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
158 exec = vma->vm_flags & VM_EXEC;
159 if (cpu_has_dc_aliases || exec)
165 static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
167 int exec = vma->vm_flags & VM_EXEC;
168 struct mm_struct *mm = vma->vm_mm;
175 * If ownes no valid ASID yet, cannot possibly have gotten
176 * this page into the cache.
178 if (cpu_context(smp_processor_id(), mm) == 0)
182 pgdp = pgd_offset(mm, page);
183 pudp = pud_offset(pgdp, page);
184 pmdp = pmd_offset(pudp, page);
185 ptep = pte_offset(pmdp, page);
188 * If the page isn't marked valid, the page cannot possibly be
191 if (!(pte_val(*ptep) & _PAGE_PRESENT))
195 * Doing flushes for another ASID than the current one is
196 * too difficult since stupid R4k caches do a TLB translation
197 * for every cache flush operation. So we do indexed flushes
198 * in that case, which doesn't overly flush the cache too much.
200 if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
201 if (cpu_has_dc_aliases || exec)
202 tx39_blast_dcache_page(page);
204 tx39_blast_icache_page(page);
210 * Do indexed flush, too much work to get the (possible) TLB refills
213 page = (KSEG0 + (page & (dcache_size - 1)));
214 if (cpu_has_dc_aliases || exec)
215 tx39_blast_dcache_page_indexed(page);
217 tx39_blast_icache_page_indexed(page);
/* Writeback+invalidate one dcache page at a kernel virtual address. */
static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}
225 static void tx39_flush_icache_range(unsigned long start, unsigned long end)
227 if (end - start > dcache_size)
230 protected_blast_dcache_range(start, end);
232 if (end - start > icache_size)
235 unsigned long flags, config;
236 /* disable icache (set ICE#) */
237 local_irq_save(flags);
238 config = read_c0_conf();
239 write_c0_conf(config & ~TX39_CONF_ICE);
240 TX39_STOP_STREAMING();
241 protected_blast_icache_range(start, end);
242 write_c0_conf(config);
243 local_irq_restore(flags);
248 * Ok, this seriously sucks. We use them to flush a user page but don't
249 * know the virtual address, so we have to blast away the whole icache
250 * which is significantly more expensive than the real thing. Otoh we at
251 * least know the kernel address of the page so we can flush it
254 static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page)
258 * If there's no context yet, or the page isn't executable, no icache
261 if (!(vma->vm_flags & VM_EXEC))
264 addr = (unsigned long) page_address(page);
265 tx39_blast_dcache_page(addr);
268 * We're not sure of the virtual address(es) involved here, so
269 * we have to flush the entire I-cache.
274 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
278 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
281 tx39_blast_dcache_page(addr);
283 } while(addr != end);
284 } else if (size > dcache_size) {
287 blast_dcache_range(addr, addr + size);
291 static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
295 if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
298 tx39_blast_dcache_page(addr);
300 } while(addr != end);
301 } else if (size > dcache_size) {
304 blast_inv_dcache_range(addr, addr + size);
308 static void tx39_flush_cache_sigtramp(unsigned long addr)
310 unsigned long ic_lsize = current_cpu_data.icache.linesz;
311 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
312 unsigned long config;
315 protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
317 /* disable icache (set ICE#) */
318 local_irq_save(flags);
319 config = read_c0_conf();
320 write_c0_conf(config & ~TX39_CONF_ICE);
321 TX39_STOP_STREAMING();
322 protected_flush_icache_line(addr & ~(ic_lsize - 1));
323 write_c0_conf(config);
324 local_irq_restore(flags);
327 static __init void tx39_probe_cache(void)
329 unsigned long config;
331 config = read_c0_conf();
333 icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
334 TX39_CONF_ICS_SHIFT));
335 dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
336 TX39_CONF_DCS_SHIFT));
338 current_cpu_data.icache.linesz = 16;
339 switch (current_cpu_data.cputype) {
341 current_cpu_data.icache.ways = 1;
342 current_cpu_data.dcache.ways = 1;
343 current_cpu_data.dcache.linesz = 4;
347 current_cpu_data.icache.ways = 2;
348 current_cpu_data.dcache.ways = 2;
349 current_cpu_data.dcache.linesz = 16;
354 current_cpu_data.icache.ways = 1;
355 current_cpu_data.dcache.ways = 1;
356 current_cpu_data.dcache.linesz = 16;
361 void __init tx39_cache_init(void)
363 extern void build_clear_page(void);
364 extern void build_copy_page(void);
365 unsigned long config;
367 config = read_c0_conf();
368 config &= ~TX39_CONF_WBON;
369 write_c0_conf(config);
373 switch (current_cpu_data.cputype) {
375 /* TX39/H core (writethru direct-map cache) */
376 flush_cache_all = tx39h_flush_icache_all;
377 __flush_cache_all = tx39h_flush_icache_all;
378 flush_cache_mm = (void *) tx39h_flush_icache_all;
379 flush_cache_range = (void *) tx39h_flush_icache_all;
380 flush_cache_page = (void *) tx39h_flush_icache_all;
381 flush_icache_page = (void *) tx39h_flush_icache_all;
382 flush_icache_range = (void *) tx39h_flush_icache_all;
384 flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
385 flush_data_cache_page = (void *) tx39h_flush_icache_all;
387 _dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
389 shm_align_mask = PAGE_SIZE - 1;
396 /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
397 r3k_have_wired_reg = 1;
398 write_c0_wired(0); /* set 8 on reset... */
399 /* board-dependent init code may set WBON */
401 flush_cache_all = tx39_flush_cache_all;
402 __flush_cache_all = tx39___flush_cache_all;
403 flush_cache_mm = tx39_flush_cache_mm;
404 flush_cache_range = tx39_flush_cache_range;
405 flush_cache_page = tx39_flush_cache_page;
406 flush_icache_page = tx39_flush_icache_page;
407 flush_icache_range = tx39_flush_icache_range;
409 flush_cache_sigtramp = tx39_flush_cache_sigtramp;
410 flush_data_cache_page = tx39_flush_data_cache_page;
412 _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
413 _dma_cache_wback = tx39_dma_cache_wback_inv;
414 _dma_cache_inv = tx39_dma_cache_inv;
416 shm_align_mask = max_t(unsigned long,
417 (dcache_size / current_cpu_data.dcache.ways) - 1,
423 current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
424 current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
426 current_cpu_data.icache.sets =
427 current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
428 current_cpu_data.dcache.sets =
429 current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;
431 if (current_cpu_data.dcache.waysize > PAGE_SIZE)
432 current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;
434 current_cpu_data.icache.waybit = 0;
435 current_cpu_data.dcache.waybit = 0;
437 printk("Primary instruction cache %ldkB, linesize %d bytes\n",
438 icache_size >> 10, current_cpu_data.icache.linesz);
439 printk("Primary data cache %ldkB, linesize %d bytes\n",
440 dcache_size >> 10, current_cpu_data.dcache.linesz);
444 tx39h_flush_icache_all();