/* arch/mips/mm/c-r4k.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
        preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, retry, wait);
#endif
        func(info);
        preempt_enable();
}
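
/*
 * Note: on CONFIG_MIPS_MT_SMP and CONFIG_MIPS_MT_SMTC kernels the "CPUs"
 * are thread contexts sharing one physical primary cache, so the
 * smp_call_function() above is compiled out and only the local
 * func(info) call remains; preemption is disabled so that call runs
 * to completion on a single, known CPU.
 */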

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)
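
/*
 * The PRId register carries the implementation number (0x20 for the
 * R4600) in bits 15..8 and the revision in bits 7..0; masking with
 * 0xfffffff0 drops the minor revision digit, so 0x2010 matches any
 * R4600 1.x part and 0x2020 any R4600 2.x part.
 */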

#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)
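
/*
 * Workaround for the R4600 hit-type cache op errata: on 2.x parts the
 * dummy uncached load from CKSEG1 forces outstanding memory accesses
 * to complete before a hit cache op issues; on 1.x parts a handful of
 * nops provides the required pipeline separation.  The gory details
 * live with the WAR definitions in asm/war.h.
 */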

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
        __asm__ __volatile__( \
                "b\t1f\n\t" \
                ".align\t" #order "\n\t" \
                "1:\n\t" \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
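
/*
 * cache32_unroll32() touches 32 lines of 32 bytes, i.e. one 1kB chunk.
 * The TX49 workarounds below align their own code to a 1kB or 2kB
 * boundary so they know which chunk they are executing from, then
 * invalidate only the chunks of the opposite parity in each pass.
 * That way the chunk holding the currently running loop is never
 * invalidated underneath it.
 */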

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}
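
/*
 * The blast_{i,d,s}cache* functions selected above are generated by
 * macros in asm/r4kcache.h (__BUILD_BLAST_CACHE and friends), one
 * variant per line size.  That is why these setup routines pick a
 * function pointer once at boot instead of re-testing the line size
 * on every single flush.
 */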

/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void *args)
{
        r4k_blast_dcache();
}

static void r4k_flush_cache_all(void)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
        r4k_blast_scache();
        return;
#endif
        r4k_blast_dcache();
        r4k_blast_icache();

        switch (current_cpu_data.cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                r4k_blast_scache();
        }
}
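
/*
 * On Loongson2 the S-cache is inclusive of the primaries (see
 * loongson2_sc_init() below), so blasting it alone is evidently
 * treated as sufficient here; the listed R4000/R4400 SC/MC and
 * R10000-family parts have an S-cache that must be blasted explicitly
 * on top of the primary caches.
 */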

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

static inline void local_r4k_flush_cache_range(void *args)
{
        struct vm_area_struct *vma = args;

        if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
                return;

        r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
        struct mm_struct *mm = args;

        if (!cpu_context(smp_processor_id(), mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
         * only flush the primary caches, while R10000 and R12000 behave sanely.
         * R4000SC and R4400SC indexed S-cache ops also invalidate primary
         * caches, so we can bail out early.
         */
        if (current_cpu_data.cputype == CPU_R4000SC ||
            current_cpu_data.cputype == CPU_R4000MC ||
            current_cpu_data.cputype == CPU_R4400SC ||
            current_cpu_data.cputype == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (cpu_context(smp_processor_id(), mm) == 0)
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_val(*ptep) & _PAGE_PRESENT))
                return;

        /*
         * Doing flushes for another ASID than the current one is
         * too difficult since stupid R4k caches do a TLB translation
         * for every cache flush operation.  So we do indexed flushes
         * in that case, which doesn't over-flush the cache too badly.
         */
        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
                if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                        r4k_blast_dcache_page(addr);
                        if (exec && !cpu_icache_snoops_remote_store)
                                r4k_blast_scache_page(addr);
                }
                if (exec)
                        r4k_blast_icache_page(addr);

                return;
        }

        /*
         * Do indexed flush, too much work to get the (possible) TLB refills
         * to work correctly.
         */
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
                                              paddr : addr);
                if (exec && !cpu_icache_snoops_remote_store) {
                        r4k_blast_scache_page_indexed(paddr);
                }
        }
        if (exec) {
                if (cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page_indexed(addr);
        }
}
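
/*
 * Indexed cache ops select a line by its position in the array rather
 * than by address match, so they work without a TLB mapping.  For a
 * physically indexed dcache the line position depends on the physical
 * address, which is why paddr rather than addr is passed when
 * cpu_has_pindexed_dcache.
 */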

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }

                if (!cpu_icache_snoops_remote_store && scache_size) {
                        if (end - start > scache_size)
                                r4k_blast_scache();
                        else
                                protected_blast_scache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else
                protected_blast_icache_range(start, end);
}
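
/*
 * Once a range is at least as large as the cache itself a full
 * indexed blast is cheaper than issuing hit-type ops on every line of
 * the range.  The protected_* variants carry exception-table fixups
 * so that a cache op faulting on an unmapped address is silently
 * ignored instead of taking the kernel down.
 */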

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
        instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly
         */
        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                return;
        }

        if (size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_inv(addr, size);
}
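
/*
 * _dma_cache_wback_inv is used when a buffer may hold dirty data the
 * device must see (DMA to the device, or bidirectional); _dma_cache_inv
 * is its companion for DMA from the device, where stale cache lines
 * merely need to be discarded before the CPU reads the buffer.
 */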
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla    $at,1f\n\t"
#endif
                        "cache  %0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}
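
/*
 * The MIPS4K_ICACHE_REFILL_WAR asm above takes the address of the
 * local label 1: and performs a Hit_Invalidate_I on the line holding
 * it, forcing a clean refill of the instruction stream immediately
 * after the trampoline has been flushed.
 */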

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}
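
/*
 * With the usual RM7000 16kB 4-way icache (4kB per way), the loop
 * above steps through the 32-byte lines of one way while the
 * 0x1000/0x2000/0x3000 offsets hit the same index in the other three
 * ways.  Each index is tagged invalid, refilled via the Fill op, then
 * invalidated again, leaving the array in a consistent state.
 */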

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (c->cputype) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
                /* fall through */
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();
                /* fall through */

        case CPU_RM9000:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
                c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
                 * the I-cache ...
                 */
                config1 = read_c0_config1();

                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)               /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz = lsize;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping hardware in the "bad"
         * configuration.
         */
        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're
         * 32kB 2-way virtually indexed caches, so they would normally
         * suffer from aliases, but magic in the hardware deals with that
         * for us so we don't need to take care ourselves.
         */
        switch (c->cputype) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
                /* fall through */
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (c->cputype) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_AU1000:
        case CPU_AU1500:
        case CPU_AU1100:
        case CPU_AU1550:
        case CPU_AU1200:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }

#ifdef CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has 4 way icache, but when using indexed cache op,
         * one op will act on all 4 ways
         */
        c->icache.ways = 1;
#endif

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;
        int tmp;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        tmp = 0;
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->scache.waybit = 0;           /* does not matter */

        return 1;
}
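
/*
 * The sizing trick: after lines have been tagged valid at power-of-two
 * strides, an invalid (zero) tag is stored at 'begin'.  Because a
 * direct-mapped S-cache wraps at its size, the indexed tag reads at
 * begin + 128k, 256k, ... land on the very line tagged at 'begin'
 * exactly at the cache size, where the zero tag is found and the loop
 * terminates.
 */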

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have an S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit = 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
        case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
#endif

        default:
                if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
                    c->isa_level == MIPS_CPU_ISA_M32R2 ||
                    c->isa_level == MIPS_CPU_ISA_M64R1 ||
                    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0)
         * on the early revisions of Alchemy SOCs.  It disables the bus
         * transaction overlapping and needs to be set to fix various errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata are actually silent about this bit, so we set it
         * just in case for those revisions that require it to be set according
         * to arch/mips/au1000/common/cputable.c
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

static void __init coherency_setup(void)
{
        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit, and some wire it to zero; others, like Toshiba, had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_data.cputype) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;
        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one...
         */
        case CPU_AU1000: /* rev. DA, HA, HB */
        case CPU_AU1100: /* rev. AB, BA, BC ?? */
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
        }
}

void __init r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;

        switch (c->cputype) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }

        probe_pcache();
        setup_scache();

        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
        if (c->dcache.linesz)
                shm_align_mask = max_t(unsigned long,
                                       c->dcache.sets * c->dcache.linesz - 1,
                                       PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE - 1;
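
        /*
         * shm_align_mask makes mmap() place shared mappings at virtual
         * addresses that are congruent modulo one dcache way
         * (sets * linesz), so all aliases of a page land on the same
         * cache lines and cannot go stale against each other.
         */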
        flush_cache_all         = r4k_flush_cache_all;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
        flush_cache_page        = r4k_flush_cache_page;
        flush_cache_range       = r4k_flush_cache_range;

        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
        flush_icache_all        = r4k_flush_icache_all;
        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
        _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
        _dma_cache_wback        = r4k_dma_cache_wback_inv;
        _dma_cache_inv          = r4k_dma_cache_inv;
#endif

        build_clear_page();
        build_copy_page();
        local_r4k___flush_cache_all(NULL);
        coherency_setup();
}