/*
 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - Unified Layer 2 Cache for Feroceon CPU Cores,
 *   Document ID MV-S104858-00, Rev. A, October 23 2007.
 */

#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>
/*
 * Low-level cache maintenance operations.
 *
 * As well as the regular 'clean/invalidate/flush L2 cache line by
 * MVA' instructions, the Feroceon L2 cache controller also features
 * 'clean/invalidate L2 range by MVA' operations.
 *
 * Cache range operations are initiated by writing the start and
 * end addresses to successive cp15 registers, and process every
 * cache line whose first byte address lies in the inclusive range
 * [start:end].
 *
 * The cache range operations stall the CPU pipeline until completion.
 *
 * The range operations require two successive cp15 writes, in
 * between which we don't want to be preempted.
 */
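/*
 * For example (illustrative): a range op given start == 0x1000 and
 * end == 0x1040 processes the three lines whose first bytes are
 * 0x1000, 0x1020 and 0x1040.  This is why the Linux primitives
 * further down subtract one cache line from their noninclusive end
 * addresses before issuing a range op.
 */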
static inline unsigned long l2_start_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Let's do our own fixmap stuff in a minimal way here.
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping.  This relies on interrupts being disabled
	 * by the caller.
	 */
	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
	local_flush_tlb_kernel_page(vaddr);
	return vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}
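/*
 * Note: without CONFIG_HIGHMEM, every physical address handled here
 * has a permanent lowmem mapping, so __phys_to_virt() suffices; the
 * temporary fixmap PTE above is only needed when the page may lack
 * a kernel mapping.
 */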
static inline void l2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}
static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	raw_local_irq_save(flags);
	va_start = l2_start_va(start);
	va_end = va_start + (end - start);
	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
		"mcr p15, 1, %1, c15, c9, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
}
static inline void l2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}
static inline void l2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}
static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	raw_local_irq_save(flags);
	va_start = l2_start_va(start);
	va_end = va_start + (end - start);
	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
		"mcr p15, 1, %1, c15, c11, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
}
/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
#define CACHE_LINE_SIZE		32
#define MAX_RANGE_SIZE		1024
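/*
 * With 32-byte lines, a MAX_RANGE_SIZE of 1024 bytes means at most
 * 32 cache lines are processed per range operation, which bounds
 * how long each op can stall the CPU pipeline.
 */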
static int l2_wt_override;
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	unsigned long range_end;

	BUG_ON(start & (CACHE_LINE_SIZE - 1));
	BUG_ON(end & (CACHE_LINE_SIZE - 1));

	/*
	 * Try to process all cache lines between 'start' and 'end'.
	 */
	range_end = end;

	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (range_end > start + MAX_RANGE_SIZE)
		range_end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
		range_end = (start | (PAGE_SIZE - 1)) + 1;

	return range_end;
}
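/*
 * Worked example (illustrative, assuming 4 KiB pages): for
 * start == 0x1fc0 and end == 0x3000, the MAX_RANGE_SIZE clamp would
 * allow lines up to 0x23c0, but the page-boundary clamp wins and
 * calc_range_end() returns 0x2000; the caller then issues one range
 * op covering [0x1fc0, 0x2000) and loops again from 0x2000.
 */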
static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}
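/*
 * The partial first and last lines above are cleaned *and*
 * invalidated (rather than just invalidated) so that any dirty data
 * they hold outside the requested range is written back instead of
 * being silently discarded.
 */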
static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
			start = range_end;
		}
	}

	dsb();
}
static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		if (!l2_wt_override)
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}
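/*
 * Flushing is a clean followed by an invalidate: the clean must come
 * first so that dirty lines reach memory before being dropped from
 * the cache (it is skipped when the L2 is forced write-through, as
 * lines are then never dirty).  The trailing dsb() in each primitive
 * ensures the maintenance operations have completed before the CPU
 * continues.
 */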
/*
 * Routines to disable and re-enable the D-cache and I-cache at run
 * time.  These are necessary because the L2 cache can only be enabled
 * or disabled while the L1 Dcache and Icache are both disabled.
 */
static int __init flush_and_disable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_C) {
		unsigned long flags;

		raw_local_irq_save(flags);
		flush_cache_all();
		set_cr(cr & ~CR_C);
		raw_local_irq_restore(flags);
		return 1;
	}
	return 0;
}
static void __init enable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_C);
}
static void __init __invalidate_icache(void)
{
	int dummy;

	__asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0" : "=r" (dummy));
}
static int __init invalidate_and_disable_icache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_I) {
		set_cr(cr & ~CR_I);
		__invalidate_icache();
		return 1;
	}
	return 0;
}

static void __init enable_icache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_I);
}
static inline u32 read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
static void __init disable_l2_prefetch(void)
{
	u32 u;

	/*
	 * Read the CPU Extra Features register and verify that the
	 * Disable L2 Prefetch bit is set.
	 */
	u = read_extra_features();
	if (!(u & 0x01000000)) {
		printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
		write_extra_features(u | 0x01000000);
	}
}
static void __init enable_l2(void)
{
	u32 u;

	u = read_extra_features();
	if (!(u & 0x00400000)) {
		int i, d;

		printk(KERN_INFO "Feroceon L2: Enabling L2\n");

		d = flush_and_disable_dcache();
		i = invalidate_and_disable_icache();
		write_extra_features(u | 0x00400000);
		if (i)
			enable_icache();
		if (d)
			enable_dcache();
	}
}
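/*
 * flush_and_disable_dcache() and invalidate_and_disable_icache()
 * report whether each cache was actually enabled, so that only the
 * caches that were running before the L2 enable bit was flipped are
 * turned back on afterwards.
 */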
void __init feroceon_l2_init(int __l2_wt_override)
{
	l2_wt_override = __l2_wt_override;

	disable_l2_prefetch();

	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;

	enable_l2();

	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
	       l2_wt_override ? " in WT override mode" : "");
}
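/*
 * Usage sketch (illustrative; the caller below is hypothetical):
 * platform code for a Feroceon-based SoC calls feroceon_l2_init()
 * once during early boot, passing 0 for normal write-back operation:
 *
 *	void __init my_board_init_early(void)
 *	{
 *		feroceon_l2_init(0);
 *	}
 *
 * Passing a nonzero value instead declares that the L2 has been
 * forced to write-through mode, in which case the clean operations
 * above become no-ops since the cache never holds dirty lines.
 */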