/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000
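/*
 * NOTE: flush_pfn_alias() below maps the target page at a kernel
 * virtual address whose cache colour matches the user address, then
 * flushes through that alias.  Assuming the usual ARM VIPT definition
 * CACHE_COLOUR(vaddr) == ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) with
 * SHMLBA of four pages, there are four colours; e.g. a user address
 * of 0x2000 has colour 2 and is flushed through the alias mapping at
 * ALIAS_FLUSH_START + 0x2000.
 */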
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);
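	/*
	 * NOTE: assuming ARMv6 CP15 encodings, the sequence below
	 * should clean+invalidate the D-cache over [%0, %1] by MVA
	 * (MCRR ... c14), drain the write buffer, and invalidate the
	 * whole I-cache.
	 */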
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4\n"
	"	mcr	p15, 0, %2, c7, c5, 0\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
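		/*
		 * NOTE: assuming ARMv6 CP15 encodings, this should
		 * clean+invalidate the entire D-cache, invalidate the
		 * entire I-cache, and drain the write buffer.  A
		 * whole-cache flush is used because a per-mm flush by
		 * colour is not possible.
		 */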
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    : : "r" (0) : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    : : "r" (0) : "cc");
	}
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}

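/*
 * NOTE: this is called when the kernel has written into another
 * task's page through the kernel mapping (the ptrace/
 * access_process_vm path), so the user's view, and for VM_EXEC
 * mappings the I-cache, must be brought coherent with what was
 * just written.
 */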
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

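/*
 * NOTE: flushing a single colour above relies on userspace mappings
 * being colour-aligned: assuming mmap() aligns shared mappings so a
 * page's virtual colour equals the colour of its file offset (the
 * COLOUR_ALIGN logic in arch/arm/mm/mmap.c), page->index determines
 * the one colour any user mapping of this page can have.
 */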
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
	}
}
EXPORT_SYMBOL(flush_dcache_page);

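/*
 * NOTE: the lazy path above only marks the page; the PG_dcache_dirty
 * bit is expected to be tested and cleared when a user mapping is
 * later set up (update_mmu_cache() in arch/arm/mm/fault-armv.c in
 * kernels of this vintage), which performs the deferred flush.
 */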
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
	}
	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_page(page_address(page));
}