#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
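
/* Illustrative example (not from the original header; "buf" and "len"
 * are hypothetical): write back a kernel buffer before a device reads
 * it behind the cache's back:
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	...fill buf...
 *	flush_kernel_dcache_range((unsigned long)buf, len);
 */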

extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}
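
/* Note: the trailing "1, 1" are the retry and wait arguments of this
 * kernel's four-argument on_each_cpu(), i.e. run the local flush on
 * every CPU and block until all of them have finished. */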

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);

static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
}
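
/* Rationale: below parisc_cache_flush_threshold a line-by-line range
 * flush is cheaper than nuking the whole cache; beyond it, flushing
 * everything wins.  parisc_setup_cache_timing() calibrates the
 * threshold at boot. */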

extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do { flush_kernel_dcache_page(page); flush_kernel_icache_page(page_address(page)); } while (0)

#define flush_icache_range(s,e)		do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
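
/* In both macros above the D-cache flush must come first: PA-RISC has
 * split I/D caches, so freshly written instructions have to be pushed
 * out of the D-cache before the I-cache is flushed and refetches them. */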

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
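
/* These two are used by callers such as access_process_vm() (ptrace
 * and friends) that touch a user page through its kernel mapping: the
 * user alias is flushed first, and in the write case the kernel alias
 * is written back after the memcpy so the new data is visible to the
 * user mapping and to any later I-cache fetch. */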

static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}
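
/* The fallback above is deliberate: user-space range flushes work
 * through space register %sr3, so if the mm being flushed is not the
 * one currently loaded there we cannot address its pages by range and
 * must flush everything. */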

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	/* We cannot take the pte lock here: flush_cache_page is usually
	 * called with pte lock already held.  Whereas flush_dcache_page
	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
	 * the vma itself is secure, but the pte might come or go racily.
	 */
	pte = *pte_offset_map(pmd, addr);
	/* But pte_unmap() does nothing on this architecture */

	/* Filter out coincidental file entries and swap entries */
	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
		return 0;

	return pte_pfn(pte) == pfn;
}
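
/* (_PAGE_FLUSH is PA-RISC's marker for a pte that has been cleared but
 * whose translation is kept alive so the page can still be flushed; it
 * is treated like a present page here.) */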

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}
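
/* Preemption, not interrupts, is the hazard above: a context switch in
 * the middle would reload cr25/%sr3 for whatever task runs next, and
 * the flushes would then hit the wrong address space. */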

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr, pfn)))
		__flush_cache_page(vma, vmaddr);
}
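
/* The translation_exists() check matters: flushing a user address with
 * no pte behind it would raise the non-access TLB miss described above
 * with nothing for the handlers to fill it from. */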

static inline void
flush_anon_page(struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */
#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
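
/* Presumably kunmap_parisc() (defined in arch code, not here) flushes
 * the kernel alias of the page: on pa8800/pa8900 the cache can hold
 * both the kernel and the user alias of a page at once, so the kernel
 * copy must be pushed out when the temporary mapping is dropped. */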

#define kunmap(page)			kunmap_parisc(page_address(page))

#define kmap_atomic(page, idx)		page_address(page)

#define kunmap_atomic(addr, idx)	kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)	page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif /* CONFIG_PA8X00 */

#endif /* _PARISC_CACHEFLUSH_H */