#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cache.h>  /* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

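/* flush_cache_mm() is meant to flush only the caches touched by one mm,
 * but PA-RISC has no selective way to flush a range of another CPU's
 * cache by space id, so on SMP we have to flush everything everywhere;
 * on UP the local caches are enough. */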
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size))

extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

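/* Note: in this kernel's four-argument on_each_cpu(), the trailing
 * "1, 1" are the retry and wait flags; wait == 1 means the call does
 * not return until every CPU has finished its local flush. */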
static inline void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end)            flush_cache_all()
#define flush_cache_vunmap(start, end)          flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);

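/* A ranged flush costs time proportional to its length, while flushing
 * the whole cache costs a (large) constant.  parisc_setup_cache_timing()
 * times a full flush at boot and sets parisc_cache_flush_threshold to
 * roughly the break-even range size; the two helpers below use it to
 * pick the cheaper operation. */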
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

extern void flush_dcache_page(struct page *page);

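/* flush_dcache_page() has to walk all user mappings of a page to fix
 * up dcache aliasing.  flush_dcache_mmap_lock() serializes that walk
 * against vmas being added to or removed from the address_space;
 * parisc implements it with the mapping's tree_lock. */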
#define flush_dcache_mmap_lock(mapping) \
        write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        write_unlock_irq(&(mapping)->tree_lock)

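/* PA-RISC has split I and D caches, so before freshly written
 * instructions can be executed the dcache lines must be written back
 * to memory and the icache copies of them invalidated, in that order.
 * A typical use (sketch; dst/code/len are illustrative) after the
 * kernel generates or copies code:
 *
 *	memcpy(dst, code, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */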
#define flush_icache_page(vma,page) \
do { \
        flush_kernel_dcache_page(page); \
        flush_kernel_icache_page(page_address(page)); \
} while (0)

#define flush_icache_range(s,e) \
do { \
        flush_kernel_dcache_range_asm(s,e); \
        flush_kernel_icache_range_asm(s,e); \
} while (0)

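/* Used by access_process_vm() (ptrace and friends) to copy data to or
 * from a user page through its kernel mapping.  The user alias is
 * flushed first so the kernel view is current; after a write, the
 * kernel dcache lines are flushed back so the user alias sees the new
 * data. */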
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
        flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
} while (0)

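/* Space register %sr3 holds the space id of the current user process,
 * so a ranged user flush only works if the vma belongs to the mm we
 * are running in; any other address space costs a full cache flush. */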
static inline void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        unsigned long sr3;

        if (!vma->vm_mm->context) {
                BUG();
                return;
        }

        sr3 = mfsp(3);
        if (vma->vm_mm->context == sr3) {
                flush_user_dcache_range(start, end);
                flush_user_icache_range(start, end);
        } else {
                flush_cache_all();
        }
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long pfn)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        pmd_t *pmd;
        pte_t pte;

        if (pgd_none(*pgd))
                return 0;

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        /* We cannot take the pte lock here: flush_cache_page is usually
         * called with pte lock already held.  Whereas flush_dcache_page
         * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
         * the vma itself is secure, but the pte might come or go racily.
         */
        pte = *pte_offset_map(pmd, addr);
        /* But pte_unmap() does nothing on this architecture */

        /* Filter out coincidental file entries and swap entries */
        if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
                return 0;

        return pte_pfn(pte) == pfn;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        /* save the current process space and pgd */
        unsigned long space = mfsp(3), pgd = mfctl(25);

        /* we don't mind taking interrupts since they may not
         * do anything with user space, but we can't
         * be preempted here */
        preempt_disable();

        /* make us current */
        mtctl(__pa(vma->vm_mm->pgd), 25);
        mtsp(vma->vm_mm->context, 3);

        flush_user_dcache_page(vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_page(vmaddr);

        /* put the old current process back */
        mtsp(space, 3);
        mtctl(pgd, 25);
        preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        if (likely(vma->vm_mm->context == mfsp(3))) {
                flush_user_dcache_page(vmaddr);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_page(vmaddr);
        } else {
                flush_user_cache_page_non_current(vma, vmaddr);
        }
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        if (likely(translation_exists(vma, vmaddr, pfn)))
                __flush_cache_page(vma, vmaddr);
}

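/* Anonymous pages are not in the page cache, so flush_dcache_page()
 * does not cover them; when the kernel is about to read such a page
 * through its own mapping (e.g. via get_user_pages()), the user alias
 * has to be flushed explicitly.  ARCH_HAS_FLUSH_ANON_PAGE below tells
 * generic code to call this instead of its no-op fallback. */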
static inline void
flush_anon_page(struct page *page, unsigned long vmaddr)
{
        if (PageAnon(page))
                flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE

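/* Likewise, ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE signals that the no-op
 * fallback is not good enough here: code that writes to a page cache
 * page through its kernel address must call flush_kernel_dcache_page()
 * afterwards so user aliases see the data. */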
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
        flush_kernel_dcache_page_addr(page_address(page));
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 need this: those CPUs cannot tolerate
 * inequivalent kernel/user cache aliases of the same page, so the
 * kernel mapping has to be flushed when a kmap is dropped. */
#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

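/* parisc has no highmem, so kmap() is just page_address(); the point
 * of providing our own implementation is the flush hidden in
 * kunmap_parisc(), which writes the kernel alias back before anyone
 * looks at the page through a user mapping. */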
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

#define kunmap(page)                    kunmap_parisc(page_address(page))

#define kmap_atomic(page, idx)          page_address(page)

#define kunmap_atomic(addr, idx)        kunmap_parisc(addr)

#define kmap_atomic_pfn(pfn, idx)       page_address(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */