/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * Invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * Flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * Flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * Special routines for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
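/*
 * Illustrative sketch (an assumption, not part of this header): the usual
 * convention is that a buffer about to be read by a device is written back
 * with __flush_dcache_range(), a buffer the device has just written is
 * discarded with __invalidate_dcache_range(), and a buffer used in both
 * directions gets __flush_invalidate_dcache_range(), e.g.
 *
 *	__flush_invalidate_dcache_range((unsigned long) buf, len);
 *
 * 'buf' and 'len' are hypothetical names for the buffer address and its
 * length in bytes, following the (from,size) convention listed above.
 */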

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)              do { } while(0)
# define __flush_dcache_page(p)                 do { } while(0)
# define __flush_invalidate_dcache_page(p)      __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)   __invalidate_dcache_range(p,s)
#endif

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#endif
#if (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
# define __invalidate_icache_page_alias(v,p)    do { } while(0)
#endif

/*
 * The caches are physically tagged, so there is normally nothing to do
 * here - unless the configuration has cache aliasing.
 *
 * Pages can get remapped. Because remapping may change the 'color' of a
 * page, the cache has to be flushed before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */
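/*
 * A rough sketch of what 'color' means here (an illustration, not taken
 * from this file): with a cache way larger than a page, a page's color is
 * given by the virtual index bits above PAGE_SHIFT, roughly
 *
 *	color = (vaddr >> PAGE_SHIFT) & (DCACHE_WAY_SIZE / PAGE_SIZE - 1);
 *
 * where 'vaddr' and 'color' are hypothetical names. Two mappings of the
 * same physical page land in different cache lines when their colors
 * differ, which is why the *_alias() helpers above take both a virtual
 * and a physical address.
 */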

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()                                               \
        do {                                                            \
                __flush_invalidate_dcache_all();                        \
                __invalidate_icache_all();                              \
        } while (0)

#define flush_cache_mm(mm)              flush_cache_all()
#define flush_cache_dup_mm(mm)          flush_cache_mm(mm)

#define flush_cache_vmap(start,end)     flush_cache_all()
#define flush_cache_vunmap(start,end)   flush_cache_all()

extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);

#else

#define flush_cache_all()                               do { } while (0)
#define flush_cache_mm(mm)                              do { } while (0)
#define flush_cache_dup_mm(mm)                          do { } while (0)

#define flush_cache_vmap(start,end)                     do { } while (0)
#define flush_cache_vunmap(start,end)                   do { } while (0)

#define flush_dcache_page(page)                         do { } while (0)

#define flush_cache_page(vma,addr,pfn)                  do { } while (0)
#define flush_cache_range(vma,start,end)                do { } while (0)

#endif

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)                                   \
        do {                                                            \
                __flush_dcache_range(start, (end) - (start));           \
                __invalidate_icache_range(start,(end) - (start));       \
        } while (0)
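/*
 * Usage sketch (illustrative, not part of this header): after new
 * instructions have been written to memory, e.g. by a module loader or
 * when planting a breakpoint, the caller keeps the instruction cache
 * coherent with something like
 *
 *	flush_icache_range((unsigned long) addr, (unsigned long) addr + size);
 *
 * where 'addr' and 'size' are hypothetical names for the start and length
 * of the freshly written code.
 */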

/* This is not required; see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)                     do { } while (0)

#define flush_dcache_mmap_lock(mapping)                 do { } while (0)
#define flush_dcache_mmap_unlock(mapping)               do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)              \
        do {                                                            \
                memcpy(dst, src, len);                                  \
                __flush_dcache_range((unsigned long) dst, len);         \
                __invalidate_icache_range((unsigned long) dst, len);    \
        } while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)

#endif
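/*
 * Illustrative note (an assumption about the callers, not stated in this
 * header): copy_to_user_page() is used by generic code such as ptrace when
 * it writes into another process's page, roughly
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * where 'maddr', 'offset', 'buf' and 'len' are hypothetical names for the
 * kernel mapping of the page, the offset within it, the source buffer and
 * its length. The non-aliasing variant above therefore copies the bytes
 * and then makes the data and instruction caches coherent, so a freshly
 * written breakpoint instruction is actually fetched.
 */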

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */