/*
 * include/asm-xtensa/cacheflush.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * special cases for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);


#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_range(p,s)		do { } while(0)
# define __flush_dcache_page(p)			do { } while(0)
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif
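
/*
 * Illustrative sketch, not part of the original header: on a writeback
 * dcache, dirty lines covering a buffer must reach memory before a
 * device reads that buffer with DMA; __flush_dcache_range() does the
 * writeback. The function and the start_device_dma() call below are
 * hypothetical.
 */
#if 0
static void example_dma_to_device(void *buf, unsigned long len)
{
	/* Push any dirty cache lines covering buf out to memory. */
	__flush_dcache_range((unsigned long) buf, len);

	/* The device now reads the CPU's latest data (hypothetical call). */
	start_device_dma(virt_to_phys(buf), len);
}
#endif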

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						unsigned long phys) { }
#endif

/*
 * We have physically tagged caches, so there is nothing to do here
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because remapping may change the 'color' of a
 * page, we have to flush the cache before the PTE is changed.
 * (see also Documentation/cachetlb.txt)
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

#define flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)	flush_cache_all()
#define flush_cache_vunmap(start,end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);

#else

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)

#define flush_cache_vmap(start,end)			do { } while (0)
#define flush_cache_vunmap(start,end)			do { } while (0)

#define flush_dcache_page(page)				do { } while (0)

#define flush_cache_page(vma,addr,pfn)			do { } while (0)
#define flush_cache_range(vma,start,end)		do { } while (0)

#endif
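
/*
 * Illustrative sketch, not part of the original header: when
 * DCACHE_WAY_SIZE > PAGE_SIZE, one way of the virtually indexed cache
 * spans several pages, so the index bits above PAGE_SHIFT (the page's
 * 'color') come from the virtual address. Two virtual mappings of the
 * same physical page alias cleanly only when they share a color. The
 * helper below is hypothetical.
 */
#if 0
static int example_same_color(unsigned long vaddr1, unsigned long vaddr2)
{
	/* Index bits above the page offset but below the way size. */
	unsigned long color_mask = DCACHE_WAY_SIZE - PAGE_SIZE;

	return ((vaddr1 ^ vaddr2) & color_mask) == 0;
}
#endif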

/* Ensure consistency between data and instruction cache. */
#define flush_icache_range(start,end)					\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start,(end) - (start));	\
	} while (0)
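
/*
 * Illustrative sketch, not part of the original header: any code that
 * writes instructions through the dcache (module loader, kprobes, ...)
 * must call flush_icache_range() so the icache sees the new text. The
 * function and its arguments below are hypothetical.
 */
#if 0
static void example_install_code(void *text, const void *insns,
				 unsigned long len)
{
	memcpy(text, insns, len);	/* lands in the dcache first */
	flush_icache_range((unsigned long) text, (unsigned long) text + len);
}
#endif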

/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page)			do { } while (0)

#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif
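
/*
 * Illustrative sketch, not part of the original header: the
 * copy_{to,from}_user_page() pair is used on paths such as
 * access_process_vm()/ptrace that touch a user page through a kernel
 * mapping. The wrapper below is hypothetical but follows that pattern.
 */
#if 0
static void example_poke_user_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long uaddr,
				   const void *buf, unsigned long len)
{
	void *kaddr = kmap(page);

	/* Copy and keep the caches coherent with the user mapping. */
	copy_to_user_page(vma, page, uaddr,
			  kaddr + (uaddr & ~PAGE_MASK), buf, len);
	kunmap(page);
}
#endif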

#define XTENSA_CACHEBLK_LOG2	29
#define XTENSA_CACHEBLK_SIZE	(1 << XTENSA_CACHEBLK_LOG2)
#define XTENSA_CACHEBLK_MASK	(7 << XTENSA_CACHEBLK_LOG2)

#if XCHAL_HAVE_CACHEATTR
static inline u32 xtensa_get_cacheattr(void)
{
	u32 r;
	asm volatile("	rsr %0, CACHEATTR" : "=a"(r));
	return r;
}

static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r = addr & XTENSA_CACHEBLK_MASK;
	return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
			& 0xF);
}
#else
static inline u32 xtensa_get_dtlb1(u32 addr)
{
	u32 r;
	asm volatile("	rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
	asm volatile("	dsync");
	return r;
}

static inline u32 xtensa_get_cacheattr(void)
{
	u32 r = 0;
	u32 a = 0;
	do {
		a -= XTENSA_CACHEBLK_SIZE;
		r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
	} while (a);
	return r;
}
#endif
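
/*
 * Illustrative note, not part of the original header: CACHEATTR packs
 * one 4-bit attribute per 512MB region, i.e. eight nibbles covering the
 * whole 32-bit address space (hence XTENSA_CACHEBLK_LOG2 == 29). The
 * nibble for an address is selected by its top three bits, which is
 * what the shift in xtensa_get_dtlb1() computes. The helper below is
 * hypothetical.
 */
#if 0
static u32 example_region_attr(u32 addr)
{
	u32 region = addr >> XTENSA_CACHEBLK_LOG2;	/* 0..7 */

	/* Four attribute bits per region, region 0 in the low nibble. */
	return (xtensa_get_cacheattr() >> (region * 4)) & 0xF;
}
#endif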

static inline int xtensa_need_flush_dma_source(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
}

static inline int xtensa_need_invalidate_dma_destination(u32 addr)
{
	return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
}
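
/*
 * Illustrative sketch, not part of the original header: the two
 * predicates above let a DMA path skip cache maintenance when the
 * mapping's attribute already makes it safe (the code treats CA == 2 as
 * cache-bypass and CA >= 4 as cached). The wrapper below is
 * hypothetical; invalidate_dcache_unaligned() is defined further down
 * in this file.
 */
#if 0
static void example_sync_for_cpu(u32 addr, u32 size)
{
	/* Drop stale lines before the CPU reads device-written data. */
	if (xtensa_need_invalidate_dma_destination(addr))
		invalidate_dcache_unaligned(addr, size);
}
#endif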

static inline void flush_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* Number of cache lines touched by [addr, addr+size). */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwb %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}

static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
{
	int cnt;
	if (size) {
		/* Write back and invalidate the partial first line ... */
		asm volatile("	dhwbi %0, 0 ;" : : "a"(addr));
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			- XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		/* ... discard the fully covered lines in between ... */
		while (cnt-- > 0) {
			asm volatile("	dhi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		/* ... and write back and invalidate the partial last line. */
		asm volatile("	dhwbi %0, %1" : : "a"(addr),
						"n"(XCHAL_DCACHE_LINESIZE));
		asm volatile("	dsync");
	}
}

static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
{
	u32 cnt;
	if (size) {
		/* Number of cache lines touched by [addr, addr+size). */
		cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
			+ XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
		while (cnt--) {
			asm volatile("	dhwbi %0, 0" : : "a"(addr));
			addr += XCHAL_DCACHE_LINESIZE;
		}
		asm volatile("	dsync");
	}
}
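
/*
 * Illustrative sketch, not part of the original header: the unaligned
 * helpers above work line by line from a misaligned start address, so
 * partial first and last lines are handled (written back rather than
 * silently discarded where data could be lost). A hypothetical outbound
 * DMA sync pairs them with the attribute predicates:
 */
#if 0
static void example_sync_for_device(u32 addr, u32 size)
{
	/* Write dirty lines to memory before the device reads them. */
	if (xtensa_need_flush_dma_source(addr))
		flush_dcache_unaligned(addr, size);
}
#endif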

#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */