2 * arch/sh/mm/cache-sh4.c
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
5 * Copyright (C) 2001, 2002, 2003, 2004, 2005 Paul Mundt
6 * Copyright (C) 2003 Richard Curnow
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
13 #include <linux/init.h>
14 #include <linux/mman.h>
16 #include <linux/threads.h>
17 #include <asm/addrspace.h>
19 #include <asm/pgtable.h>
20 #include <asm/processor.h>
21 #include <asm/cache.h>
23 #include <asm/uaccess.h>
24 #include <asm/pgalloc.h>
25 #include <asm/mmu_context.h>
26 #include <asm/cacheflush.h>
/*
 * Forward declarations.  The "extern" routines are implemented outside
 * this file (presumably the assembly fast paths for single-way caches --
 * confirm against the full tree); the static "_ex" variants are the C
 * loops below that walk every cache way explicitly.
 */
28 extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
29 unsigned long exec_offset);
30 extern void __flush_cache_4096_all(unsigned long start);
31 static void __flush_cache_4096_all_ex(unsigned long start);
32 extern void __flush_dcache_all(void);
33 static void __flush_dcache_all_ex(void);
/*
 * SH-4 has a virtually indexed, physically tagged cache, so one physical
 * page may live in several cache "colours" (aliases).
 *
 * One semaphore per page of the P3 mapping window set up by
 * p3_cache_init() below (four pages, each initialized to count 1, i.e.
 * used as mutexes).  NOTE(review): presumably each guards one alias
 * colour of the P3 window -- confirm against the users of p3map_sem.
 */
39 struct semaphore p3map_sem[4];
/*
 * p3_cache_init() - boot-time setup of the P3 mapping window used for
 * alias-safe cache operations.
 *
 * Maps PAGE_SIZE*4 of cacheable space at P3SEG; failure to establish
 * the mapping is fatal (panic).  The four p3map_sem semaphores are
 * initialized to 1, i.e. unlocked mutexes, one per mapped page.
 */
41 void __init p3_cache_init(void)
43 if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
44 panic("%s failed.", __FUNCTION__);
46 sema_init (&p3map_sem[0], 1);
47 sema_init (&p3map_sem[1], 1);
48 sema_init (&p3map_sem[2], 1);
49 sema_init (&p3map_sem[3], 1);
/*
 * __flush_wback_region() - write back the dirty D-cache lines covering
 * a region, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 *
 * The range is widened to L1_CACHE_BYTES alignment at both ends, then
 * one "ocbwb" (operand cache block write-back) instruction is issued
 * per cache line.
 */
58 void __flush_wback_region(void *start, int size)
61 unsigned long begin, end;
/* Round begin down, end up, to L1 cache-line boundaries */
63 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
64 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
65 & ~(L1_CACHE_BYTES-1);
66 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
67 asm volatile("ocbwb %0"
/*
 * __flush_purge_region() - write back the dirty D-cache lines covering
 * a region AND invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 *
 * Same line-by-line walk as __flush_wback_region(), but using "ocbp"
 * (operand cache block purge: write back then invalidate).
 */
79 void __flush_purge_region(void *start, int size)
82 unsigned long begin, end;
/* Round begin down, end up, to L1 cache-line boundaries */
84 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
85 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
86 & ~(L1_CACHE_BYTES-1);
87 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
88 asm volatile("ocbp %0"
/*
 * __flush_invalidate_region() - invalidate the D-cache lines covering a
 * region WITHOUT writing them back ("ocbi"): any dirty data in the
 * range is discarded.  Same alignment/walk as the two routines above.
 */
98 void __flush_invalidate_region(void *start, int size)
101 unsigned long begin, end;
/* Round begin down, end up, to L1 cache-line boundaries */
103 begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
104 end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
105 & ~(L1_CACHE_BYTES-1);
106 for (v = begin; v < end; v+=L1_CACHE_BYTES) {
107 asm volatile("ocbi %0"
/*
 * __flush_dcache_all_ex() - flush the whole D-cache by stepping through
 * the memory-mapped operand-cache address array, covering every set in
 * every way (sets << entry_shift entries per way).  Chosen over the
 * single-way __flush_dcache_all() by flush_dcache_all() below when the
 * cache has more than one way.
 * NOTE(review): the loop body is not visible in this chunk; presumably
 * it clears the valid/dirty bits via the address array -- confirm.
 */
113 static void __flush_dcache_all_ex(void)
115 unsigned long addr, end_addr, entry_offset;
117 end_addr = CACHE_OC_ADDRESS_ARRAY +
118 (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) *
119 cpu_data->dcache.ways;
/* Stride of one address-array entry */
121 entry_offset = 1 << cpu_data->dcache.entry_shift;
122 for (addr = CACHE_OC_ADDRESS_ARRAY;
124 addr += entry_offset) {
/*
 * __flush_cache_4096_all_ex() - flush one 4K window of the D-cache
 * across all ways, via the operand-cache address array.  "start" is the
 * offset of the alias colour within the cache (callers pass 0x0000,
 * 0x1000, 0x2000 or 0x3000 -- see flush_cache_range()).
 * NOTE(review): inner loop body elided in this chunk -- presumably an
 * address-array write per entry; confirm against the full file.
 */
129 static void __flush_cache_4096_all_ex(unsigned long start)
131 unsigned long addr, entry_offset;
134 entry_offset = 1 << cpu_data->dcache.entry_shift;
/* Outer loop: one pass per way, advancing by way_incr */
135 for (i = 0; i < cpu_data->dcache.ways;
136 i++, start += cpu_data->dcache.way_incr) {
137 for (addr = CACHE_OC_ADDRESS_ARRAY + start;
138 addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
139 addr += entry_offset) {
/*
 * flush_cache_4096_all() - flush a 4K alias window of the D-cache.
 * Single-way (direct-mapped) caches take the simple external routine;
 * multi-way caches use the explicit per-way loop above.
 */
145 void flush_cache_4096_all(unsigned long start)
147 if (cpu_data->dcache.ways == 1)
148 __flush_cache_4096_all(start);
150 __flush_cache_4096_all_ex(start);
/*
 * flush_icache_range() - write back the D-cache for the range and purge
 * the I-cache, so freshly written instructions become visible to fetch.
 *
 * Called from kernel/module.c:sys_init_module and the a.out loader
 * routine.  (Body not visible in this chunk.)
 */
158 void flush_icache_range(unsigned long start, unsigned long end)
/*
 * flush_cache_sigtramp() - write back the D-cache and purge the I-cache
 * for a signal trampoline.  This happens to be the same behavior as
 * flush_icache_range(), so we simply flush out the one line: "ocbwb"
 * writes the D-cache line back, then the matching I-cache entry in
 * every way has its valid bit cleared through the I-cache address
 * array, with interrupts disabled around the address-array writes.
 */
168 void flush_cache_sigtramp(unsigned long addr)
170 unsigned long v, index;
/* Align down to the containing cache line */
174 v = addr & ~(L1_CACHE_BYTES-1);
175 asm volatile("ocbwb %0"
/* Address-array slot of this line in the I-cache */
179 index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
181 local_irq_save(flags);
183 for (i = 0; i < cpu_data->icache.ways;
184 i++, index += cpu_data->icache.way_incr)
185 ctrl_outl(0, index); /* Clear out Valid-bit */
187 local_irq_restore(flags);
/*
 * flush_cache_4096() - flush one 4K D-cache alias window against a
 * physical page, via the external __flush_cache_4096() helper.
 *
 * SH7751, SH7751R, and ST40 have no restriction on where the cache is
 * handled from, while SH7750 (CPU_HAS_P2_FLUSH_BUG) must do it from the
 * P2 area: in that case interrupts are disabled and a 0x20000000
 * exec_offset is passed (presumably shifting execution/accesses into P2
 * -- confirm against __flush_cache_4096's implementation).
 */
190 static inline void flush_cache_4096(unsigned long start,
199 if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
200 || start < CACHE_OC_ADDRESS_ARRAY) {
201 local_irq_save(flags);
202 __flush_cache_4096(start | SH_CACHE_ASSOC,
203 P1SEGADDR(phys), 0x20000000);
204 local_irq_restore(flags);
206 __flush_cache_4096(start | SH_CACHE_ASSOC,
/*
 * flush_dcache_page() - write back & invalidate the D-cache lines of a
 * page in every possible alias colour (avoids "alias" issues on this
 * virtually indexed cache).  Only pages that have been mapped
 * (PG_mapped set) need the work.
 */
215 void flush_dcache_page(struct page *page)
217 if (test_bit(PG_mapped, &page->flags)) {
218 unsigned long phys = PHYSADDR(page_address(page));
/* Loop all the D-cache: hit all four 4K alias windows */
221 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
222 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
223 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
224 flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
/*
 * flush_icache_all() - invalidate the entire I-cache by setting the
 * CCR_CACHE_ICI bit in the cache control register, with interrupts
 * disabled for the read-modify-write.
 * NOTE(review): the CCR read/write lines are elided in this chunk --
 * confirm how ccr is loaded and stored against the full file.
 */
228 static inline void flush_icache_all(void)
230 unsigned long flags, ccr;
232 local_irq_save(flags);
237 ccr |= CCR_CACHE_ICI;
241 local_irq_restore(flags);
/*
 * flush_dcache_all() - flush the entire D-cache, choosing the
 * single-way external routine for direct-mapped caches and the
 * explicit all-ways C loop otherwise.
 */
244 void flush_dcache_all(void)
246 if (cpu_data->dcache.ways == 1)
247 __flush_dcache_all();
249 __flush_dcache_all_ex();
/*
 * flush_cache_all() - flush everything.  (Body not visible in this
 * chunk; presumably delegates to flush_dcache_all() and
 * flush_icache_all() -- confirm against the full file.)
 */
252 void flush_cache_all(void)
/*
 * flush_cache_mm() - flush cache state for a whole address space.
 * (Body not visible in this chunk -- confirm behavior against the
 * full file before relying on it.)
 */
258 void flush_cache_mm(struct mm_struct *mm)
/*
 * flush_cache_page() - write back and invalidate the I/D-cache lines
 * for one user page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 *
 * The D-cache only needs flushing when the virtual and physical
 * addresses fall into different alias colours; both colours are then
 * flushed.  Executable mappings additionally get the matching half of
 * the I-cache flushed.
 */
269 void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
271 unsigned long phys = pfn << PAGE_SHIFT;
273 /* We only need to flush D-cache when we have alias */
274 if ((address^phys) & CACHE_ALIAS) {
275 /* Loop 4K of the D-cache */
277 CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
279 /* Loop another 4K of the D-cache */
281 CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
285 if (vma->vm_flags & VM_EXEC)
286 /* Loop 4K (half) of the I-cache */
288 CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
/*
 * flush_cache_range() - write back and invalidate D-cache lines for a
 * user address range (plus the I-cache for executable mappings).
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry: flushing the cache
 * lines for U0 only isn't enough, since P1 may contain aliases of the
 * same physical page.
 *
 * Strategy: walk the page tables for the range and, for every present
 * PTE whose virtual and physical addresses alias to different colours,
 * record both colours in a per-colour bitmask ("d", one bit per 4K
 * window); then flush exactly the recorded windows.
 */
301 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
304 unsigned long p = start & PAGE_MASK;
/*
 * Don't bother with the lookup and alias check if we have a
 * wide range to cover, just blow away the dcache in its
 * entirety instead. -- PFM.
 */
318 if (((end - start) >> PAGE_SHIFT) >= 64) {
321 if (vma->vm_flags & VM_EXEC)
/* Walk the page tables from the start of the range */
327 dir = pgd_offset(vma->vm_mm, p);
328 pud = pud_offset(dir, p);
329 pmd = pmd_offset(pud, p);
330 end = PAGE_ALIGN(end);
/* Skip unmapped/bad PMDs a whole PMD span at a time */
333 if (pmd_none(*pmd) || pmd_bad(*pmd)) {
334 p &= ~((1 << PMD_SHIFT) -1);
335 p += (1 << PMD_SHIFT);
339 pte = pte_offset_kernel(pmd, p);
342 if ((pte_val(entry) & _PAGE_PRESENT)) {
343 phys = pte_val(entry)&PTE_PHYS_MASK;
/* Colours differ => both alias windows must be flushed */
344 if ((p^phys) & CACHE_ALIAS) {
345 d |= 1 << ((p & CACHE_ALIAS)>>12);
346 d |= 1 << ((phys & CACHE_ALIAS)>>12);
/* Continue until range end or the PTE pointer crosses a page */
353 } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
/*
 * Flush the recorded 4K alias windows.  NOTE(review): the per-call
 * guards (presumably testing bits of "d") are elided in this chunk --
 * confirm against the full file.
 */
358 flush_cache_4096_all(0);
360 flush_cache_4096_all(0x1000);
362 flush_cache_4096_all(0x2000);
364 flush_cache_4096_all(0x3000);
365 if (vma->vm_flags & VM_EXEC)
/*
 * flush_icache_user_range() - make instructions just stored into a user
 * page visible, by flushing the page's cache lines.
 * @vma: VMA of the process
 * @page: page that was written
 * @addr: U0 virtual address within the page
 * @len: length of the range (< page size)
 *
 * Delegates to flush_cache_page() for the containing page; @len is
 * ignored here since the whole page is flushed.
 */
376 void flush_icache_user_range(struct vm_area_struct *vma,
377 struct page *page, unsigned long addr, int len)
379 flush_cache_page(vma, addr, page_to_pfn(page));