/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001, 2002, 2003, 2004, 2005  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
extern void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
extern void __flush_cache_4096_all(unsigned long start);
static void __flush_cache_4096_all_ex(unsigned long start);
extern void __flush_dcache_all(void);
static void __flush_dcache_all_ex(void);
/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
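/*
 * A note on aliasing: with 4 KB pages the page offset supplies address
 * bits [11:0], but each way of the operand cache is indexed with bits
 * [13:5] of the virtual address.  Bits [13:12] therefore act as a
 * "colour", so one physical page can live in any of four distinct 4 KB
 * slices of the cache; CACHE_ALIAS masks exactly those two bits.  For
 * example, a user mapping at 0x00403000 and the kernel's P1 view of the
 * same page at 0x8c001000 differ in bits [13:12] and thus alias.  The
 * p3map_sem[] semaphores below serialise use of the four per-colour P3
 * mapping windows set up in p3_cache_init().
 */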
struct semaphore p3map_sem[4];
void __init p3_cache_init(void)
{
	if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
		panic("%s failed.", __FUNCTION__);

	sema_init (&p3map_sem[0], 1);
	sema_init (&p3map_sem[1], 1);
	sema_init (&p3map_sem[2], 1);
	sema_init (&p3map_sem[3], 1);
}
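/*
 * Each of the four P3 pages remapped above gives the kernel a cacheable
 * window with a known colour; callers (e.g. the user page copy/clear
 * paths elsewhere in this port) pick the window whose colour matches the
 * user mapping and take the corresponding p3map_sem entry first.
 */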
/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
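/*
 * Worked example of the rounding above, with the SH-4's 32-byte line
 * size (L1_CACHE_BYTES == 32): start = 0x1004, size = 8 gives
 * begin = 0x1000 and end = (0x1004 + 8 + 31) & ~31 = 0x1020, i.e. a
 * single cache operation on the one line containing the whole region.
 */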
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
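/*
 * The three region primitives above differ only in the cache control
 * instruction applied to each line: ocbwb writes a dirty line back but
 * leaves it valid, ocbp ("purge") writes it back and invalidates it,
 * and ocbi invalidates it without any write-back (so pending stores in
 * that line are simply discarded).
 */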
static void __flush_dcache_all_ex(void)
{
	unsigned long addr, end_addr, entry_offset;

	end_addr = CACHE_OC_ADDRESS_ARRAY +
		(cpu_data->dcache.sets << cpu_data->dcache.entry_shift) *
		cpu_data->dcache.ways;

	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (addr = CACHE_OC_ADDRESS_ARRAY;
	     addr < end_addr;
	     addr += entry_offset) {
		ctrl_outl(0, addr);
	}
}
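/*
 * Writing zero to an entry in the memory-mapped OC address array is a
 * non-associative write: per the SH7750-series manuals, if the entry's
 * U (dirty) bit was set the line is written back first, then its tag,
 * V and U bits are cleared.  Looping over every set in every way thus
 * flushes and invalidates the entire D-cache.
 */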
static void __flush_cache_4096_all_ex(unsigned long start)
{
	unsigned long addr, entry_offset;
	int i;

	entry_offset = 1 << cpu_data->dcache.entry_shift;
	for (i = 0; i < cpu_data->dcache.ways;
	     i++, start += cpu_data->dcache.way_incr) {
		for (addr = CACHE_OC_ADDRESS_ARRAY + start;
		     addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
		     addr += entry_offset) {
			ctrl_outl(0, addr);
		}
	}
}
void flush_cache_4096_all(unsigned long start)
{
	if (cpu_data->dcache.ways == 1)
		__flush_cache_4096_all(start);
	else
		__flush_cache_4096_all_ex(start);
}
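/*
 * On a single-way (direct-mapped) D-cache the hand-coded assembly
 * version suffices; parts with a multi-way cache (e.g. the 2-way
 * SH7751R) take the C fallback, which repeats the 4 KB sweep once per
 * way, using way_incr to step between the ways' address arrays.
 */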
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}
/*
 * Write back the D-cache and purge the I-cache for signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();
	for (i = 0; i < cpu_data->icache.ways;
	     i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */
	back_to_P1();
	local_irq_restore(flags);
}
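/*
 * Since we cannot tell which way (if any) holds the trampoline's line,
 * the loop above writes the IC address array entry at this index in
 * every way, clearing its V bit.  This runs from P2 with interrupts
 * disabled: modifying the I-cache arrays while fetching instructions
 * through the cached P1 mapping would be unsafe.
 */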
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags;

	/*
	 * SH7751, SH7751R, and ST40 have no restriction to handle cache.
	 * (While SH7750 must do that at P2 area.)
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	    || start < CACHE_OC_ADDRESS_ARRAY) {
		local_irq_save(flags);
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0x20000000);
		local_irq_restore(flags);
	} else {
		__flush_cache_4096(start | SH_CACHE_ASSOC,
				   P1SEGADDR(phys), 0);
	}
}
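/*
 * SH_CACHE_ASSOC sets the "A" bit of the address-array access, making
 * the write associative: only a line whose tag matches the physical
 * address (supplied via its P1 alias) is written back and invalidated.
 * The 0x20000000 exec_offset turns a P1 address into its P2 equivalent,
 * so on parts with the P2 flush bug the flush assembly runs uncached.
 */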
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));

		/* Loop all the D-cache */
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
	}
}
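/*
 * The kernel cannot know which colour(s) userspace used for this page,
 * so all four 4 KB slices of the D-cache are swept; the associative
 * write in flush_cache_4096() makes each sweep a no-op for slices that
 * hold no lines of this physical page.
 */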
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	back_to_P1();
	local_irq_restore(flags);
}
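/*
 * Setting CCR.ICI invalidates the entire I-cache in one operation.  CCR
 * may only be written while executing from the uncached, untranslated
 * P2 segment, hence the jump_to_P2()/back_to_P1() pair around it.
 */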
void flush_dcache_all(void)
{
	if (cpu_data->dcache.ways == 1)
		__flush_dcache_all();
	else
		__flush_dcache_all_ex();
}
void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	flush_cache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;

	/* We only need to flush D-cache when we have alias */
	if ((address^phys) & CACHE_ALIAS) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
			phys);
	}

	if (vma->vm_flags & VM_EXEC)
		/* Loop 4K (half) of the I-cache */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
			phys);
}
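/*
 * If (address ^ phys) has no bits set under CACHE_ALIAS, the user
 * mapping and the kernel's P1 mapping share a colour and cannot alias,
 * so the D-cache sweep is skipped; otherwise the slices for both
 * colours are flushed.  The I-cache check uses only bit 12 because the
 * smaller I-cache has a single alias bit per way.
 */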
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= 64) {
		flush_cache_all();

		if (vma->vm_flags & VM_EXEC)
			flush_icache_all();

		return;
	}

	dir = pgd_offset(vma->vm_mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			p &= ~((1 << PMD_SHIFT) -1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset_kernel(pmd, p);
		do {
			entry = *pte;
			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry)&PTE_PHYS_MASK;
				if ((p^phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS)>>12);
					d |= 1 << ((phys & CACHE_ALIAS)>>12);
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);
 loop_exit:
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
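/*
 * 'd' above accumulates a four-bit colour map: bit n set means the 4 KB
 * slice for colour n may hold stale lines.  Once all four bits are set
 * (d == 0x0f) the page-table walk short-circuits via loop_exit, since
 * every slice will be swept regardless.
 */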
/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
}