Blackfin: decouple unrelated cache settings to get exact behavior
[linux-2.6.git] / arch / blackfin / kernel / cplb-mpu / cplbmgr.c
1 /*
2  *               Blackfin CPLB exception handling.
3  *               Copyright 2004-2007 Analog Devices Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see the file COPYING, or write
17  * to the Free Software Foundation, Inc.,
18  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  */
20 #include <linux/module.h>
21 #include <linux/mm.h>
22
23 #include <asm/blackfin.h>
24 #include <asm/cacheflush.h>
25 #include <asm/cplbinit.h>
26 #include <asm/mmu_context.h>
27
28 /*
29  * WARNING
30  *
31  * This file is compiled with certain -ffixed-reg options.  We have to
32  * make sure not to call any functions here that could clobber these
33  * registers.
34  */
35
/* Geometry of the per-process rwx bitmaps (number of longs per R/W/X
 * section and its log2 order), plus the bitmap currently installed for
 * each CPU; NULL when no mask is active.
 */
int page_mask_nelts;
int page_mask_order;
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters for CPLB misses, protection faults and
 * flushes of the switched entries.
 */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
43
44 static inline void disable_dcplb(void)
45 {
46         unsigned long ctrl;
47         SSYNC();
48         ctrl = bfin_read_DMEM_CONTROL();
49         ctrl &= ~ENDCPLB;
50         bfin_write_DMEM_CONTROL(ctrl);
51         SSYNC();
52 }
53
54 static inline void enable_dcplb(void)
55 {
56         unsigned long ctrl;
57         SSYNC();
58         ctrl = bfin_read_DMEM_CONTROL();
59         ctrl |= ENDCPLB;
60         bfin_write_DMEM_CONTROL(ctrl);
61         SSYNC();
62 }
63
64 static inline void disable_icplb(void)
65 {
66         unsigned long ctrl;
67         SSYNC();
68         ctrl = bfin_read_IMEM_CONTROL();
69         ctrl &= ~ENICPLB;
70         bfin_write_IMEM_CONTROL(ctrl);
71         SSYNC();
72 }
73
74 static inline void enable_icplb(void)
75 {
76         unsigned long ctrl;
77         SSYNC();
78         ctrl = bfin_read_IMEM_CONTROL();
79         ctrl |= ENICPLB;
80         bfin_write_IMEM_CONTROL(ctrl);
81         SSYNC();
82 }
83
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
        /* The low 16 status bits carry a one-hot mask of the faulting
         * entry.  norm_fr1x32 returns the number of redundant sign bits,
         * i.e. 30 - p for a single set bit at position p, so 30 minus
         * that recovers the entry index.
         */
        int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
        return 30 - signbits;
}
93
94 /*
95  * Given the contents of the status register and the DCPLB_DATA contents,
96  * return true if a write access should be permitted.
97  */
98 static inline int write_permitted(int status, unsigned long data)
99 {
100         if (status & FAULT_USERSUPV)
101                 return !!(data & CPLB_SUPV_WR);
102         else
103                 return !!(data & CPLB_USER_WR);
104 }
105
/* Counters to implement round-robin replacement.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
108
109 /*
110  * Find an ICPLB entry to be evicted and return its index.
111  */
112 static int evict_one_icplb(unsigned int cpu)
113 {
114         int i;
115         for (i = first_switched_icplb; i < MAX_CPLBS; i++)
116                 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
117                         return i;
118         i = first_switched_icplb + icplb_rr_index[cpu];
119         if (i >= MAX_CPLBS) {
120                 i -= MAX_CPLBS - first_switched_icplb;
121                 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
122         }
123         icplb_rr_index[cpu]++;
124         return i;
125 }
126
127 static int evict_one_dcplb(unsigned int cpu)
128 {
129         int i;
130         for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
131                 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
132                         return i;
133         i = first_switched_dcplb + dcplb_rr_index[cpu];
134         if (i >= MAX_CPLBS) {
135                 i -= MAX_CPLBS - first_switched_dcplb;
136                 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
137         }
138         dcplb_rr_index[cpu]++;
139         return i;
140 }
141
/*
 * Handle a DCPLB miss: build a CPLB data word appropriate for the
 * faulting address, evict an entry, and install the new mapping in both
 * the shadow table and the hardware.  Returns 0 on success or
 * CPLB_PROT_VIOL for addresses that must not be mapped.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        unsigned long *mask;
        int idx;
        unsigned long d_data;

        nr_dcplb_miss[cpu]++;

        /* Default: supervisor-writable, valid, dirty 4K page.  */
        d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
        if (bfin_addr_dcacheable(addr)) {
                d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
        }
#endif

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                /* On-chip L2: use the fixed L2 data attributes.  */
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else if (addr >= physical_mem_end) {
                /* Beyond DRAM: only async banks (supervisor, 4MB pages)
                 * and the boot ROM (supervisor reads, 1MB page) may be
                 * mapped; anything else is a violation.
                 */
                if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~0x3fffff;
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_4MB;
                } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
            /* Reserved memory between _ramend and physical_mem_end:
             * grant full user access.
             */
            d_data |= CPLB_USER_RD | CPLB_USER_WR;
        } else {
                /* Normal RAM: derive user read/write permission from the
                 * current process's R and W bitmaps (R section first,
                 * then W at offset page_mask_nelts).
                 */
                mask = current_rwx_mask[cpu];
                if (mask) {
                        int page = addr >> PAGE_SHIFT;
                        int idx = page >> 5;
                        int bit = 1 << (page & 31);

                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_RD;

                        mask += page_mask_nelts;
                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_WR;
                }
        }
        idx = evict_one_dcplb(cpu);

        addr &= PAGE_MASK;
        dcplb_tbl[cpu][idx].addr = addr;
        dcplb_tbl[cpu][idx].data = d_data;

        /* CPLBs must be disabled while their MMRs are rewritten.  */
        disable_dcplb();
        bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
        bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
        enable_dcplb();

        return 0;
}
208
/*
 * Handle an ICPLB miss: build an instruction CPLB data word for the
 * faulting address, evict an entry, and install the new mapping in both
 * the shadow table and the hardware.  Returns 0 on success or
 * CPLB_PROT_VIOL for addresses that must not be executable.
 */
static noinline int icplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
        int status = bfin_read_ICPLB_STATUS();
        int idx;
        unsigned long i_data;

        nr_icplb_miss[cpu]++;

        /* If inside the uncached DMA region, fault.  */
        if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
                return CPLB_PROT_VIOL;

        if (status & FAULT_USERSUPV)
                nr_icplb_supv_miss[cpu]++;

        /*
         * First, try to find a CPLB that matches this address.  If we
         * find one, then the fact that we're in the miss handler means
         * that the instruction crosses a page boundary.
         */
        for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
                if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
                        unsigned long this_addr = icplb_tbl[cpu][idx].addr;
                        if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
                                /* Map the following page instead.  */
                                addr += PAGE_SIZE;
                                break;
                        }
                }
        }

        /* Default: valid 4K page with port priority set.  */
        i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
        /*
         * Normal RAM, and possibly the reserved memory area, are
         * cacheable.
         */
        if (addr < _ramend ||
            (addr < physical_mem_end && reserved_mem_icache_on))
                i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                /* On-chip L2: use the fixed L2 instruction attributes.  */
                addr = L2_START;
                i_data = L2_IMEMORY;
        } else if (addr >= physical_mem_end) {
                /* Beyond DRAM: only supervisor execution from the boot
                 * ROM (1MB page) is allowed.
                 */
                if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        i_data &= ~PAGE_SIZE_4KB;
                        i_data |= PAGE_SIZE_1MB;
                } else
                    return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                /* Reserved memory: user-executable.  */
                i_data |= CPLB_USER_RD;
        } else {
                /*
                 * Two cases to distinguish - a supervisor access must
                 * necessarily be for a module page; we grant it
                 * unconditionally (could do better here in the future).
                 * Otherwise, check the x bitmap of the current process.
                 */
                if (!(status & FAULT_USERSUPV)) {
                        unsigned long *mask = current_rwx_mask[cpu];

                        if (mask) {
                                int page = addr >> PAGE_SHIFT;
                                int idx = page >> 5;
                                int bit = 1 << (page & 31);

                                /* X section is the third bitmap.  */
                                mask += 2 * page_mask_nelts;
                                if (mask[idx] & bit)
                                        i_data |= CPLB_USER_RD;
                        }
                }
        }
        idx = evict_one_icplb(cpu);
        addr &= PAGE_MASK;
        icplb_tbl[cpu][idx].addr = addr;
        icplb_tbl[cpu][idx].data = i_data;

        /* CPLBs must be disabled while their MMRs are rewritten.  */
        disable_icplb();
        bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
        bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
        enable_icplb();

        return 0;
}
298
299 static noinline int dcplb_protection_fault(unsigned int cpu)
300 {
301         int status = bfin_read_DCPLB_STATUS();
302
303         nr_dcplb_prot[cpu]++;
304
305         if (status & FAULT_RW) {
306                 int idx = faulting_cplb_index(status);
307                 unsigned long data = dcplb_tbl[cpu][idx].data;
308                 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
309                     write_permitted(status, data)) {
310                         data |= CPLB_DIRTY;
311                         dcplb_tbl[cpu][idx].data = data;
312                         bfin_write32(DCPLB_DATA0 + idx * 4, data);
313                         return 0;
314                 }
315         }
316         return CPLB_PROT_VIOL;
317 }
318
/*
 * Dispatch a CPLB exception based on the cause field of SEQSTAT.
 * Returns 0 when the fault was handled, CPLB_PROT_VIOL for a real
 * violation, or 1 for an unrecognized cause.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();

	switch (seqstat & 0x3f) {
	case 0x23:
		return dcplb_protection_fault(cpu);
	case 0x26:
		return dcplb_miss(cpu);
	case 0x2C:
		return icplb_miss(cpu);
	default:
		return 1;
	}
}
334
335 void flush_switched_cplbs(unsigned int cpu)
336 {
337         int i;
338         unsigned long flags;
339
340         nr_cplb_flush[cpu]++;
341
342         local_irq_save_hw(flags);
343         disable_icplb();
344         for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
345                 icplb_tbl[cpu][i].data = 0;
346                 bfin_write32(ICPLB_DATA0 + i * 4, 0);
347         }
348         enable_icplb();
349
350         disable_dcplb();
351         for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
352                 dcplb_tbl[cpu][i].data = 0;
353                 bfin_write32(DCPLB_DATA0 + i * 4, 0);
354         }
355         enable_dcplb();
356         local_irq_restore_hw(flags);
357
358 }
359
/*
 * Install a new rwx bitmap for this CPU and pre-load the mask DCPLB
 * entries (first_mask_dcplb .. first_switched_dcplb) so the fault
 * handler can read the bitmap without itself taking a miss.  A NULL
 * masks pointer just clears the current mask.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
        int i;
        unsigned long addr = (unsigned long)masks;
        unsigned long d_data;
        unsigned long flags;

        if (!masks) {
                current_rwx_mask[cpu] = masks;
                return;
        }

        local_irq_save_hw(flags);
        current_rwx_mask[cpu] = masks;

        /* Choose attributes for the pages that hold the bitmap itself.  */
        if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
                addr = L2_START;
                d_data = L2_DMEMORY;
        } else {
                d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
                d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
                d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
        }

        /* CPLBs must be disabled while their MMRs are rewritten.  */
        disable_dcplb();
        for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
                dcplb_tbl[cpu][i].addr = addr;
                dcplb_tbl[cpu][i].data = d_data;
                bfin_write32(DCPLB_DATA0 + i * 4, d_data);
                bfin_write32(DCPLB_ADDR0 + i * 4, addr);
                addr += PAGE_SIZE;
        }
        enable_dcplb();
        local_irq_restore_hw(flags);
}
398 }