/*
 * Blackfin arch: SMP supporting patchset: Blackfin CPLB related code
 * [linux-2.6.git] / arch / blackfin / kernel / cplb-mpu / cplbmgr.c
 */
1 /*
2  *               Blackfin CPLB exception handling.
3  *               Copyright 2004-2007 Analog Devices Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see the file COPYING, or write
17  * to the Free Software Foundation, Inc.,
18  * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
19  */
20 #include <linux/module.h>
21 #include <linux/mm.h>
22
23 #include <asm/blackfin.h>
24 #include <asm/cacheflush.h>
25 #include <asm/cplbinit.h>
26 #include <asm/mmu_context.h>
27
/* Bits in the DCPLB_STATUS/ICPLB_STATUS fault status registers. */
#define FAULT_RW        (1 << 16)	/* set when the faulting access was a write */
#define FAULT_USERSUPV  (1 << 17)	/* set when the fault occurred in supervisor mode */

/* Size/order of the per-process page permission bitmaps (set up elsewhere). */
int page_mask_nelts;
int page_mask_order;
/* Per-CPU pointer to the current process's R/W/X permission bitmaps,
 * or NULL when no mask is installed (e.g. kernel threads). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters, exported for /proc-style statistics. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
38
39 static inline void disable_dcplb(void)
40 {
41         unsigned long ctrl;
42         SSYNC();
43         ctrl = bfin_read_DMEM_CONTROL();
44         ctrl &= ~ENDCPLB;
45         bfin_write_DMEM_CONTROL(ctrl);
46         SSYNC();
47 }
48
49 static inline void enable_dcplb(void)
50 {
51         unsigned long ctrl;
52         SSYNC();
53         ctrl = bfin_read_DMEM_CONTROL();
54         ctrl |= ENDCPLB;
55         bfin_write_DMEM_CONTROL(ctrl);
56         SSYNC();
57 }
58
59 static inline void disable_icplb(void)
60 {
61         unsigned long ctrl;
62         SSYNC();
63         ctrl = bfin_read_IMEM_CONTROL();
64         ctrl &= ~ENICPLB;
65         bfin_write_IMEM_CONTROL(ctrl);
66         SSYNC();
67 }
68
69 static inline void enable_icplb(void)
70 {
71         unsigned long ctrl;
72         SSYNC();
73         ctrl = bfin_read_IMEM_CONTROL();
74         ctrl |= ENICPLB;
75         bfin_write_IMEM_CONTROL(ctrl);
76         SSYNC();
77 }
78
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 *
 * The low 16 status bits are a one-hot mask of the faulting entry; the
 * Blackfin norm builtin counts the redundant sign bits of that value,
 * so 30 minus that count recovers the set bit's position.
 */
static inline int faulting_cplb_index(int status)
{
        int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
        return 30 - signbits;
}
88
89 /*
90  * Given the contents of the status register and the DCPLB_DATA contents,
91  * return true if a write access should be permitted.
92  */
93 static inline int write_permitted(int status, unsigned long data)
94 {
95         if (status & FAULT_USERSUPV)
96                 return !!(data & CPLB_SUPV_WR);
97         else
98                 return !!(data & CPLB_USER_WR);
99 }
100
/* Per-CPU counters to implement round-robin replacement among the
 * switched (per-process) CPLB entries when none is free.  */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
103
104 /*
105  * Find an ICPLB entry to be evicted and return its index.
106  */
107 static int evict_one_icplb(unsigned int cpu)
108 {
109         int i;
110         for (i = first_switched_icplb; i < MAX_CPLBS; i++)
111                 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
112                         return i;
113         i = first_switched_icplb + icplb_rr_index[cpu];
114         if (i >= MAX_CPLBS) {
115                 i -= MAX_CPLBS - first_switched_icplb;
116                 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
117         }
118         icplb_rr_index[cpu]++;
119         return i;
120 }
121
122 static int evict_one_dcplb(unsigned int cpu)
123 {
124         int i;
125         for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
126                 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
127                         return i;
128         i = first_switched_dcplb + dcplb_rr_index[cpu];
129         if (i >= MAX_CPLBS) {
130                 i -= MAX_CPLBS - first_switched_dcplb;
131                 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
132         }
133         dcplb_rr_index[cpu]++;
134         return i;
135 }
136
/*
 * Handle a data CPLB miss: derive protection/cacheability attributes for
 * the faulting address, evict a victim entry and install a new mapping
 * (4KB by default; 4MB for async banks, 1MB for the boot ROM).
 *
 * Returns 0 on success, or CPLB_PROT_VIOL if the access must not be
 * granted.  Runs with CPLB exceptions already taken, on @cpu.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        unsigned long *mask;
        int idx;
        unsigned long d_data;

        nr_dcplb_miss[cpu]++;

        /* Baseline attributes: valid, dirty, supervisor-writable, 4KB. */
        d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
        if (bfin_addr_dcachable(addr)) {
                d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT
                /* Write-through cache mode, allocate on write. */
                d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
        }
#endif
        if (addr >= physical_mem_end) {
                /* Async banks: supervisor-only access, covered by a 4MB page. */
                if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~0x3fffff;
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_4MB;
                } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
                        /* Boot ROM: supervisor reads only, covered by a 1MB page. */
                        addr &= ~(1 * 1024 * 1024 - 1);
                        d_data &= ~PAGE_SIZE_4KB;
                        d_data |= PAGE_SIZE_1MB;
                } else
                        return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
            /* Reserved memory between _ramend and physical_mem_end:
             * grant user read/write access. */
            d_data |= CPLB_USER_RD | CPLB_USER_WR;
        } else {
                /* Normal RAM: consult the current process's permission
                 * bitmaps, when installed, to grant user R/W bits. */
                mask = current_rwx_mask[cpu];
                if (mask) {
                        int page = addr >> PAGE_SHIFT;
                        int idx = page >> 5;
                        int bit = 1 << (page & 31);

                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_RD;

                        /* The write bitmap follows the read bitmap. */
                        mask += page_mask_nelts;
                        if (mask[idx] & bit)
                                d_data |= CPLB_USER_WR;
                }
        }
        idx = evict_one_dcplb(cpu);

        addr &= PAGE_MASK;
        dcplb_tbl[cpu][idx].addr = addr;
        dcplb_tbl[cpu][idx].data = d_data;

        /* DCPLBs must be disabled while their MMRs are rewritten. */
        disable_dcplb();
        bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
        bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
        enable_dcplb();

        return 0;
}
199
/*
 * Handle an instruction CPLB miss: derive attributes for the faulting
 * address, evict a victim entry and install a new mapping (4KB by
 * default, 1MB for the boot ROM).
 *
 * Returns 0 on success, or CPLB_PROT_VIOL if execution from the
 * faulting address must not be granted.
 */
static noinline int icplb_miss(unsigned int cpu)
{
        unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
        int status = bfin_read_ICPLB_STATUS();
        int idx;
        unsigned long i_data;

        nr_icplb_miss[cpu]++;

        /* If inside the uncached DMA region, fault.  */
        if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
                return CPLB_PROT_VIOL;

        if (status & FAULT_USERSUPV)
                nr_icplb_supv_miss[cpu]++;

        /*
         * First, try to find a CPLB that matches this address.  If we
         * find one, then the fact that we're in the miss handler means
         * that the instruction crosses a page boundary.
         */
        for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
                if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
                        unsigned long this_addr = icplb_tbl[cpu][idx].addr;
                        if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
                                /* Map the following page instead. */
                                addr += PAGE_SIZE;
                                break;
                        }
                }
        }

        /* Baseline attributes: valid, port-priority, 4KB page. */
        i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_ICACHE
        /*
         * Normal RAM, and possibly the reserved memory area, are
         * cacheable.
         */
        if (addr < _ramend ||
            (addr < physical_mem_end && reserved_mem_icache_on))
                i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

        if (addr >= physical_mem_end) {
                /* Boot ROM: supervisor-mode execution only, 1MB page. */
                if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
                    && (status & FAULT_USERSUPV)) {
                        addr &= ~(1 * 1024 * 1024 - 1);
                        i_data &= ~PAGE_SIZE_4KB;
                        i_data |= PAGE_SIZE_1MB;
                } else
                    return CPLB_PROT_VIOL;
        } else if (addr >= _ramend) {
                /* Reserved memory: user-readable (executable) mapping. */
                i_data |= CPLB_USER_RD;
        } else {
                /*
                 * Two cases to distinguish - a supervisor access must
                 * necessarily be for a module page; we grant it
                 * unconditionally (could do better here in the future).
                 * Otherwise, check the x bitmap of the current process.
                 */
                if (!(status & FAULT_USERSUPV)) {
                        unsigned long *mask = current_rwx_mask[cpu];

                        if (mask) {
                                /* NOTE: this inner idx shadows the outer one. */
                                int page = addr >> PAGE_SHIFT;
                                int idx = page >> 5;
                                int bit = 1 << (page & 31);

                                /* The execute bitmap is the third of the
                                 * three consecutive R/W/X bitmaps. */
                                mask += 2 * page_mask_nelts;
                                if (mask[idx] & bit)
                                        i_data |= CPLB_USER_RD;
                        }
                }
        }
        idx = evict_one_icplb(cpu);
        addr &= PAGE_MASK;
        icplb_tbl[cpu][idx].addr = addr;
        icplb_tbl[cpu][idx].data = i_data;

        /* ICPLBs must be disabled while their MMRs are rewritten. */
        disable_icplb();
        bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
        bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
        enable_icplb();

        return 0;
}
286
/*
 * Handle a DCPLB protection fault.  The common benign case is the
 * first write to a clean page: if the write is actually permitted,
 * emulate the hardware dirty bit by setting CPLB_DIRTY in the entry
 * and resume.  Anything else is a real protection violation.
 *
 * Returns 0 when the fault was resolved, CPLB_PROT_VIOL otherwise.
 */
static noinline int dcplb_protection_fault(unsigned int cpu)
{
        int status = bfin_read_DCPLB_STATUS();

        nr_dcplb_prot[cpu]++;

        if (status & FAULT_RW) {
                int idx = faulting_cplb_index(status);
                unsigned long data = dcplb_tbl[cpu][idx].data;
                /* Only a clean, non-write-through entry whose permissions
                 * actually allow the write can be fixed up here. */
                if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
                    write_permitted(status, data)) {
                        data |= CPLB_DIRTY;
                        dcplb_tbl[cpu][idx].data = data;
                        bfin_write32(DCPLB_DATA0 + idx * 4, data);
                        return 0;
                }
        }
        return CPLB_PROT_VIOL;
}
306
/*
 * Top-level CPLB exception dispatcher, called from the trap handler.
 * The low six bits of SEQSTAT select the fault cause.  Returns 0 when
 * the fault was handled, non-zero otherwise.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	int cause = seqstat & 0x3f;

	if (cause == 0x23)		/* data access protection violation */
		return dcplb_protection_fault(cpu);
	if (cause == 0x2C)		/* instruction CPLB miss */
		return icplb_miss(cpu);
	if (cause == 0x26)		/* data CPLB miss */
		return dcplb_miss(cpu);
	return 1;			/* not a fault we can handle */
}
322
323 void flush_switched_cplbs(unsigned int cpu)
324 {
325         int i;
326         unsigned long flags;
327
328         nr_cplb_flush[cpu]++;
329
330         local_irq_save(flags);
331         disable_icplb();
332         for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
333                 icplb_tbl[cpu][i].data = 0;
334                 bfin_write32(ICPLB_DATA0 + i * 4, 0);
335         }
336         enable_icplb();
337
338         disable_dcplb();
339         for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
340                 dcplb_tbl[cpu][i].data = 0;
341                 bfin_write32(DCPLB_DATA0 + i * 4, 0);
342         }
343         enable_dcplb();
344         local_irq_restore(flags);
345
346 }
347
/*
 * Install @masks as the current process's R/W/X permission bitmaps on
 * @cpu and cover the bitmap pages themselves with the dedicated "mask"
 * DCPLB entries, so the miss handler can read them without recursing.
 * A NULL @masks simply clears the per-CPU pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
        int i;
        unsigned long addr = (unsigned long)masks;
        unsigned long d_data;
        unsigned long flags;

        if (!masks) {
                current_rwx_mask[cpu] = masks;
                return;
        }

        local_irq_save(flags);
        current_rwx_mask[cpu] = masks;

        /* Supervisor-writable, valid, dirty 4KB entries for the bitmaps. */
        d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
        d_data |= CPLB_L1_CHBL;
#ifdef CONFIG_BFIN_WT
        d_data |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif

        /* DCPLBs must be disabled while their MMRs are rewritten. */
        disable_dcplb();
        for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
                dcplb_tbl[cpu][i].addr = addr;
                dcplb_tbl[cpu][i].data = d_data;
                bfin_write32(DCPLB_DATA0 + i * 4, d_data);
                bfin_write32(DCPLB_ADDR0 + i * 4, addr);
                addr += PAGE_SIZE;
        }
        enable_dcplb();
        local_irq_restore(flags);
}