/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/lmb.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* Default page size arrays, to be used when none is provided by the
 * firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel = 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel = 0,
	},
};

static unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = pteflags & 0x1fa;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;

	/* PP bits. PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
					 (pteflags & _PAGE_DIRTY)))
		rflags |= 1;

	/* Always add C */
	return rflags | HPTE_R_C;
}

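/*
 * Create bolted (permanent) hash table entries for the virtual range
 * vstart..vend, mapped at physical address pstart with the given
 * protection flags, page size and segment size. Used for the kernel
 * linear mapping; returns 0 on success or a negative insert error.
 */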
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr, prot,
					 HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
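/*
 * Tear down bolted mappings previously created by htab_bolt_mapping(),
 * one page-size step at a time. Only possible on platforms that provide
 * an hpte_removebolted hook (used for memory hot-remove).
 */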
static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			       int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!ppc_md.hpte_removebolted) {
		printk(KERN_WARNING "Platform doesn't implement "
				"hpte_removebolted\n");
		return -EINVAL;
	}

	for (vaddr = vstart; vaddr < vend; vaddr += step)
		ppc_md.hpte_removebolted(vaddr, psize, ssize);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

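/*
 * Scan the flattened device-tree "cpu" nodes for the
 * "ibm,processor-segment-sizes" property and enable 1T segment support
 * if a segment size encoding of 40 (i.e. 2^40 bytes) is advertised.
 */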
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

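/*
 * Decode the "ibm,segment-page-sizes" property into mmu_psize_defs[].
 * Each entry is a (page shift, SLB encoding, count) triple followed by
 * <count> pairs of (supported page shift, HPTE encoding); for instance
 * a cell of 0x10, 0x110, 1, 0x10, 1 would describe 64K base pages with
 * an HPTE encoding of 1 (example values, for illustration only).
 */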
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

#ifdef CONFIG_HUGETLB_PAGE
/* Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					const char *uname, int depth,
					void *data) {
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	unsigned long *addr_prop;
	u32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/* This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block. */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << page_count_prop[0]);
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = addr_prop[0];
	block_size = addr_prop[1];
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
			"addr = 0x%lX size = 0x%lX pages = %d\n",
			phys_addr, block_size, expected_pages);
	lmb_reserve(phys_addr, block_size * expected_pages);
	add_gpage(phys_addr, block_size, expected_pages);
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

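/*
 * Pick the page sizes used for the linear mapping, normal/vmalloc/io
 * mappings and (optionally) vmemmap, preferring what the device-tree
 * advertises and falling back to the built-in defaults above.
 */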
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fall back on the known size
	 * list for 16M-capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
			/*
			 * Don't use 64k pages for ioremap on pSeries, since
			 * that would stop us accessing the HEA ethernet.
			 */
			if (!machine_is(pseries))
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    lmb_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);

	/* Set default large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
#endif /* CONFIG_HUGETLB_PAGE */
}

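/*
 * Read the recommended hash table size (log2 of the size in bytes)
 * from the "ibm,pft-size" property of the "cpu" nodes.
 */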
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

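/*
 * Sizing example (illustrative): with 8GB of RAM, rnd_mem_size is 2^33,
 * so pteg_count = max(2^33 >> 13, 2^11) = 2^20 PTEGs and the table is
 * 2^20 << 7 = 128MB, i.e. roughly 1/64th of memory, with a 256KB floor.
 */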
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 PAGE_KERNEL, mmu_linear_psize,
				 mmu_kernel_ssize));
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	return htab_remove_mapping(start, end, mmu_linear_psize,
				   mmu_kernel_ssize);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

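/*
 * Patch the instruction at *insn_addr into a "bl" to the function whose
 * descriptor is passed in func (the first word of a ppc64 function
 * descriptor is the entry point). 0x48000001 is the branch opcode with
 * the LK bit set; the signed word offset is masked into bits 2..25.
 */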
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

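/*
 * Patch the call sites in the low level hash assembly so that they
 * branch directly to the platform's hpte_insert/remove/updatepp
 * implementations rather than going through an indirect call.
 */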
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long prot, tprot;
	unsigned long base = 0, size = 0, limit;
	int i;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space. On cell we want it to be
		 * in the first 2 Gig so we can use it for IOMMU hacks.
		 */
		if (machine_is(cell))
			limit = 0x80000000;
		else
			limit = 0;

		table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	prot = PAGE_KERNEL;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;
		tprot = prot | (in_kernel_text(base) ? _PAGE_EXEC : 0);

		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
		    base, size, tprot);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
				&& dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), tprot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							tprot,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				tprot, mmu_linear_psize, mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), prot,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB

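/*
 * Secondary CPUs only need to load SDR1; htab_initialize() on the boot
 * CPU has already set up the hash table itself.
 */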
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

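/*
 * Return the base page size (MMU_PAGE_*) of the slice containing addr,
 * read from the slice psize masks cached in the PACA (4 bits per slice).
 */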
#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
	unsigned long index, slices;

	if (addr < SLICE_LOW_TOP) {
		slices = get_paca()->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		slices = get_paca()->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	return (slices >> (index * 4)) & 0xF;
}

#else
unsigned int get_paca_psize(unsigned long addr)
{
	return get_paca()->context.user_psize;
}
#endif

/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
		return;
	slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	if (get_paca_psize(addr) != MMU_PAGE_4K) {
		get_paca()->context = mm->context;
		slb_flush_and_rebolt();
	}
}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
 * Userspace sets the subpage permissions using the subpage_prot system call.
 *
 * Result is 0: full permissions, _PAGE_RW: read-only,
 * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
 */
static int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
	u32 spp = 0;
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000) {
		/* addresses below 4GB use spt->low_prot */
		sbpm = spt->low_prot;
	} else {
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!sbpm)
			return 0;
	}
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];

	/* extract 2-bit bitfield for this 4k subpage */
	spp >>= 30 - 2 * ((ea >> 12) & 0xf);

	/* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
	spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
	return spp;
}

#else /* CONFIG_PPC_SUBPAGE_PROT */
static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
{
	return 0;
}
#endif

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 * -2 - access not permitted by subpage protection mechanism
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca_psize(ea)) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
	{
		int spp = subpage_protection(pgdir, ea);
		if (access & spp)
			rc = -2;
		else
			rc = __hash_page_4K(ea, access, vsid, ptep, trap,
					    local, ssize, spp);
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

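/*
 * Try to pre-fault a user page into the hash table when its Linux PTE
 * is set up (e.g. from update_mmu_cache()), so the first access does not
 * take a hash fault. Only standard-sized user pages are preloaded.
 */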
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
			       subpage_protection(pgdir, ea));

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 * do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

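/*
 * Flush a batch of hash entries collected in the per-cpu ppc64_tlb_batch,
 * using the platform's batched flush if it provides one, otherwise one
 * page at a time via flush_hash_page().
 */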
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
{
	if (user_mode(regs)) {
#ifdef CONFIG_PPC_SUBPAGE_PROT
		if (rc == -2)
			_exception(SIGSEGV, regs, SEGV_ACCERR, address);
		else
#endif
			_exception(SIGBUS, regs, BUS_ADRERR, address);
	} else
		bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
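/*
 * CONFIG_DEBUG_PAGEALLOC: the linear mapping is hashed page by page so
 * that individual pages can be pulled out of the hash table when they
 * are freed. linear_map_hash_slots[] records the HPTE slot used for
 * each page, with 0x80 marking a valid entry.
 */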
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */