powerpc/numa: Use form 1 affinity to setup node distance
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b6970c9..3ecdcec 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -19,8 +19,8 @@
  */
 
 #undef DEBUG
+#undef DEBUG_LOW
 
-#include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -31,6 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
+#include <linux/lmb.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -41,7 +42,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/machdep.h>
-#include <asm/lmb.h>
+#include <asm/prom.h>
 #include <asm/abs_addr.h>
 #include <asm/tlbflush.h>
 #include <asm/io.h>
@@ -49,8 +50,9 @@
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/cputable.h>
-#include <asm/abs_addr.h>
 #include <asm/sections.h>
+#include <asm/spu.h>
+#include <asm/udbg.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
 #else
 #define DBG(fmt...)
 #endif
 
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) udbg_printf(fmt)
+#else
+#define DBG_LOW(fmt...)
+#endif
+
+#define KB (1024)
+#define MB (1024*KB)
+#define GB (1024L*MB)
+
 /*
  * Note:  pte   --> Linux PTE
  *        HPTE  --> PowerPC Hashed Page Table Entry
 extern unsigned long dart_tablebase;
 #endif /* CONFIG_U3_DART */
 
-hpte_t *htab_address;
+static unsigned long _SDR1;
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+struct hash_pte *htab_address;
+unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
+EXPORT_SYMBOL_GPL(htab_hash_mask);
+int mmu_linear_psize = MMU_PAGE_4K;
+int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
+int mmu_io_psize = MMU_PAGE_4K;
+int mmu_kernel_ssize = MMU_SEGSIZE_256M;
+int mmu_highuser_ssize = MMU_SEGSIZE_256M;
+u16 mmu_slb_size = 64;
+EXPORT_SYMBOL_GPL(mmu_slb_size);
+#ifdef CONFIG_HUGETLB_PAGE
+unsigned int HPAGE_SHIFT;
+#endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static DEFINE_SPINLOCK(linear_map_hash_lock);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
+/* These are the page size definition arrays to be used when none
+ * are provided by the firmware.
+ */
 
-unsigned long _SDR1;
+/* Pre-POWER4 CPUs (4k pages only)
+ */
+static struct mmu_psize_def mmu_psize_defaults_old[] = {
+       [MMU_PAGE_4K] = {
+               .shift  = 12,
+               .sllp   = 0,
+               .penc   = 0,
+               .avpnm  = 0,
+               .tlbiel = 0,
+       },
+};
+
+/* POWER4, GPUL, POWER5
+ *
+ * Support for 16Mb large pages
+ */
+static struct mmu_psize_def mmu_psize_defaults_gp[] = {
+       [MMU_PAGE_4K] = {
+               .shift  = 12,
+               .sllp   = 0,
+               .penc   = 0,
+               .avpnm  = 0,
+               .tlbiel = 1,
+       },
+       [MMU_PAGE_16M] = {
+               .shift  = 24,
+               .sllp   = SLB_VSID_L,
+               .penc   = 0,
+               .avpnm  = 0x1UL,
+               .tlbiel = 0,
+       },
+};
+
+static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+{
+       unsigned long rflags = pteflags & 0x1fa;
 
-#define KB (1024)
-#define MB (1024*KB)
+       /* _PAGE_EXEC -> NOEXEC */
+       if ((pteflags & _PAGE_EXEC) == 0)
+               rflags |= HPTE_R_N;
 
-static inline void loop_forever(void)
+       /* PP bits. PAGE_USER is already PP bit 0x2, so we only
+        * need to add in 0x1 if it's a read-only user page
+        */
+       if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
+                                        (pteflags & _PAGE_DIRTY)))
+               rflags |= 1;
+
+       /* Always add C */
+       return rflags | HPTE_R_C;
+}
+
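
A quick userspace sketch of the PP-bit logic in htab_convert_pte_flags()
above. The flag values below are illustrative stand-ins, not the kernel's
real asm/pgtable.h constants:

#include <stdio.h>

/* Illustrative bit values only -- the real ones live in asm/pgtable.h
 * and asm/mmu-hash64.h. */
#define _PAGE_USER  0x004
#define _PAGE_RW    0x008
#define _PAGE_DIRTY 0x080
#define _PAGE_EXEC  0x200
#define HPTE_R_N    0x004UL
#define HPTE_R_C    0x080UL

static unsigned long convert(unsigned long pteflags)
{
	unsigned long rflags = 0;

	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;	/* no-execute */
	/* user pages already carry PP bit 0x2; read-only adds 0x1 */
	if ((pteflags & _PAGE_USER) &&
	    !((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
		rflags |= 1;
	return rflags | HPTE_R_C;	/* C (changed) is always set */
}

int main(void)
{
	printf("user ro:  %#lx\n", convert(_PAGE_USER));
	printf("user rw:  %#lx\n", convert(_PAGE_USER | _PAGE_RW | _PAGE_DIRTY));
	printf("kern rwx: %#lx\n", convert(_PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC));
	return 0;
}
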
+int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+                     unsigned long pstart, unsigned long prot,
+                     int psize, int ssize)
 {
-       volatile unsigned long x = 1;
-       for(;x;x|=1)
-               ;
+       unsigned long vaddr, paddr;
+       unsigned int step, shift;
+       int ret = 0;
+
+       shift = mmu_psize_defs[psize].shift;
+       step = 1 << shift;
+
+       prot = htab_convert_pte_flags(prot);
+
+       DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
+           vstart, vend, pstart, prot, psize, ssize);
+
+       for (vaddr = vstart, paddr = pstart; vaddr < vend;
+            vaddr += step, paddr += step) {
+               unsigned long hash, hpteg;
+               unsigned long vsid = get_kernel_vsid(vaddr, ssize);
+               unsigned long va = hpt_va(vaddr, vsid, ssize);
+               unsigned long tprot = prot;
+
+               /* Make kernel text executable */
+               if (overlaps_kernel_text(vaddr, vaddr + step))
+                       tprot &= ~HPTE_R_N;
+
+               hash = hpt_hash(va, shift, ssize);
+               hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+               BUG_ON(!ppc_md.hpte_insert);
+               ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+                                        HPTE_V_BOLTED, psize, ssize);
+
+               if (ret < 0)
+                       break;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+               if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+                       linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+       }
+       return ret < 0 ? ret : 0;
 }
 
-static inline void create_pte_mapping(unsigned long start, unsigned long end,
-                                     unsigned long mode, int large)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int htab_remove_mapping(unsigned long vstart, unsigned long vend,
+                     int psize, int ssize)
 {
-       unsigned long addr;
-       unsigned int step;
-       unsigned long tmp_mode;
-       unsigned long vflags;
-
-       if (large) {
-               step = 16*MB;
-               vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
-       } else {
-               step = 4*KB;
-               vflags = HPTE_V_BOLTED;
+       unsigned long vaddr;
+       unsigned int step, shift;
+
+       shift = mmu_psize_defs[psize].shift;
+       step = 1 << shift;
+
+       if (!ppc_md.hpte_removebolted) {
+               printk(KERN_WARNING "Platform doesn't implement "
+                               "hpte_removebolted\n");
+               return -EINVAL;
        }
 
-       for (addr = start; addr < end; addr += step) {
-               unsigned long vpn, hash, hpteg;
-               unsigned long vsid = get_kernel_vsid(addr);
-               unsigned long va = (vsid << 28) | (addr & 0xfffffff);
-               int ret = -1;
+       for (vaddr = vstart; vaddr < vend; vaddr += step)
+               ppc_md.hpte_removebolted(vaddr, psize, ssize);
 
-               if (large)
-                       vpn = va >> HPAGE_SHIFT;
-               else
-                       vpn = va >> PAGE_SHIFT;
+       return 0;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
+static int __init htab_dt_scan_seg_sizes(unsigned long node,
+                                        const char *uname, int depth,
+                                        void *data)
+{
+       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+       u32 *prop;
+       unsigned long size = 0;
+
+       /* We are scanning "cpu" nodes only */
+       if (type == NULL || strcmp(type, "cpu") != 0)
+               return 0;
+
+       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
+                                         &size);
+       if (prop == NULL)
+               return 0;
+       for (; size >= 4; size -= 4, ++prop) {
+               if (prop[0] == 40) {
+                       DBG("1T segment support detected\n");
+                       cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+                       return 1;
+               }
+       }
+       cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
+       return 0;
+}
 
-               tmp_mode = mode;
-               
-               /* Make non-kernel text non-executable */
-               if (!in_kernel_text(addr))
-                       tmp_mode = mode | HW_NO_EXEC;
+static void __init htab_init_seg_sizes(void)
+{
+       of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+}
 
-               hash = hpt_hash(vpn, large);
+static int __init htab_dt_scan_page_sizes(unsigned long node,
+                                         const char *uname, int depth,
+                                         void *data)
+{
+       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+       u32 *prop;
+       unsigned long size = 0;
+
+       /* We are scanning "cpu" nodes only */
+       if (type == NULL || strcmp(type, "cpu") != 0)
+               return 0;
+
+       prop = (u32 *)of_get_flat_dt_prop(node,
+                                         "ibm,segment-page-sizes", &size);
+       if (prop != NULL) {
+               DBG("Page sizes from device-tree:\n");
+               size /= 4;
+               cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+               while(size > 0) {
+                       unsigned int shift = prop[0];
+                       unsigned int slbenc = prop[1];
+                       unsigned int lpnum = prop[2];
+                       unsigned int lpenc = 0;
+                       struct mmu_psize_def *def;
+                       int idx = -1;
+
+                       size -= 3; prop += 3;
+                       while(size > 0 && lpnum) {
+                               if (prop[0] == shift)
+                                       lpenc = prop[1];
+                               prop += 2; size -= 2;
+                               lpnum--;
+                       }
+                       switch(shift) {
+                       case 0xc:
+                               idx = MMU_PAGE_4K;
+                               break;
+                       case 0x10:
+                               idx = MMU_PAGE_64K;
+                               break;
+                       case 0x14:
+                               idx = MMU_PAGE_1M;
+                               break;
+                       case 0x18:
+                               idx = MMU_PAGE_16M;
+                               cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+                               break;
+                       case 0x22:
+                               idx = MMU_PAGE_16G;
+                               break;
+                       }
+                       if (idx < 0)
+                               continue;
+                       def = &mmu_psize_defs[idx];
+                       def->shift = shift;
+                       if (shift <= 23)
+                               def->avpnm = 0;
+                       else
+                               def->avpnm = (1 << (shift - 23)) - 1;
+                       def->sllp = slbenc;
+                       def->penc = lpenc;
+                       /* We don't know for sure what's up with tlbiel, so
+                        * for now we only set it for 4K and 64K pages
+                        */
+                       if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+                               def->tlbiel = 1;
+                       else
+                               def->tlbiel = 0;
+
+                       DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
+                           "tlbiel=%d, penc=%d\n",
+                           idx, shift, def->sllp, def->avpnm, def->tlbiel,
+                           def->penc);
+               }
+               return 1;
+       }
+       return 0;
+}
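
For reference, ibm,segment-page-sizes is a flat cell array: per supported
base size, { base shift, SLB encoding, pair count }, followed by pair-count
pairs of { actual shift, HPTE encoding }. A self-contained walk over a
made-up property (the cell values are invented for illustration):

#include <stdio.h>

/* Invented example property: 4K base (shift 12) with one encoding pair,
 * then 16M base (shift 24) with one encoding pair. */
static const unsigned int prop[] = {
	12, 0x000, 1,  12, 0x0,
	24, 0x100, 1,  24, 0x0,
};

int main(void)
{
	int size = sizeof(prop) / sizeof(prop[0]);
	const unsigned int *p = prop;

	while (size > 0) {
		unsigned int shift = p[0], slbenc = p[1], lpnum = p[2];
		unsigned int lpenc = 0;

		p += 3; size -= 3;
		while (size > 0 && lpnum) {
			/* keep the HPTE encoding whose actual size matches
			 * the base size itself */
			if (p[0] == shift)
				lpenc = p[1];
			p += 2; size -= 2;
			lpnum--;
		}
		printf("shift=%u sllp=%#x penc=%#x\n", shift, slbenc, lpenc);
	}
	return 0;
}
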
 
-               hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+#ifdef CONFIG_HUGETLB_PAGE
+/* Scan for 16G memory blocks that have been set aside for huge pages
+ * and reserve those blocks for 16G huge pages.
+ */
+static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
+                                       const char *uname, int depth,
+                                       void *data) {
+       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+       unsigned long *addr_prop;
+       u32 *page_count_prop;
+       unsigned int expected_pages;
+       long unsigned int phys_addr;
+       long unsigned int block_size;
+
+       /* We are scanning "memory" nodes only */
+       if (type == NULL || strcmp(type, "memory") != 0)
+               return 0;
+
+       /* This property is the log base 2 of the number of virtual pages that
+        * will represent this memory block. */
+       page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
+       if (page_count_prop == NULL)
+               return 0;
+       expected_pages = (1 << page_count_prop[0]);
+       addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
+       if (addr_prop == NULL)
+               return 0;
+       phys_addr = addr_prop[0];
+       block_size = addr_prop[1];
+       if (block_size != (16 * GB))
+               return 0;
+       printk(KERN_INFO "Huge page(16GB) memory: "
+                       "addr = 0x%lX size = 0x%lX pages = %d\n",
+                       phys_addr, block_size, expected_pages);
+       if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
+               lmb_reserve(phys_addr, block_size * expected_pages);
+               add_gpage(phys_addr, block_size, expected_pages);
+       }
+       return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
 
-#ifdef CONFIG_PPC_ISERIES
-               if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
-                       ret = iSeries_hpte_bolt_or_insert(hpteg, va,
-                               virt_to_abs(addr) >> PAGE_SHIFT,
-                               vflags, tmp_mode);
-               else
-#endif
-#ifdef CONFIG_PPC_PSERIES
-               if (systemcfg->platform & PLATFORM_LPAR)
-                       ret = pSeries_lpar_hpte_insert(hpteg, va,
-                               virt_to_abs(addr) >> PAGE_SHIFT,
-                               vflags, tmp_mode);
-               else
+static void __init htab_init_page_sizes(void)
+{
+       int rc;
+
+       /* Default to 4K pages only */
+       memcpy(mmu_psize_defs, mmu_psize_defaults_old,
+              sizeof(mmu_psize_defaults_old));
+
+       /*
+        * Try to find the available page sizes in the device-tree
+        */
+       rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
+       if (rc != 0)  /* Found */
+               goto found;
+
+       /*
+        * Not in the device-tree, let's fall back on the known size
+        * list for 16M-capable GP & GR
+        */
+       if (cpu_has_feature(CPU_FTR_16M_PAGE))
+               memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
+                      sizeof(mmu_psize_defaults_gp));
+ found:
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * Pick a size for the linear mapping. Currently, we only support
+        * 16M, 1M and 4K which is the default
+        */
+       if (mmu_psize_defs[MMU_PAGE_16M].shift)
+               mmu_linear_psize = MMU_PAGE_16M;
+       else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+               mmu_linear_psize = MMU_PAGE_1M;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
+#ifdef CONFIG_PPC_64K_PAGES
+       /*
+        * Pick a size for the ordinary pages. Default is 4K, we support
+        * 64K for user mappings and vmalloc if supported by the processor.
+        * We only use 64k for ioremap if the processor
+        * (and firmware) support cache-inhibited large pages.
+        * If not, we use 4k and set mmu_ci_restrictions so that
+        * hash_page knows to switch processes that use cache-inhibited
+        * mappings to 4k pages.
+        */
+       if (mmu_psize_defs[MMU_PAGE_64K].shift) {
+               mmu_virtual_psize = MMU_PAGE_64K;
+               mmu_vmalloc_psize = MMU_PAGE_64K;
+               if (mmu_linear_psize == MMU_PAGE_4K)
+                       mmu_linear_psize = MMU_PAGE_64K;
+               if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+                       /*
+                        * Don't use 64k pages for ioremap on pSeries, since
+                        * that would stop us accessing the HEA ethernet.
+                        */
+                       if (!machine_is(pseries))
+                               mmu_io_psize = MMU_PAGE_64K;
+               } else
+                       mmu_ci_restrictions = 1;
+       }
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       /* We try to use 16M pages for vmemmap if that is supported
+        * and we have at least 1G of RAM at boot
+        */
+       if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+           lmb_phys_mem_size() >= 0x40000000)
+               mmu_vmemmap_psize = MMU_PAGE_16M;
+       else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+               mmu_vmemmap_psize = MMU_PAGE_64K;
+       else
+               mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+       printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+              "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              ", vmemmap = %d"
 #endif
-#ifdef CONFIG_PPC_MULTIPLATFORM
-                       ret = native_hpte_insert(hpteg, va,
-                               virt_to_abs(addr) >> PAGE_SHIFT,
-                               vflags, tmp_mode);
+              "\n",
+              mmu_psize_defs[mmu_linear_psize].shift,
+              mmu_psize_defs[mmu_virtual_psize].shift,
+              mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              ,mmu_psize_defs[mmu_vmemmap_psize].shift
 #endif
+              );
 
-               if (ret == -1) {
-                       ppc64_terminate_msg(0x20, "create_pte_mapping");
-                       loop_forever();
-               }
+#ifdef CONFIG_HUGETLB_PAGE
+       /* Reserve 16G huge page memory sections for huge pages */
+       of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static int __init htab_dt_scan_pftsize(unsigned long node,
+                                      const char *uname, int depth,
+                                      void *data)
+{
+       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+       u32 *prop;
+
+       /* We are scanning "cpu" nodes only */
+       if (type == NULL || strcmp(type, "cpu") != 0)
+               return 0;
+
+       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+       if (prop != NULL) {
+               /* pft_size[0] is the NUMA CEC cookie */
+               ppc64_pft_size = prop[1];
+               return 1;
        }
+       return 0;
 }
 
-static unsigned long get_hashtable_size(void)
+static unsigned long __init htab_get_table_size(void)
 {
-       unsigned long rnd_mem_size, pteg_count;
+       unsigned long mem_size, rnd_mem_size, pteg_count, psize;
 
-       /* If hash size wasn't obtained in prom.c, we calculate it now based on
-        * the total RAM size
+       /* If hash size isn't already provided by the platform, we try to
+        * retrieve it from the device-tree. If it's not there either, we
+        * calculate it now based on the total RAM size
         */
+       if (ppc64_pft_size == 0)
+               of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
        if (ppc64_pft_size)
                return 1UL << ppc64_pft_size;
 
        /* round mem_size up to next power of 2 */
-       rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
-       if (rnd_mem_size < systemcfg->physicalMemorySize)
+       mem_size = lmb_phys_mem_size();
+       rnd_mem_size = 1UL << __ilog2(mem_size);
+       if (rnd_mem_size < mem_size)
                rnd_mem_size <<= 1;
 
        /* # pages / 2 */
-       pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
+       psize = mmu_psize_defs[mmu_virtual_psize].shift;
+       pteg_count = max(rnd_mem_size >> (psize + 1), 1UL << 11);
 
        return pteg_count << 7;
 }
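
Sanity-checking the sizing math above: one PTEG per two pages, a floor of
2^11 PTEGs, and 128 bytes (1 << 7) per PTEG. For example, 8GB of RAM with
4K pages yields a 128MB hash table:

#include <stdio.h>

int main(void)
{
	unsigned long rnd_mem_size = 8UL << 30;	/* 8GB, already a power of two */
	unsigned long psize = 12;		/* 4K pages */
	unsigned long pteg_count = rnd_mem_size >> (psize + 1);

	if (pteg_count < (1UL << 11))		/* floor at 2^11 PTEGs */
		pteg_count = 1UL << 11;

	/* 128 bytes per PTEG: 2^20 PTEGs -> a 128MB hash table */
	printf("%lu PTEGs, %lu MB\n", pteg_count, (pteg_count << 7) >> 20);
	return 0;
}
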
 
-void __init htab_initialize(void)
+#ifdef CONFIG_MEMORY_HOTPLUG
+void create_section_mapping(unsigned long start, unsigned long end)
+{
+       BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+                                pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+                                mmu_kernel_ssize));
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+       return htab_remove_mapping(start, end, mmu_linear_psize,
+                       mmu_kernel_ssize);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+static inline void make_bl(unsigned int *insn_addr, void *func)
+{
+       unsigned long funcp = *((unsigned long *)func);
+       int offset = funcp - (unsigned long)insn_addr;
+
+       *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
+       flush_icache_range((unsigned long)insn_addr,
+                          (unsigned long)insn_addr + 4);
+}
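
The magic numbers in make_bl() are the PowerPC `bl` encoding: primary
opcode 18 with the LK bit gives 0x48000001, and the signed 26-bit
displacement sits in bits 2..25 (mask 0x03fffffc). A standalone encoder;
like the kernel code, it assumes the offset is word-aligned and in range:

#include <stdio.h>
#include <stdint.h>

static uint32_t encode_bl(long offset)
{
	/* opcode 18 + LK = 0x48000001; displacement in bits 2..25 */
	return (uint32_t)(0x48000001 | (offset & 0x03fffffc));
}

int main(void)
{
	printf("bl +0x100 -> %08x\n", encode_bl(0x100));	/* 48000101 */
	printf("bl -0x100 -> %08x\n", encode_bl(-0x100));	/* 4bffff01 */
	return 0;
}
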
+
+static void __init htab_finish_init(void)
+{
+       extern unsigned int *htab_call_hpte_insert1;
+       extern unsigned int *htab_call_hpte_insert2;
+       extern unsigned int *htab_call_hpte_remove;
+       extern unsigned int *htab_call_hpte_updatepp;
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
+       extern unsigned int *ht64_call_hpte_insert1;
+       extern unsigned int *ht64_call_hpte_insert2;
+       extern unsigned int *ht64_call_hpte_remove;
+       extern unsigned int *ht64_call_hpte_updatepp;
+
+       make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+       make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+       make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+       make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_HAS_HASH_64K */
+
+       make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
+       make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
+       make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
+       make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+}
+
+static void __init htab_initialize(void)
 {
-       unsigned long table, htab_size_bytes;
+       unsigned long table;
        unsigned long pteg_count;
-       unsigned long mode_rw;
-       int i, use_largepages = 0;
-       unsigned long base = 0, size = 0;
-       extern unsigned long tce_alloc_start, tce_alloc_end;
+       unsigned long prot;
+       unsigned long base = 0, size = 0, limit;
+       int i;
 
        DBG(" -> htab_initialize()\n");
 
+       /* Initialize segment sizes */
+       htab_init_seg_sizes();
+
+       /* Initialize page sizes */
+       htab_init_page_sizes();
+
+       if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+               mmu_kernel_ssize = MMU_SEGSIZE_1T;
+               mmu_highuser_ssize = MMU_SEGSIZE_1T;
+               printk(KERN_INFO "Using 1TB segments\n");
+       }
+
        /*
         * Calculate the required size of the htab.  We want the number of
         * PTEGs to equal one half the number of real pages.
         */ 
-       htab_size_bytes = get_hashtable_size();
+       htab_size_bytes = htab_get_table_size();
        pteg_count = htab_size_bytes >> 7;
 
        htab_hash_mask = pteg_count - 1;
 
-       if (systemcfg->platform & PLATFORM_LPAR) {
+       if (firmware_has_feature(FW_FEATURE_LPAR)) {
                /* Using a hypervisor which owns the htab */
                htab_address = NULL;
                _SDR1 = 0; 
        } else {
                /* Find storage for the HPT.  Must be contiguous in
-                * the absolute address space.
+                * the absolute address space. On cell we want it to be
+                * in the first 2 Gig so we can use it for IOMMU hacks.
                 */
-               table = lmb_alloc(htab_size_bytes, htab_size_bytes);
+               if (machine_is(cell))
+                       limit = 0x80000000;
+               else
+                       limit = 0;
+
+               table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
                DBG("Hash table allocated at %lx, size: %lx\n", table,
                    htab_size_bytes);
 
-               if ( !table ) {
-                       ppc64_terminate_msg(0x20, "hpt space");
-                       loop_forever();
-               }
                htab_address = abs_to_virt(table);
 
                /* htab absolute addr + encoded htabsize */
@@ -219,47 +639,64 @@ void __init htab_initialize(void)
 
                /* Initialize the HPT with no entries */
                memset((void *)table, 0, htab_size_bytes);
+
+               /* Set SDR1 */
+               mtspr(SPRN_SDR1, _SDR1);
        }
 
-       mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
+       prot = pgprot_val(PAGE_KERNEL);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
+                                                   1, lmb.rmo_size));
+       memset(linear_map_hash_slots, 0, linear_map_hash_count);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
        /* On U3 based machines, we need to reserve the DART area and
         * _NOT_ map it to avoid cache paradoxes as it's remapped non
         * cacheable later on
         */
-       if (cpu_has_feature(CPU_FTR_16M_PAGE))
-               use_largepages = 1;
 
        /* Create the bolted linear mapping in the hash table */
        for (i=0; i < lmb.memory.cnt; i++) {
-               base = lmb.memory.region[i].base + KERNELBASE;
+               base = (unsigned long)__va(lmb.memory.region[i].base);
                size = lmb.memory.region[i].size;
 
-               DBG("creating mapping for region: %lx : %lx\n", base, size);
+               DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
+                   base, size, prot);
 
 #ifdef CONFIG_U3_DART
                /* Do not map the DART space. Fortunately, it will be aligned
-                * in such a way that it will not cross two lmb regions and will
-                * fit within a single 16Mb page.
-                * The DART space is assumed to be a full 16Mb region even if we
-                * only use 2Mb of that space. We will use more of it later for
-                * AGP GART. We have to use a full 16Mb large page.
+                * in such a way that it will not cross two lmb regions and
+                * will fit within a single 16Mb page.
+                * The DART space is assumed to be a full 16Mb region even if
+                * we only use 2Mb of that space. We will use more of it later
+                * for AGP GART. We have to use a full 16Mb large page.
                 */
                DBG("DART base: %lx\n", dart_tablebase);
 
                if (dart_tablebase != 0 && dart_tablebase >= base
                    && dart_tablebase < (base + size)) {
+                       unsigned long dart_table_end = dart_tablebase + 16 * MB;
                        if (base != dart_tablebase)
-                               create_pte_mapping(base, dart_tablebase, mode_rw,
-                                                  use_largepages);
-                       if ((base + size) > (dart_tablebase + 16*MB))
-                               create_pte_mapping(dart_tablebase + 16*MB, base + size,
-                                                  mode_rw, use_largepages);
+                               BUG_ON(htab_bolt_mapping(base, dart_tablebase,
+                                                       __pa(base), prot,
+                                                       mmu_linear_psize,
+                                                       mmu_kernel_ssize));
+                       if ((base + size) > dart_table_end)
+                               BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
+                                                       base + size,
+                                                       __pa(dart_table_end),
+                                                        prot,
+                                                        mmu_linear_psize,
+                                                        mmu_kernel_ssize));
                        continue;
                }
 #endif /* CONFIG_U3_DART */
-               create_pte_mapping(base, base + size, mode_rw, use_largepages);
-       }
+               BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
+                               prot, mmu_linear_psize, mmu_kernel_ssize));
+       }
 
        /*
         * If we have a memory_limit and we've allocated TCEs then we need to
@@ -269,21 +706,62 @@ void __init htab_initialize(void)
         * for either 4K or 16MB pages.
         */
        if (tce_alloc_start) {
-               tce_alloc_start += KERNELBASE;
-               tce_alloc_end += KERNELBASE;
+               tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+               tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
                if (base + size >= tce_alloc_start)
                        tce_alloc_start = base + size + 1;
 
-               create_pte_mapping(tce_alloc_start, tce_alloc_end,
-                       mode_rw, use_largepages);
+               BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+                                        __pa(tce_alloc_start), prot,
+                                        mmu_linear_psize, mmu_kernel_ssize));
        }
 
+       htab_finish_init();
+
        DBG(" <- htab_initialize()\n");
 }
 #undef KB
 #undef MB
 
+void __init early_init_mmu(void)
+{
+       /* Setup initial STAB address in the PACA */
+       get_paca()->stab_real = __pa((u64)&initial_stab);
+       get_paca()->stab_addr = (u64)&initial_stab;
+
+       /* Initialize the MMU Hash table and create the linear mapping
+        * of memory. Has to be done before stab/slb initialization as
+        * this is currently where the page size encoding is obtained
+        */
+       htab_initialize();
+
+       /* Initialize stab / SLB management except on iSeries
+        */
+       if (cpu_has_feature(CPU_FTR_SLB))
+               slb_initialize();
+       else if (!firmware_has_feature(FW_FEATURE_ISERIES))
+               stab_initialize(get_paca()->stab_real);
+}
+
+#ifdef CONFIG_SMP
+void __cpuinit early_init_mmu_secondary(void)
+{
+       /* Initialize hash table for that CPU */
+       if (!firmware_has_feature(FW_FEATURE_LPAR))
+               mtspr(SPRN_SDR1, _SDR1);
+
+       /* Initialize STAB/SLB. We use a virtual address as it works
+        * in real mode on pSeries and we want a virtual address on
+        * iSeries anyway
+        */
+       if (cpu_has_feature(CPU_FTR_SLB))
+               slb_initialize();
+       else
+               stab_initialize(get_paca()->stab_addr);
+}
+#endif /* CONFIG_SMP */
+
 /*
  * Called by asm hashtable.S for doing lazy icache flush
  */
@@ -299,154 +777,446 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
        /* page is dirty */
        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                if (trap == 0x400) {
-                       __flush_dcache_icache(page_address(page));
+                       flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                } else
-                       pp |= HW_NO_EXEC;
+                       pp |= HPTE_R_N;
        }
        return pp;
 }
 
+#ifdef CONFIG_PPC_MM_SLICES
+unsigned int get_paca_psize(unsigned long addr)
+{
+       unsigned long index, slices;
+
+       if (addr < SLICE_LOW_TOP) {
+               slices = get_paca()->context.low_slices_psize;
+               index = GET_LOW_SLICE_INDEX(addr);
+       } else {
+               slices = get_paca()->context.high_slices_psize;
+               index = GET_HIGH_SLICE_INDEX(addr);
+       }
+       return (slices >> (index * 4)) & 0xF;
+}
+
+#else
+unsigned int get_paca_psize(unsigned long addr)
+{
+       return get_paca()->context.user_psize;
+}
+#endif
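
Each slice's page-size index is a 4-bit nibble packed into the slices
word, so lookup is a shift and mask. A toy decode with a hypothetical
packed value (the psize indices here are arbitrary examples):

#include <stdio.h>

int main(void)
{
	/* hypothetical low_slices_psize word: slice 0 -> psize index 2,
	 * slice 1 -> psize index 4 (indices are arbitrary here) */
	unsigned long slices = 0x42;
	unsigned long index;

	for (index = 0; index < 2; index++)
		printf("slice %lu -> psize index %lu\n",
		       index, (slices >> (index * 4)) & 0xF);
	return 0;
}
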
+
+/*
+ * Demote a segment to using 4k pages.
+ * For now this makes the whole process use 4k pages.
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+{
+       if (get_slice_psize(mm, addr) == MMU_PAGE_4K)
+               return;
+       slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
+#ifdef CONFIG_SPU_BASE
+       spu_flush_all_slbs(mm);
+#endif
+       if (get_paca_psize(addr) != MMU_PAGE_4K) {
+               get_paca()->context = mm->context;
+               slb_flush_and_rebolt();
+       }
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * This looks up a 2-bit protection code for a 4k subpage of a 64k page.
+ * Userspace sets the subpage permissions using the subpage_prot system call.
+ *
+ * Result is 0: full permissions, _PAGE_RW: read-only,
+ * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
+ */
+static int subpage_protection(struct mm_struct *mm, unsigned long ea)
+{
+       struct subpage_prot_table *spt = &mm->context.spt;
+       u32 spp = 0;
+       u32 **sbpm, *sbpp;
+
+       if (ea >= spt->maxaddr)
+               return 0;
+       if (ea < 0x100000000) {
+               /* addresses below 4GB use spt->low_prot */
+               sbpm = spt->low_prot;
+       } else {
+               sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
+               if (!sbpm)
+                       return 0;
+       }
+       sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
+       if (!sbpp)
+               return 0;
+       spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
+
+       /* extract 2-bit bitfield for this 4k subpage */
+       spp >>= 30 - 2 * ((ea >> 12) & 0xf);
+
+       /* turn 0,1,2,3 into combination of _PAGE_USER and _PAGE_RW */
+       spp = ((spp & 2) ? _PAGE_USER : 0) | ((spp & 1) ? _PAGE_RW : 0);
+       return spp;
+}
+
+#else /* CONFIG_PPC_SUBPAGE_PROT */
+static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
+{
+       return 0;
+}
+#endif
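
The protection word packs sixteen 2-bit codes, one per 4K subpage of a 64K
page, most-significant subpage first, which is why `30 - 2 * subpage`
selects the right bits; the caller then maps code bit 2 to _PAGE_USER and
bit 1 to _PAGE_RW. A self-contained decode of a made-up word:

#include <stdio.h>

int main(void)
{
	/* made-up word: subpage 0 -> code 3 (no access),
	 * subpage 15 -> code 1 (read-only) */
	unsigned int spp_word = 0xC0000001;
	unsigned long ea;

	for (ea = 0; ea < 0x10000; ea += 0x1000) {	/* 16 x 4K subpages */
		unsigned int sub = (ea >> 12) & 0xf;
		unsigned int spp = (spp_word >> (30 - 2 * sub)) & 3;

		if (spp)
			printf("subpage %2u: code %u\n", sub, spp);
	}
	return 0;
}
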
+
 /* Result code is:
  *  0 - handled
  *  1 - normal page fault
  * -1 - critical hash insertion error
+ * -2 - access not permitted by subpage protection mechanism
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
-       void *pgdir;
+       pgd_t *pgdir;
        unsigned long vsid;
        struct mm_struct *mm;
        pte_t *ptep;
-       int ret;
-       int user_region = 0;
-       int local = 0;
-       cpumask_t tmp;
+       unsigned hugeshift;
+       const struct cpumask *tmp;
+       int rc, user_region = 0, local = 0;
+       int psize, ssize;
 
-       if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
-               return 1;
+       DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
+               ea, access, trap);
 
+       if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+               DBG_LOW(" out of pgtable range !\n");
+               return 1;
+       }
+
+       /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
                user_region = 1;
                mm = current->mm;
-               if (! mm)
+               if (! mm) {
+                       DBG_LOW(" user region with no mm !\n");
                        return 1;
-
-               vsid = get_vsid(mm->context.id, ea);
+               }
+               psize = get_slice_psize(mm, ea);
+               ssize = user_segment_size(ea);
+               vsid = get_vsid(mm->context.id, ea, ssize);
                break;
        case VMALLOC_REGION_ID:
                mm = &init_mm;
-               vsid = get_kernel_vsid(ea);
+               vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+               if (ea < VMALLOC_END)
+                       psize = mmu_vmalloc_psize;
+               else
+                       psize = mmu_io_psize;
+               ssize = mmu_kernel_ssize;
                break;
-#if 0
-       case KERNEL_REGION_ID:
-               /*
-                * Should never get here - entire 0xC0... region is bolted.
-                * Send the problem up to do_page_fault 
-                */
-#endif
        default:
                /* Not a valid range
                 * Send the problem up to do_page_fault 
                 */
                return 1;
-               break;
        }
+       DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+       /* Get pgdir */
        pgdir = mm->pgd;
-
        if (pgdir == NULL)
                return 1;
 
-       tmp = cpumask_of_cpu(smp_processor_id());
-       if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
+       /* Check CPU locality */
+       tmp = cpumask_of(smp_processor_id());
+       if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
                local = 1;
 
-       /* Is this a huge page ? */
-       if (unlikely(in_hugepage_area(mm->context, ea)))
-               ret = hash_huge_page(mm, access, ea, vsid, local);
-       else {
-               ptep = find_linux_pte(pgdir, ea);
-               if (ptep == NULL)
-                       return 1;
-               ret = __hash_page(ea, access, vsid, ptep, trap, local);
+#ifndef CONFIG_PPC_64K_PAGES
+       /* If we use 4K pages and our psize is not 4K, then we might
+        * be hitting a special driver mapping, and need to align the
+        * address before we fetch the PTE.
+        *
+        * It could also be a hugepage mapping, in which case this is
+        * not necessary, but it's not harmful, either.
+        */
+       if (psize != MMU_PAGE_4K)
+               ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+       /* Get PTE and page size from page tables */
+       ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
+       if (ptep == NULL || !pte_present(*ptep)) {
+               DBG_LOW(" no PTE !\n");
+               return 1;
+       }
+
+#ifdef CONFIG_HUGETLB_PAGE
+       if (hugeshift)
+               return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+                                       ssize, hugeshift, psize);
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifndef CONFIG_PPC_64K_PAGES
+       DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
+#else
+       DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
+               pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+       /* Pre-check access permissions (will be re-checked atomically
+        * in __hash_page_XX but this pre-check is a fast path)
+        */
+       if (access & ~pte_val(*ptep)) {
+               DBG_LOW(" no access !\n");
+               return 1;
+       }
+
+       /* Do actual hashing */
+#ifdef CONFIG_PPC_64K_PAGES
+       /* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
+       if ((pte_val(*ptep) & _PAGE_4K_PFN) && psize == MMU_PAGE_64K) {
+               demote_segment_4k(mm, ea);
+               psize = MMU_PAGE_4K;
+       }
+
+       /* If this PTE is non-cacheable and we have restrictions on
+        * using non cacheable large pages, then we switch to 4k
+        */
+       if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
+           (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+               if (user_region) {
+                       demote_segment_4k(mm, ea);
+                       psize = MMU_PAGE_4K;
+               } else if (ea < VMALLOC_END) {
+                       /*
+                        * some driver did a non-cacheable mapping
+                        * in vmalloc space, so switch vmalloc
+                        * to 4k pages
+                        */
+                       printk(KERN_ALERT "Reducing vmalloc segment "
+                              "to 4kB pages because of "
+                              "non-cacheable mapping\n");
+                       psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPU_BASE
+                       spu_flush_all_slbs(mm);
+#endif
+               }
+       }
+       if (user_region) {
+               if (psize != get_paca_psize(ea)) {
+                       get_paca()->context = mm->context;
+                       slb_flush_and_rebolt();
+               }
+       } else if (get_paca()->vmalloc_sllp !=
+                  mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+               get_paca()->vmalloc_sllp =
+                       mmu_psize_defs[mmu_vmalloc_psize].sllp;
+               slb_vmalloc_update();
        }
+#endif /* CONFIG_PPC_64K_PAGES */
 
-       return ret;
+#ifdef CONFIG_PPC_HAS_HASH_64K
+       if (psize == MMU_PAGE_64K)
+               rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
+       else
+#endif /* CONFIG_PPC_HAS_HASH_64K */
+       {
+               int spp = subpage_protection(mm, ea);
+               if (access & spp)
+                       rc = -2;
+               else
+                       rc = __hash_page_4K(ea, access, vsid, ptep, trap,
+                                           local, ssize, spp);
+       }
+
+#ifndef CONFIG_PPC_64K_PAGES
+       DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
+#else
+       DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
+               pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+       DBG_LOW(" -> rc=%d\n", rc);
+       return rc;
 }
+EXPORT_SYMBOL_GPL(hash_page);
 
-void flush_hash_page(unsigned long va, pte_t pte, int local)
+void hash_preload(struct mm_struct *mm, unsigned long ea,
+                 unsigned long access, unsigned long trap)
 {
-       unsigned long vpn, hash, secondary, slot;
-       unsigned long huge = pte_huge(pte);
+       unsigned long vsid;
+       void *pgdir;
+       pte_t *ptep;
+       unsigned long flags;
+       int local = 0;
+       int ssize;
+
+       BUG_ON(REGION_ID(ea) != USER_REGION_ID);
+
+#ifdef CONFIG_PPC_MM_SLICES
+       /* We only prefault standard pages for now */
+       if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
+               return;
+#endif
+
+       DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
+               " trap=%lx\n", mm, mm->pgd, ea, access, trap);
+
+       /* Get Linux PTE if available */
+       pgdir = mm->pgd;
+       if (pgdir == NULL)
+               return;
+       ptep = find_linux_pte(pgdir, ea);
+       if (!ptep)
+               return;
 
-       if (huge)
-               vpn = va >> HPAGE_SHIFT;
+#ifdef CONFIG_PPC_64K_PAGES
+       /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
+        * a 64K kernel), then we don't preload, hash_page() will take
+        * care of it once we actually try to access the page.
+        * That way we don't have to duplicate all of the logic for segment
+        * page size demotion here
+        */
+       if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
+               return;
+#endif /* CONFIG_PPC_64K_PAGES */
+
+       /* Get VSID */
+       ssize = user_segment_size(ea);
+       vsid = get_vsid(mm->context.id, ea, ssize);
+
+       /* Hash doesn't like irqs */
+       local_irq_save(flags);
+
+       /* Is that local to this CPU ? */
+       if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+               local = 1;
+
+       /* Hash it in */
+#ifdef CONFIG_PPC_HAS_HASH_64K
+       if (mm->context.user_psize == MMU_PAGE_64K)
+               __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
        else
-               vpn = va >> PAGE_SHIFT;
-       hash = hpt_hash(vpn, huge);
-       secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
-       if (secondary)
-               hash = ~hash;
-       slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-       slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
+#endif /* CONFIG_PPC_HAS_HASH_64K */
+               __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize,
+                              subpage_protection(mm, ea));
+
+       local_irq_restore(flags);
+}
 
-       ppc_md.hpte_invalidate(slot, va, huge, local);
+/* WARNING: This is called from hash_low_64.S; if you change this prototype,
+ *          do not forget to update the assembly call site!
+ */
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+                    int local)
+{
+       unsigned long hash, index, shift, hidx, slot;
+
+       DBG_LOW("flush_hash_page(va=%016lx)\n", va);
+       pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+               hash = hpt_hash(va, shift, ssize);
+               hidx = __rpte_to_hidx(pte, index);
+               if (hidx & _PTEIDX_SECONDARY)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += hidx & _PTEIDX_GROUP_IX;
+               DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
+               ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
+       } pte_iterate_hashed_end();
 }
 
 void flush_hash_range(unsigned long number, int local)
 {
-       if (ppc_md.flush_hash_range) {
+       if (ppc_md.flush_hash_range)
                ppc_md.flush_hash_range(number, local);
-       } else {
+       else {
                int i;
                struct ppc64_tlb_batch *batch =
                        &__get_cpu_var(ppc64_tlb_batch);
 
                for (i = 0; i < number; i++)
-                       flush_hash_page(batch->vaddr[i], batch->pte[i], local);
+                       flush_hash_page(batch->vaddr[i], batch->pte[i],
+                                       batch->psize, batch->ssize, local);
        }
 }
 
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
-       unsigned long funcp = *((unsigned long *)func);
-       int offset = funcp - (unsigned long)insn_addr;
-
-       *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
-       flush_icache_range((unsigned long)insn_addr, 4+
-                          (unsigned long)insn_addr);
-}
-
 /*
 * low_hash_fault is called when the low-level hash code fails
 * to insert a PTE due to a hypervisor error
  */
-void low_hash_fault(struct pt_regs *regs, unsigned long address)
+void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
        if (user_mode(regs)) {
-               siginfo_t info;
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+               if (rc == -2)
+                       _exception(SIGSEGV, regs, SEGV_ACCERR, address);
+               else
+#endif
+                       _exception(SIGBUS, regs, BUS_ADRERR, address);
+       } else
+               bad_page_fault(regs, address, SIGBUS);
+}
 
-               info.si_signo = SIGBUS;
-               info.si_errno = 0;
-               info.si_code = BUS_ADRERR;
-               info.si_addr = (void __user *)address;
-               force_sig_info(SIGBUS, &info, current);
-               return;
-       }
-       bad_page_fault(regs, address, SIGBUS);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+       unsigned long hash, hpteg;
+       unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+       unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+       unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
+       int ret;
+
+       hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+       hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+       ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+                                mode, HPTE_V_BOLTED,
+                                mmu_linear_psize, mmu_kernel_ssize);
+       BUG_ON(ret < 0);
+       spin_lock(&linear_map_hash_lock);
+       BUG_ON(linear_map_hash_slots[lmi] & 0x80);
+       linear_map_hash_slots[lmi] = ret | 0x80;
+       spin_unlock(&linear_map_hash_lock);
 }
 
-void __init htab_finish_init(void)
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 {
-       extern unsigned int *htab_call_hpte_insert1;
-       extern unsigned int *htab_call_hpte_insert2;
-       extern unsigned int *htab_call_hpte_remove;
-       extern unsigned int *htab_call_hpte_updatepp;
+       unsigned long hash, hidx, slot;
+       unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
+       unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+
+       hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+       spin_lock(&linear_map_hash_lock);
+       BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+       hidx = linear_map_hash_slots[lmi] & 0x7f;
+       linear_map_hash_slots[lmi] = 0;
+       spin_unlock(&linear_map_hash_lock);
+       if (hidx & _PTEIDX_SECONDARY)
+               hash = ~hash;
+       slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+       slot += hidx & _PTEIDX_GROUP_IX;
+       ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
+}
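
The DEBUG_PAGEALLOC bookkeeping packs "mapped" state and the hash-slot
index returned by hpte_insert into one byte per linear-map page: 0x80
flags a valid entry, the low 7 bits hold the hidx. A minimal encode/decode
sketch:

#include <stdio.h>

int main(void)
{
	unsigned char slot_byte;
	int ret = 0x13;			/* pretend hpte_insert returned slot 0x13 */

	slot_byte = ret | 0x80;		/* map: remember slot, mark valid */
	printf("stored: %#x\n", slot_byte);

	if (slot_byte & 0x80) {		/* unmap: recover hidx, clear entry */
		unsigned int hidx = slot_byte & 0x7f;
		printf("hidx:   %#x\n", hidx);
	}
	return 0;
}
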
 
-       make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
-       make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
-       make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
-       make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long flags, vaddr, lmi;
+       int i;
+
+       local_irq_save(flags);
+       for (i = 0; i < numpages; i++, page++) {
+               vaddr = (unsigned long)page_address(page);
+               lmi = __pa(vaddr) >> PAGE_SHIFT;
+               if (lmi >= linear_map_hash_count)
+                       continue;
+               if (enable)
+                       kernel_map_linear_page(vaddr, lmi);
+               else
+                       kernel_unmap_linear_page(vaddr, lmi);
+       }
+       local_irq_restore(flags);
 }
+#endif /* CONFIG_DEBUG_PAGEALLOC */