hugetlb: introduce pud_huge
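
Generic mm code is growing pud-level huge page support, so each arch must
provide a pud_huge() predicate alongside the existing pmd_huge(). On powerpc
it simply returns 0: huge pages here are reached through hugepd directories,
never through leaf pud entries.

Note that the diff below appears to be taken against an older base of
arch/powerpc/mm/hugetlbpage.c, so alongside the new stub it also carries
earlier conversions visible in this range: the open-coded low/high hugepage
area tracking, SLB flushing and custom get_unmapped_area variants are
replaced by the slice infrastructure (get_slice_psize() /
slice_get_unmapped_area()), HPAGE_SHIFT becomes a runtime value selected by
the new hugepagesz= boot parameter, and hash_huge_page() gains a
segment-size argument for 1TB segment support.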
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8508f97..63db7ad 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,7 +12,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sysctl.h>
 #include <asm/mmu_context.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
-#include <asm/tlb.h>
 #include <asm/spu.h>
 
-#include <linux/sysctl.h>
+#define HPAGE_SHIFT_64K        16
+#define HPAGE_SHIFT_16M        24
 
 #define NUM_LOW_AREAS  (0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define HUGEPTE_INDEX_SIZE     (PMD_SHIFT-HPAGE_SHIFT)
-#else
-#define HUGEPTE_INDEX_SIZE     (PUD_SHIFT-HPAGE_SHIFT)
-#endif
-#define PTRS_PER_HUGEPTE       (1 << HUGEPTE_INDEX_SIZE)
-#define HUGEPTE_TABLE_SIZE     (sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
+unsigned int hugepte_shift;
+#define PTRS_PER_HUGEPTE       (1 << hugepte_shift)
+#define HUGEPTE_TABLE_SIZE     (sizeof(pte_t) << hugepte_shift)
 
-#define HUGEPD_SHIFT           (HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
+#define HUGEPD_SHIFT           (HPAGE_SHIFT + hugepte_shift)
 #define HUGEPD_SIZE            (1UL << HUGEPD_SHIFT)
 #define HUGEPD_MASK            (~(HUGEPD_SIZE-1))
 
@@ -86,13 +81,37 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        return 0;
 }
 
+/* Base page size affects how we walk hugetlb page tables */
+#ifdef CONFIG_PPC_64K_PAGES
+#define hpmd_offset(pud, addr)         pmd_offset(pud, addr)
+#define hpmd_alloc(mm, pud, addr)      pmd_alloc(mm, pud, addr)
+#else
+static inline
+pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
+{
+       if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+               return pmd_offset(pud, addr);
+       else
+               return (pmd_t *) pud;
+}
+static inline
+pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
+{
+       if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+               return pmd_alloc(mm, pud, addr);
+       else
+               return (pmd_t *) pud;
+}
+#endif
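
These helpers centralize a walk-depth decision that used to be #ifdef'ed at
each call site. The effective walks, as implied by the code above (64K-base
kernels only ever see 16M huge pages here, since the hugepagesz= parser
later in this patch rejects 64K in that configuration):

	base page   huge page   walk
	64K         16M         pgd -> pud -> pmd -> hugepte
	4K          64K         pgd -> pud -> pmd -> hugepte
	4K          16M         pgd -> pud (reused as hugepd) -> hugepte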
+
 /* Modelled after find_linux_pte() */
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pg;
        pud_t *pu;
+       pmd_t *pm;
 
-       BUG_ON(! in_hugepage_area(mm->context, addr));
+       BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
        addr &= HPAGE_MASK;
 
@@ -100,27 +119,24 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        if (!pgd_none(*pg)) {
                pu = pud_offset(pg, addr);
                if (!pud_none(*pu)) {
-#ifdef CONFIG_PPC_64K_PAGES
-                       pmd_t *pm;
-                       pm = pmd_offset(pu, addr);
+                       pm = hpmd_offset(pu, addr);
                        if (!pmd_none(*pm))
                                return hugepte_offset((hugepd_t *)pm, addr);
-#else
-                       return hugepte_offset((hugepd_t *)pu, addr);
-#endif
                }
        }
 
        return NULL;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+                       unsigned long addr, unsigned long sz)
 {
        pgd_t *pg;
        pud_t *pu;
+       pmd_t *pm;
        hugepd_t *hpdp = NULL;
 
-       BUG_ON(! in_hugepage_area(mm->context, addr));
+       BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
 
        addr &= HPAGE_MASK;
 
@@ -128,14 +144,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
        pu = pud_alloc(mm, pg, addr);
 
        if (pu) {
-#ifdef CONFIG_PPC_64K_PAGES
-               pmd_t *pm;
-               pm = pmd_alloc(mm, pu, addr);
+               pm = hpmd_alloc(mm, pu, addr);
                if (pm)
                        hpdp = (hugepd_t *)pm;
-#else
-               hpdp = (hugepd_t *)pu;
-#endif
        }
 
        if (! hpdp)
@@ -162,7 +173,6 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
                                                 PGF_CACHENUM_MASK));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
@@ -195,7 +205,6 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd);
 }
-#endif
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
@@ -214,9 +223,15 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                        continue;
                hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 #else
-               if (pud_none(*pud))
-                       continue;
-               free_hugepte_range(tlb, (hugepd_t *)pud);
+               if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+                       if (pud_none_or_clear_bad(pud))
+                               continue;
+                       hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+               } else {
+                       if (pud_none(*pud))
+                               continue;
+                       free_hugepte_range(tlb, (hugepd_t *)pud);
+               }
 #endif
        } while (pud++, addr = next, addr != end);
 
@@ -241,7 +256,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
 {
@@ -301,13 +316,13 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset((*tlb)->mm, addr);
+       pgd = pgd_offset(tlb->mm, addr);
        do {
-               BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
+               BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+               hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -332,203 +347,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        return __pte(old);
 }
 
-struct slb_flush_info {
-       struct mm_struct *mm;
-       u16 newareas;
-};
-
-static void flush_low_segments(void *parm)
-{
-       struct slb_flush_info *fi = parm;
-       unsigned long i;
-
-       BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
-
-       if (current->active_mm != fi->mm)
-               return;
-
-       /* Only need to do anything if this CPU is working in the same
-        * mm as the one which has changed */
-
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
-
-       asm volatile("isync" : : : "memory");
-       for (i = 0; i < NUM_LOW_AREAS; i++) {
-               if (! (fi->newareas & (1U << i)))
-                       continue;
-               asm volatile("slbie %0"
-                            : : "r" ((i << SID_SHIFT) | SLBIE_C));
-       }
-       asm volatile("isync" : : : "memory");
-}
-
-static void flush_high_segments(void *parm)
-{
-       struct slb_flush_info *fi = parm;
-       unsigned long i, j;
-
-
-       BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
-
-       if (current->active_mm != fi->mm)
-               return;
-
-       /* Only need to do anything if this CPU is working in the same
-        * mm as the one which has changed */
-
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
-
-       asm volatile("isync" : : : "memory");
-       for (i = 0; i < NUM_HIGH_AREAS; i++) {
-               if (! (fi->newareas & (1U << i)))
-                       continue;
-               for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
-                       asm volatile("slbie %0"
-                                    :: "r" (((i << HTLB_AREA_SHIFT)
-                                             + (j << SID_SHIFT)) | SLBIE_C));
-       }
-       asm volatile("isync" : : : "memory");
-}
-
-static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-       unsigned long start = area << SID_SHIFT;
-       unsigned long end = (area+1) << SID_SHIFT;
-       struct vm_area_struct *vma;
-
-       BUG_ON(area >= NUM_LOW_AREAS);
-
-       /* Check no VMAs are in the region */
-       vma = find_vma(mm, start);
-       if (vma && (vma->vm_start < end))
-               return -EBUSY;
-
-       return 0;
-}
-
-static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
-       unsigned long start = area << HTLB_AREA_SHIFT;
-       unsigned long end = (area+1) << HTLB_AREA_SHIFT;
-       struct vm_area_struct *vma;
-
-       BUG_ON(area >= NUM_HIGH_AREAS);
-
-       /* Hack, so that each addresses is controlled by exactly one
-        * of the high or low area bitmaps, the first high area starts
-        * at 4GB, not 0 */
-       if (start == 0)
-               start = 0x100000000UL;
-
-       /* Check no VMAs are in the region */
-       vma = find_vma(mm, start);
-       if (vma && (vma->vm_start < end))
-               return -EBUSY;
-
-       return 0;
-}
-
-static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-       unsigned long i;
-       struct slb_flush_info fi;
-
-       BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
-       BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
-
-       newareas &= ~(mm->context.low_htlb_areas);
-       if (! newareas)
-               return 0; /* The segments we want are already open */
-
-       for (i = 0; i < NUM_LOW_AREAS; i++)
-               if ((1 << i) & newareas)
-                       if (prepare_low_area_for_htlb(mm, i) != 0)
-                               return -EBUSY;
-
-       mm->context.low_htlb_areas |= newareas;
-
-       /* the context change must make it to memory before the flush,
-        * so that further SLB misses do the right thing. */
-       mb();
-
-       fi.mm = mm;
-       fi.newareas = newareas;
-       on_each_cpu(flush_low_segments, &fi, 0, 1);
-
-       return 0;
-}
-
-static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
-       struct slb_flush_info fi;
-       unsigned long i;
-
-       BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
-       BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
-                    != NUM_HIGH_AREAS);
-
-       newareas &= ~(mm->context.high_htlb_areas);
-       if (! newareas)
-               return 0; /* The areas we want are already open */
-
-       for (i = 0; i < NUM_HIGH_AREAS; i++)
-               if ((1 << i) & newareas)
-                       if (prepare_high_area_for_htlb(mm, i) != 0)
-                               return -EBUSY;
-
-       mm->context.high_htlb_areas |= newareas;
-
-       /* the context change must make it to memory before the flush,
-        * so that further SLB misses do the right thing. */
-       mb();
-
-       fi.mm = mm;
-       fi.newareas = newareas;
-       on_each_cpu(flush_high_segments, &fi, 0, 1);
-
-       return 0;
-}
-
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
-{
-       int err = 0;
-
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-
-       if (addr < 0x100000000UL)
-               err = open_low_hpage_areas(current->mm,
-                                         LOW_ESID_MASK(addr, len));
-       if ((addr + len) > 0x100000000UL)
-               err = open_high_hpage_areas(current->mm,
-                                           HTLB_AREA_MASK(addr, len));
-#ifdef CONFIG_SPE_BASE
-       spu_flush_all_slbs(current->mm);
-#endif
-       if (err) {
-               printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
-                      " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
-                      addr, len,
-                      LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
-               return err;
-       }
-
-       return 0;
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
        pte_t *ptep;
        struct page *page;
 
-       if (! in_hugepage_area(mm->context, address))
+       if (get_slice_psize(mm, address) != mmu_huge_psize)
                return ERR_PTR(-EINVAL);
 
        ptep = huge_pte_offset(mm, address);
@@ -544,6 +369,11 @@ int pmd_huge(pmd_t pmd)
        return 0;
 }
 
+int pud_huge(pud_t pud)
+{
+       return 0;
+}
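
The stub exists so generic code can treat the pud level the same way it
treats the pmd level. A hedged fragment of the expected generic caller,
modelled on follow_page(); this is not part of this file and is shown for
review context only:

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud) && (vma->vm_flags & VM_HUGETLB)) {
		/* never taken on powerpc: pud_huge() above returns 0,
		 * so the walk always descends towards the hugepd */
		page = follow_huge_pud(mm, address, pud, write);
		goto out;
	}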
+
 struct page *
 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
@@ -552,338 +382,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        return NULL;
 }
 
-/* Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions. */
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-                                    unsigned long len, unsigned long pgoff,
-                                    unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long start_addr;
-
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (((TASK_SIZE - len) >= addr)
-                   && (!vma || (addr+len) <= vma->vm_start)
-                   && !is_hugepage_only_range(mm, addr,len))
-                       return addr;
-       }
-       if (len > mm->cached_hole_size) {
-               start_addr = addr = mm->free_area_cache;
-       } else {
-               start_addr = addr = TASK_UNMAPPED_BASE;
-               mm->cached_hole_size = 0;
-       }
-
-full_search:
-       vma = find_vma(mm, addr);
-       while (TASK_SIZE - len >= addr) {
-               BUG_ON(vma && (addr >= vma->vm_end));
-
-               if (touches_hugepage_low_range(mm, addr, len)) {
-                       addr = ALIGN(addr+1, 1<<SID_SHIFT);
-                       vma = find_vma(mm, addr);
-                       continue;
-               }
-               if (touches_hugepage_high_range(mm, addr, len)) {
-                       addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-                       vma = find_vma(mm, addr);
-                       continue;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Remember the place where we stopped the search:
-                        */
-                       mm->free_area_cache = addr + len;
-                       return addr;
-               }
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
-               addr = vma->vm_end;
-               vma = vma->vm_next;
-       }
-
-       /* Make sure we didn't miss any holes */
-       if (start_addr != TASK_UNMAPPED_BASE) {
-               start_addr = addr = TASK_UNMAPPED_BASE;
-               mm->cached_hole_size = 0;
-               goto full_search;
-       }
-       return -ENOMEM;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- *
- * Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions.
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-                         const unsigned long len, const unsigned long pgoff,
-                         const unsigned long flags)
-{
-       struct vm_area_struct *vma, *prev_vma;
-       struct mm_struct *mm = current->mm;
-       unsigned long base = mm->mmap_base, addr = addr0;
-       unsigned long largest_hole = mm->cached_hole_size;
-       int first_time = 1;
-
-       /* requested length too big for entire address space */
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       /* dont allow allocations above current base */
-       if (mm->free_area_cache > base)
-               mm->free_area_cache = base;
-
-       /* requesting a specific address */
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start)
-                               && !is_hugepage_only_range(mm, addr,len))
-                       return addr;
-       }
-
-       if (len <= largest_hole) {
-               largest_hole = 0;
-               mm->free_area_cache = base;
-       }
-try_again:
-       /* make sure it can fit in the remaining address space */
-       if (mm->free_area_cache < len)
-               goto fail;
-
-       /* either no address requested or cant fit in requested address hole */
-       addr = (mm->free_area_cache - len) & PAGE_MASK;
-       do {
-hugepage_recheck:
-               if (touches_hugepage_low_range(mm, addr, len)) {
-                       addr = (addr & ((~0) << SID_SHIFT)) - len;
-                       goto hugepage_recheck;
-               } else if (touches_hugepage_high_range(mm, addr, len)) {
-                       addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
-                       goto hugepage_recheck;
-               }
-
-               /*
-                * Lookup failure means no vma is above this address,
-                * i.e. return with success:
-                */
-               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
-                       return addr;
-
-               /*
-                * new region fits between prev_vma->vm_end and
-                * vma->vm_start, use it:
-                */
-               if (addr+len <= vma->vm_start &&
-                         (!prev_vma || (addr >= prev_vma->vm_end))) {
-                       /* remember the address as a hint for next time */
-                       mm->cached_hole_size = largest_hole;
-                       return (mm->free_area_cache = addr);
-               } else {
-                       /* pull free_area_cache down to the first hole */
-                       if (mm->free_area_cache == vma->vm_end) {
-                               mm->free_area_cache = vma->vm_start;
-                               mm->cached_hole_size = largest_hole;
-                       }
-               }
-
-               /* remember the largest hole we saw so far */
-               if (addr + largest_hole < vma->vm_start)
-                       largest_hole = vma->vm_start - addr;
-
-               /* try just below the current vma->vm_start */
-               addr = vma->vm_start-len;
-       } while (len <= vma->vm_start);
-
-fail:
-       /*
-        * if hint left us with no space for the requested
-        * mapping then try again:
-        */
-       if (first_time) {
-               mm->free_area_cache = base;
-               largest_hole = 0;
-               first_time = 0;
-               goto try_again;
-       }
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
-       mm->cached_hole_size = ~0UL;
-       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-       /*
-        * Restore the topdown base:
-        */
-       mm->free_area_cache = base;
-       mm->cached_hole_size = ~0UL;
-
-       return addr;
-}
-
-static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
-{
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       if (TASK_SIZE - len >= addr &&
-           (!vma || ((addr + len) <= vma->vm_start)))
-               return 0;
-
-       return -ENOMEM;
-}
-
-static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
-{
-       unsigned long addr = 0;
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       while (addr + len <= 0x100000000UL) {
-               BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-               if (! __within_hugepage_low_range(addr, len, segmask)) {
-                       addr = ALIGN(addr+1, 1<<SID_SHIFT);
-                       vma = find_vma(current->mm, addr);
-                       continue;
-               }
-
-               if (!vma || (addr + len) <= vma->vm_start)
-                       return addr;
-               addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-               /* Depending on segmask this might not be a confirmed
-                * hugepage region, so the ALIGN could have skipped
-                * some VMAs */
-               vma = find_vma(current->mm, addr);
-       }
-
-       return -ENOMEM;
-}
-
-static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
-{
-       unsigned long addr = 0x100000000UL;
-       struct vm_area_struct *vma;
-
-       vma = find_vma(current->mm, addr);
-       while (addr + len <= TASK_SIZE_USER64) {
-               BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
-               if (! __within_hugepage_high_range(addr, len, areamask)) {
-                       addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
-                       vma = find_vma(current->mm, addr);
-                       continue;
-               }
-
-               if (!vma || (addr + len) <= vma->vm_start)
-                       return addr;
-               addr = ALIGN(vma->vm_end, HPAGE_SIZE);
-               /* Depending on segmask this might not be a confirmed
-                * hugepage region, so the ALIGN could have skipped
-                * some VMAs */
-               vma = find_vma(current->mm, addr);
-       }
-
-       return -ENOMEM;
-}
 
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
 {
-       int lastshift;
-       u16 areamask, curareas;
-
-       if (HPAGE_SHIFT == 0)
-               return -EINVAL;
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (len > TASK_SIZE)
-               return -ENOMEM;
-
-       if (!cpu_has_feature(CPU_FTR_16M_PAGE))
-               return -EINVAL;
-
-       /* Paranoia, caller should have dealt with this */
-       BUG_ON((addr + len)  < addr);
-
-       if (test_thread_flag(TIF_32BIT)) {
-               curareas = current->mm->context.low_htlb_areas;
-
-               /* First see if we can use the hint address */
-               if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
-                       areamask = LOW_ESID_MASK(addr, len);
-                       if (open_low_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-
-               /* Next see if we can map in the existing low areas */
-               addr = htlb_get_low_area(len, curareas);
-               if (addr != -ENOMEM)
-                       return addr;
-
-               /* Finally go looking for areas to open */
-               lastshift = 0;
-               for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
-                    ! lastshift; areamask >>=1) {
-                       if (areamask & 1)
-                               lastshift = 1;
-
-                       addr = htlb_get_low_area(len, curareas | areamask);
-                       if ((addr != -ENOMEM)
-                           && open_low_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-       } else {
-               curareas = current->mm->context.high_htlb_areas;
-
-               /* First see if we can use the hint address */
-               /* We discourage 64-bit processes from doing hugepage
-                * mappings below 4GB (must use MAP_FIXED) */
-               if ((addr >= 0x100000000UL)
-                   && (htlb_check_hinted_area(addr, len) == 0)) {
-                       areamask = HTLB_AREA_MASK(addr, len);
-                       if (open_high_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-
-               /* Next see if we can map in the existing high areas */
-               addr = htlb_get_high_area(len, curareas);
-               if (addr != -ENOMEM)
-                       return addr;
-
-               /* Finally go looking for areas to open */
-               lastshift = 0;
-               for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
-                    ! lastshift; areamask >>=1) {
-                       if (areamask & 1)
-                               lastshift = 1;
-
-                       addr = htlb_get_high_area(len, curareas | areamask);
-                       if ((addr != -ENOMEM)
-                           && open_high_hpage_areas(current->mm, areamask) == 0)
-                               return addr;
-               }
-       }
-       printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
-              " enough areas\n");
-       return -ENOMEM;
+       return slice_get_unmapped_area(addr, len, flags,
+                                      mmu_huge_psize, 1, 0);
 }
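
Over 300 lines of hand-rolled area management collapse into this one call.
For review reference, the slice API as used here; the prototype is
reconstructed from the call site and the parameter names are inferred, not
quoted from the slice code:

	unsigned long slice_get_unmapped_area(unsigned long addr,
					      unsigned long len,
					      unsigned long flags,
					      unsigned int psize,
					      int topdown,	/* 1: search top-down */
					      int use_cache);	/* 0: skip free_area_cache */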
 
 /*
@@ -922,11 +427,12 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
        unsigned long va, rflags, pa;
        long slot;
        int err = 1;
+       int ssize = user_segment_size(ea);
 
        ptep = huge_pte_offset(mm, ea);
 
        /* Search the Linux page table for a match with va */
-       va = (vsid << 28) | (ea & 0x0fffffff);
+       va = hpt_va(ea, vsid, ssize);
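
The open-coded 256MB-segment fold it replaces appears on the removed line
above; hpt_va() also understands 1TB segments. A minimal userspace model of
the folding, with the segment-size encodings assumed rather than quoted
from the headers:

	#include <stdio.h>

	#define MMU_SEGSIZE_256M 0	/* assumed encoding */
	#define MMU_SEGSIZE_1T   1	/* assumed encoding */

	/* Models hpt_va(): splice the VSID onto the in-segment offset of the
	 * effective address -- 28 offset bits for a 256MB segment, 40 for 1TB. */
	static unsigned long model_hpt_va(unsigned long ea, unsigned long vsid,
					  int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (vsid << 28) | (ea & 0x0fffffffUL);
		return (vsid << 40) | (ea & 0xffffffffffUL);
	}

	int main(void)
	{
		/* same inputs, both segment sizes */
		printf("256M: %016lx\n", model_hpt_va(0xdead0000UL, 0x42UL, MMU_SEGSIZE_256M));
		printf("1T:   %016lx\n", model_hpt_va(0xdead0000UL, 0x42UL, MMU_SEGSIZE_1T));
		return 0;
	}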
 
        /*
         * If no pte found or not present, send the problem up to
@@ -958,8 +464,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
                old_pte = pte_val(*ptep);
                if (old_pte & _PAGE_BUSY)
                        goto out;
-               new_pte = old_pte | _PAGE_BUSY |
-                       _PAGE_ACCESSED | _PAGE_HASHPTE;
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
 
@@ -977,19 +482,19 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;
 
-               hash = hpt_hash(va, HPAGE_SHIFT);
+               hash = hpt_hash(va, HPAGE_SHIFT, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;
 
                if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
-                                        local) == -1)
+                                        ssize, local) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }
 
        if (likely(!(old_pte & _PAGE_HASHPTE))) {
-               unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
+               unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
                unsigned long hpte_group;
 
                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -999,16 +504,18 @@ repeat:
                              HPTES_PER_GROUP) & ~0x7UL;
 
               /* clear HPTE slot information in new PTE */
+#ifdef CONFIG_PPC_64K_PAGES
+               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
+#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-
+#endif
                /* Add in WIMG bits */
-               /* XXX We should store these in the pte */
-               /* --BenH: I think they are ... */
-               rflags |= _PAGE_COHERENT;
+               rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+                                     _PAGE_COHERENT | _PAGE_GUARDED));
 
                /* Insert into the hash table, primary slot */
                slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-                                         mmu_huge_psize);
+                                         mmu_huge_psize, ssize);
 
                /* Primary is full, try the secondary */
                if (unlikely(slot == -1)) {
@@ -1016,7 +523,7 @@ repeat:
                                      HPTES_PER_GROUP) & ~0x7UL; 
                        slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
                                                  HPTE_V_SECONDARY,
-                                                 mmu_huge_psize);
+                                                 mmu_huge_psize, ssize);
                        if (slot == -1) {
                                if (mftb() & 0x1)
                                        hpte_group = ((hash & htab_hash_mask) *
@@ -1044,7 +551,58 @@ repeat:
        return err;
 }
 
-static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
+void set_huge_psize(int psize)
+{
+       /* Check that it is a page size supported by the hardware and
+        * that it fits within pagetable limits. */
+       if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT &&
+               (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
+                       mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
+               HPAGE_SHIFT = mmu_psize_defs[psize].shift;
+               mmu_huge_psize = psize;
+#ifdef CONFIG_PPC_64K_PAGES
+               hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+#else
+               if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+                       hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+               else
+                       hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
+#endif
+
+       } else
+               HPAGE_SHIFT = 0;
+}
+
+static int __init hugepage_setup_sz(char *str)
+{
+       unsigned long long size;
+       int mmu_psize = -1;
+       int shift;
+
+       size = memparse(str, &str);
+
+       shift = __ffs(size);
+       switch (shift) {
+#ifndef CONFIG_PPC_64K_PAGES
+       case HPAGE_SHIFT_64K:
+               mmu_psize = MMU_PAGE_64K;
+               break;
+#endif
+       case HPAGE_SHIFT_16M:
+               mmu_psize = MMU_PAGE_16M;
+               break;
+       }
+
+       if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
+               set_huge_psize(mmu_psize);
+       else
+               printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);
+
+       return 1;
+}
+__setup("hugepagesz=", hugepage_setup_sz);
+
+static void zero_ctor(struct kmem_cache *cache, void *addr)
 {
        memset(addr, 0, kmem_cache_size(cache));
 }
@@ -1057,9 +615,8 @@ static int __init hugetlbpage_init(void)
        huge_pgtable_cache = kmem_cache_create("hugepte_cache",
                                               HUGEPTE_TABLE_SIZE,
                                               HUGEPTE_TABLE_SIZE,
-                                              SLAB_HWCACHE_ALIGN |
-                                              SLAB_MUST_HWCACHE_ALIGN,
-                                              zero_ctor, NULL);
+                                              0,
+                                              zero_ctor);
        if (! huge_pgtable_cache)
                panic("hugetlbpage_init(): could not create hugepte cache\n");