[IA64] MAX_PGT_FREES_PER_PASS must be 'L' to avoid warning
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 40ad8328ffd5c9d97d7ac44679d066e1e9f27827..df08ae7634b61d2efadf065e246c4bfb7f342b2c 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -186,46 +186,30 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
        return NULL;
 }
 
-/*
- * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
- * are hugetlb region specific.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
-       unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+                       unsigned long addr, unsigned long end,
+                       unsigned long floor, unsigned long ceiling)
 {
-       unsigned long first = start & HUGETLB_PGDIR_MASK;
-       unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
-       struct mm_struct *mm = tlb->mm;
-
-       if (!prev) {
-               prev = mm->mmap;
-               if (!prev)
-                       goto no_mmaps;
-               if (prev->vm_end > start) {
-                       if (last > prev->vm_start)
-                               last = prev->vm_start;
-                       goto no_mmaps;
-               }
-       }
-       for (;;) {
-               struct vm_area_struct *next = prev->vm_next;
+       /*
+        * This is called only when is_hugepage_only_range(addr,),
+        * and it follows that is_hugepage_only_range(end,) also.
+        *
+        * The offset of these addresses from the base of the hugetlb
+        * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+        * the standard free_pgd_range will free the right page tables.
+        *
+        * If floor and ceiling are also in the hugetlb region, they
+        * must likewise be scaled down; but if outside, left unchanged.
+        */
 
-               if (next) {
-                       if (next->vm_start < start) {
-                               prev = next;
-                               continue;
-                       }
-                       if (last > next->vm_start)
-                               last = next->vm_start;
-               }
-               if (prev->vm_end > first)
-                       first = prev->vm_end;
-               break;
-       }
-no_mmaps:
-       if (last < first)       /* for arches with discontiguous pgd indices */
-               return;
-       clear_page_range(tlb, first, last);
+       addr = htlbpage_to_page(addr);
+       end  = htlbpage_to_page(end);
+       if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+               floor = htlbpage_to_page(floor);
+       if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+               ceiling = htlbpage_to_page(ceiling);
+
+       free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
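
To make the scaling in the new comment concrete, here is a minimal standalone sketch in plain C. All the constants, and the simplified htlbpage_to_page() below, are illustrative assumptions rather than the kernel's actual ia64 definitions; the sketch only demonstrates the arithmetic the patch relies on: addr and end are always rescaled, while floor and ceiling are rescaled only when they fall inside the hugetlb region.

	#include <stdio.h>

	/*
	 * Illustrative constants only (assumes a 64-bit unsigned long,
	 * as on ia64): 16KB base pages, 256MB huge pages, and one
	 * hypothetical region number reserved for hugetlb mappings.
	 */
	#define PAGE_SHIFT	14UL
	#define HPAGE_SHIFT	28UL
	#define RGN_SHIFT	61UL
	#define RGN_HPAGE	4UL

	#define REGION_NUMBER(addr)	((addr) >> RGN_SHIFT)
	#define RGN_BASE(rgn)		((unsigned long)(rgn) << RGN_SHIFT)

	/*
	 * Scale an address's offset from its region base down by
	 * HPAGE_SIZE/PAGE_SIZE, keeping the region bits, so that huge
	 * pages index page-table slots the way ordinary pages do.
	 */
	static unsigned long htlbpage_to_page(unsigned long addr)
	{
		return RGN_BASE(REGION_NUMBER(addr)) |
		       ((addr & (RGN_BASE(1) - 1)) >> (HPAGE_SHIFT - PAGE_SHIFT));
	}

	int main(void)
	{
		unsigned long addr  = RGN_BASE(RGN_HPAGE) + (1UL << HPAGE_SHIFT);
		unsigned long floor = 0;	/* outside the hugetlb region */

		/* addr (and end) are rescaled unconditionally... */
		printf("addr:  %#lx -> %#lx\n", addr, htlbpage_to_page(addr));

		/* ...floor (and ceiling) only if inside the hugetlb region. */
		if (REGION_NUMBER(floor) == RGN_HPAGE)
			floor = htlbpage_to_page(floor);
		printf("floor: %#lx (left unchanged)\n", floor);
		return 0;
	}

After this rescaling, an address one huge page into the region (offset 1 << HPAGE_SHIFT) lands one base page into the scaled-down range (offset 1 << PAGE_SHIFT), which is why the generic free_pgd_range() then frees exactly the page tables backing the hugetlb mappings, as the comment in the patch explains.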