mm: get_user_pages: migrate out CMA pages when FOLL_DURABLE flag is set
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index f792940..2beeabf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,8 +58,8 @@ again:
                if (!walk->pte_entry)
                        continue;
 
-               split_huge_page_pmd(walk->mm, pmd);
-               if (pmd_none_or_clear_bad(pmd))
+               split_huge_page_pmd_mm(walk->mm, addr, pmd);
+               if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
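
For context, the surrounding loop in walk_pmd_range() reads roughly as below
after this hunk (a reconstruction from the 3.10 sources, not part of the
patch). split_huge_page_pmd_mm() splits a transparent huge pmd before the
PTE-level walk, and pmd_none_or_trans_huge_or_clear_bad() re-checks the pmd
afterwards: the pmd can change underneath us (e.g. khugepaged collapsing a
new huge page), in which case the walk retries via the "again" label.

        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /* Only split huge pages when the walk needs PTEs. */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd_mm(walk->mm, addr, pmd);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);
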
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
        return 0;
 }
 
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       struct vm_area_struct *vma;
-
-       /* We don't need vma lookup at all. */
-       if (!walk->hugetlb_entry)
-               return NULL;
-
-       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-       vma = find_vma(walk->mm, addr);
-       if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
-               return vma;
-
-       return NULL;
-}
-
 #else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       return NULL;
-}
-
 static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
@@ -162,7 +141,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
- * @mm: memory map to walk
  * @addr: starting address
  * @end: ending address
  * @walk: set of callbacks to invoke for each level of the tree
@@ -176,7 +154,8 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
  * associated range, and a copy of the original mm_walk for access to
  * the ->private or ->mm fields.
  *
- * No locks are taken, but the bottom level iterator will map PTE
+ * Usually no locks are taken, but splitting a transparent huge page may
+ * take the page table lock. And the bottom level iterator will map PTE
  * directories from highmem if necessary.
  *
  * If any callback returns a non-zero value, the walk is aborted and
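
Since the walker now asserts that mmap_sem is held (see the next hunk), a
typical caller looks like the sketch below. my_pte_entry() and
count_present_ptes() are illustrative names, not part of the patch:

static int my_pte_entry(pte_t *pte, unsigned long addr,
                        unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk my_walk = {
                .pte_entry = my_pte_entry,
                .mm        = mm,
                .private   = &count,
        };

        down_read(&mm->mmap_sem);       /* required: the walker asserts this */
        walk_page_range(start, end, &my_walk);
        up_read(&mm->mmap_sem);
        return count;
}
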
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
        if (!walk->mm)
                return -EINVAL;
 
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
        pgd = pgd_offset(walk->mm, addr);
        do {
-               struct vm_area_struct *vma;
+               struct vm_area_struct *vma = NULL;
 
                next = pgd_addr_end(addr, end);
 
                /*
-                * handle hugetlb vma individually because pagetable walk for
-                * the hugetlb page is dependent on the architecture and
-                * we can't handled it in the same manner as non-huge pages.
+                * This function was not intended to be vma-based, but
+                * there are vma special cases that must be handled:
+                * - hugetlb vma's
+                * - VM_PFNMAP vma's
                 */
-               vma = hugetlb_vma(addr, walk);
+               vma = find_vma(walk->mm, addr);
                if (vma) {
-                       if (vma->vm_end < next)
+                       /*
+                        * There are no page structures backing a VM_PFNMAP
+                        * range, so do not allow split_huge_page_pmd().
+                        */
+                       if ((vma->vm_start <= addr) &&
+                           (vma->vm_flags & VM_PFNMAP)) {
                                next = vma->vm_end;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                        /*
-                        * Hugepage is very tightly coupled with vma, so
-                        * walk through hugetlb entries within a given vma.
+                        * Handle hugetlb vma individually because pagetable
+                        * walk for the hugetlb page is dependent on the
+                        * architecture and we can't handle it in the same
+                        * manner as non-huge pages.
                         */
-                       err = walk_hugetlb_range(vma, addr, next, walk);
-                       if (err)
-                               break;
-                       pgd = pgd_offset(walk->mm, next);
-                       continue;
+                       if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+                           is_vm_hugetlb_page(vma)) {
+                               if (vma->vm_end < next)
+                                       next = vma->vm_end;
+                               /*
+                                * Hugepage is very tightly coupled with vma,
+                                * so walk through hugetlb entries within a
+                                * given vma.
+                                */
+                               err = walk_hugetlb_range(vma, addr, next, walk);
+                               if (err)
+                                       break;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                }
 
                if (pgd_none_or_clear_bad(pgd)) {
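
The VM_PFNMAP case above matters because such ranges are created by drivers
mapping raw page frames straight into userspace; there is no struct page
behind those PTEs for the walk (or a THP split) to operate on. A minimal
sketch of how such a vma typically arises (mydrv_mmap() and the use of
vm_pgoff as the frame number are hypothetical):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn = vma->vm_pgoff;      /* hypothetical device frame */

        /*
         * remap_pfn_range() sets VM_PFNMAP on the vma: the mapped
         * frames have no struct page, so split_huge_page_pmd() and
         * the PTE-level walk must skip the whole range.
         */
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
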
@@ -240,7 +242,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
                if (err)
                        break;
                pgd++;
-       } while (addr = next, addr != end);
+       } while (addr = next, addr < end);
 
        return err;
 }
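
The final hunk changes the loop test from "addr != end" to "addr < end"
because the VM_PFNMAP skip sets next = vma->vm_end without clamping to end,
so addr can jump past the requested endpoint. A worked example with made-up
addresses:

/*
 * walk_page_range(0x1000, 0x3000, &walk) hits a VM_PFNMAP vma
 * spanning 0x2000-0x5000:
 *
 *      next = vma->vm_end;     next == 0x5000, past end (0x3000)
 *      addr = next;            addr == 0x5000
 *
 *      old test: addr != end   -> true,  walk continues past end
 *      new test: addr <  end   -> false, walk terminates
 */
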