mm: use __GFP_OTHER_NODE for transparent huge pages
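
Pass __GFP_OTHER_NODE for the transparent hugepage allocations that are
made on behalf of another node: the khugepaged collapse path allocates
at the target process's node, and the huge-page COW fallback allocates
at page_to_nid() of the original page. The flag keeps the low-level
NUMA accounting of local versus remote allocations correct, since these
pages are not "local" to the CPU doing the allocating. The regular
fault paths (do_huge_pmd_anonymous_page() and do_huge_pmd_wp_page())
still allocate for the current task and pass 0.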
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 113e35c..0a619e0 100644
@@ -643,23 +643,24 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        return ret;
 }
 
-static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-       return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
 static inline struct page *alloc_hugepage_vma(int defrag,
                                              struct vm_area_struct *vma,
-                                             unsigned long haddr, int nd)
+                                             unsigned long haddr, int nd,
+                                             gfp_t extra_gfp)
 {
-       return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+       return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
                               HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
 static inline struct page *alloc_hugepage(int defrag)
 {
-       return alloc_pages(alloc_hugepage_gfpmask(defrag),
+       return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
                           HPAGE_PMD_ORDER);
 }
 #endif
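
For reference, a quick sketch (not part of the patch) of the masks the
reworked helper produces:

        /*
         * alloc_hugepage_gfpmask(1, 0)
         *         == GFP_TRANSHUGE
         * alloc_hugepage_gfpmask(0, 0)
         *         == GFP_TRANSHUGE & ~__GFP_WAIT  (may not sleep, so
         *            no direct reclaim or compaction)
         * alloc_hugepage_gfpmask(1, __GFP_OTHER_NODE)
         *         == GFP_TRANSHUGE | __GFP_OTHER_NODE
         */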
@@ -678,7 +679,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (unlikely(khugepaged_enter(vma)))
                        return VM_FAULT_OOM;
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                         vma, haddr, numa_node_id());
+                                         vma, haddr, numa_node_id(), 0);
                if (unlikely(!page))
                        goto out;
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,7 +800,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
+                                              __GFP_OTHER_NODE,
                                               vma, address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_newpage_charge(pages[i], mm,
@@ -902,7 +904,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow())
                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                             vma, haddr, numa_node_id());
+                                             vma, haddr, numa_node_id(), 0);
        else
                new_page = NULL;
 
@@ -1779,7 +1781,7 @@ static void collapse_huge_page(struct mm_struct *mm,
         * scalability.
         */
        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-                                     node);
+                                     node, __GFP_OTHER_NODE);
        if (unlikely(!new_page)) {
                up_read(&mm->mmap_sem);
                *hpage = ERR_PTR(-ENOMEM);
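
What the flag actually changes is how the NUMA event counters classify
the allocation. A simplified sketch of the consumer, modeled on the
zone_statistics() in mm/vmstat.c of this period (an illustration of the
mechanism, not part of this patch):

/*
 * With __GFP_OTHER_NODE set, "local" is judged against the node the
 * allocation was aimed at (khugepaged's target node) instead of the
 * node of the CPU running the allocation (khugepaged's own node).
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
        int local_nid = numa_node_id();

        /* Allocation done on behalf of another node: account locality
         * against the preferred node, not the allocating CPU. */
        if (unlikely(flags & __GFP_OTHER_NODE))
                local_nid = preferred_zone->node;

        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == local_nid)
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}

With the flag, a collapse performed for a process on another node is
counted as NUMA_LOCAL when the new huge page lands on that process's
node, rather than being misreported as NUMA_OTHER merely because
khugepaged happened to be running elsewhere.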