mm: avoid allocating CMA memory for stack
Krishna Reddy [Wed, 3 Sep 2014 21:07:41 +0000 (14:07 -0700)]
Allocating CMA memory for a user-space stack can cause
permanent page-migration failures, which in turn lead to
CMA allocation failures. Avoid CMA pages for stack VMAs.

Bug 1550455

Change-Id: I75ac13416dbcf1810c89641cefdd0d56726cc36a
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/495322
(cherry picked from commit 3c59fb443605d5975b9d8e250c4ca52ae1650fe5)
Reviewed-on: http://git-master/r/592897
GVS: Gerrit_Virtual_Submit

mm/memory.c

index 07e1987..76399cf 100644 (file)
@@ -3195,6 +3195,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
        return 0;
 }
 
+bool is_vma_temporary_stack(struct vm_area_struct *vma);
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -3227,7 +3228,12 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /* Allocate our own private page. */
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       page = alloc_zeroed_user_highpage_movable(vma, address);
+       if (vma->vm_flags & VM_LOCKED || flags & FAULT_FLAG_NO_CMA ||
+           is_vma_temporary_stack(vma)) {
+               page = alloc_zeroed_user_highpage(GFP_HIGHUSER, vma, address);
+       } else {
+               page = alloc_zeroed_user_highpage_movable(vma, address);
+       }
        if (!page)
                goto oom;
        /*