mm: migration: avoid race between shift_arg_pages() and rmap_walk() during migration...
diff --git a/mm/rmap.c b/mm/rmap.c
index b5c320f..38a336e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1131,6 +1131,20 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        return ret;
 }
 
+static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+{
+       int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+       if (!maybe_stack)
+               return false;
+
+       if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+                                               VM_STACK_INCOMPLETE_SETUP)
+               return true;
+
+       return false;
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1159,7 +1173,21 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 
        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
                struct vm_area_struct *vma = avc->vma;
-               unsigned long address = vma_address(page, vma);
+               unsigned long address;
+
+               /*
+                * During exec, a temporary VMA is set up and later moved.
+                * The VMA is moved under the anon_vma lock but not the
+                * page tables, leading to a race where migration cannot
+                * find the migration ptes. Rather than increasing the
+                * locking requirements of exec(), migration skips
+                * temporary VMAs until after exec() completes.
+                */
+               if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+                               is_vma_temporary_stack(vma))
+                       continue;
+
+               address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                ret = try_to_unmap_one(page, vma, address, flags);
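
A small userspace model of the new check, for readers who want to poke at the logic outside the kernel: the flag values, the trimmed-down vm_area_struct and main() below are stand-ins invented for this sketch (the real definitions live in the kernel headers), and only the decision logic mirrors the first hunk above. A VMA is treated as a temporary exec stack while it can grow like a stack and still carries every bit of VM_STACK_INCOMPLETE_SETUP.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag values for this sketch only; the kernel's real values differ. */
#define VM_GROWSDOWN              0x0001UL
#define VM_GROWSUP                0x0002UL
#define VM_STACK_INCOMPLETE_SETUP 0x0030UL	/* two marker bits, both must be set */

struct vm_area_struct {
	unsigned long vm_flags;
};

/* Same decision logic as the first hunk above. */
static bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	unsigned long maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	/* Every bit of the marker must still be present, not just one of them. */
	return (vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP;
}

int main(void)
{
	/* 1. exec() builds the temporary stack: marker set, migration must skip it. */
	struct vm_area_struct stack = { VM_GROWSDOWN | VM_STACK_INCOMPLETE_SETUP };
	printf("during exec  -> skip for migration: %d\n", is_vma_temporary_stack(&stack));

	/* 2. After shift_arg_pages() the marker is cleared: the stack is fair game again. */
	stack.vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
	printf("after exec   -> skip for migration: %d\n", is_vma_temporary_stack(&stack));

	/* 3. A stack VMA carrying only one marker bit is not treated as temporary. */
	struct vm_area_struct partial = { VM_GROWSDOWN | 0x0010UL };
	printf("partial mark -> skip for migration: %d\n", is_vma_temporary_stack(&partial));

	/* 4. An ordinary mapping never matches: no stack growth flags at all. */
	struct vm_area_struct file_map = { 0 };
	printf("file mapping -> skip for migration: %d\n", is_vma_temporary_stack(&file_map));
	return 0;
}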
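
Two consequences follow from the skip (only mm/rmap.c is shown here, so the exec() side is inferred). During the exec() window, try_to_unmap() leaves the temporary stack mapping in place, so the page remains mapped and migration of that particular page is expected to simply fail and be retried later instead of racing with shift_arg_pages(). The rest of the fix presumably lives on the exec() side, where the temporary stack VMA is created with VM_STACK_INCOMPLETE_SETUP and the marker is cleared once shift_arg_pages() has finished, which is what closes the window modelled above.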