mm: move VM_LOCKED check to __mlock_vma_pages_range()
Michel Lespinasse [Thu, 13 Jan 2011 23:46:12 +0000 (15:46 -0800)]
Use a single code path for faulting in pages during mlock.

The reason to include this in the patch series is that I did not want to
have to update both code paths in a later change that releases mmap_sem
when blocking on disk.
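
To make the effect concrete, here is a sketch of the resulting helper,
simplified from the hunks below (the stack guard page handling and the
actual __get_user_pages() call are elided):

	static long __mlock_vma_pages_range(struct vm_area_struct *vma,
					    unsigned long start, unsigned long end)
	{
		int gup_flags = FOLL_TOUCH;	/* fault the pages in */

		/*
		 * Write-fault writable private mappings to break COW;
		 * shared mappings don't COW, so a read fault is enough.
		 */
		if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
			gup_flags |= FOLL_WRITE;

		/*
		 * Request mlocking only when the VMA is actually locked.
		 * Without FOLL_MLOCK this is a plain fault-in, which is
		 * what the old make_pages_present() call provided.
		 */
		if (vma->vm_flags & VM_LOCKED)
			gup_flags |= FOLL_MLOCK;

		/* ... __get_user_pages() loop over [start, end) ... */
	}

With the VM_LOCKED test inside the helper, do_mlock_pages() can simply
call __mlock_vma_pages_range() for every VMA in the range.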

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

mm/mlock.c

index 25cc9e8..84da66b 100644
@@ -169,7 +169,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+       gup_flags = FOLL_TOUCH;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
@@ -178,6 +178,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                gup_flags |= FOLL_WRITE;
 
+       if (vma->vm_flags & VM_LOCKED)
+               gup_flags |= FOLL_MLOCK;
+
        /* We don't try to access the guard page of a stack vma */
        if (stack_guard_page(vma, start)) {
                addr += PAGE_SIZE;
@@ -456,18 +459,15 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
                /*
                 * Now fault in a range of pages within the first VMA.
                 */
-               if (vma->vm_flags & VM_LOCKED) {
-                       ret = __mlock_vma_pages_range(vma, nstart, nend);
-                       if (ret < 0 && ignore_errors) {
-                               ret = 0;
-                               continue;       /* continue at next VMA */
-                       }
-                       if (ret) {
-                               ret = __mlock_posix_error_return(ret);
-                               break;
-                       }
-               } else
-                       make_pages_present(nstart, nend);
+               ret = __mlock_vma_pages_range(vma, nstart, nend);
+               if (ret < 0 && ignore_errors) {
+                       ret = 0;
+                       continue;       /* continue at next VMA */
+               }
+               if (ret) {
+                       ret = __mlock_posix_error_return(ret);
+                       break;
+               }
        }
        up_read(&mm->mmap_sem);
        return ret;     /* 0 or negative error code */