mm/migrate.c: cleanup comment for migration_entry_wait()
diff --git a/mm/mlock.c b/mm/mlock.c
index 6b55e3e..4f4f53b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -14,7 +14,7 @@
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
 #include <linux/sched.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/rmap.h>
 #include <linux/mmzone.h>
 #include <linux/hugetlb.h>
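
The <linux/module.h> to <linux/export.h> switch matches the header split in which EXPORT_SYMBOL() moved into the much lighter export.h; mlock.c's only use of that machinery appears to be exporting a symbol, so the full module header is not needed. A minimal sketch of the pattern the new include covers, using a hypothetical symbol name:

	#include <linux/export.h>	/* EXPORT_SYMBOL() lives here after the header split */

	/* hypothetical helper, for illustration only */
	int example_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(example_helper);
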
@@ -110,7 +110,15 @@ void munlock_vma_page(struct page *page)
        if (TestClearPageMlocked(page)) {
                dec_zone_page_state(page, NR_MLOCK);
                if (!isolate_lru_page(page)) {
-                       int ret = try_to_munlock(page);
+                       int ret = SWAP_AGAIN;
+
+                       /*
+                        * Optimization: if the page was mapped just once,
+                        * that's our mapping and we don't need to check all the
+                        * other vmas.
+                        */
+                       if (page_mapcount(page) > 1)
+                               ret = try_to_munlock(page);
                        /*
                         * did try_to_unlock() succeed or punt?
                         */
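
The new mapcount test short-circuits the rmap walk: if the page is mapped exactly once, that single mapping is the vma being munlocked, so try_to_munlock() cannot find another VM_LOCKED vma and the default SWAP_AGAIN result already means "put the page back on the normal LRU". A condensed sketch of the resulting fast path as it reads with this hunk applied, with the isolation-failure branch elided (see the full function in mm/mlock.c):

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = SWAP_AGAIN;

			/* Only consult the other vmas via rmap when another
			 * mapping could actually be keeping the page mlocked. */
			if (page_mapcount(page) > 1)
				ret = try_to_munlock(page);

			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
			putback_lru_page(page);
		}
	}
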
@@ -162,7 +170,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = FOLL_TOUCH;
+       gup_flags = FOLL_TOUCH | FOLL_MLOCK;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
@@ -178,9 +186,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
                gup_flags |= FOLL_FORCE;
 
-       if (vma->vm_flags & VM_LOCKED)
-               gup_flags |= FOLL_MLOCK;
-
        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
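
With FOLL_MLOCK folded into the initial assignment, __mlock_vma_pages_range() now always asks get_user_pages for mlock semantics; the VM_LOCKED test that used to live here appears to sit on the follow_page() side in this tree, so pages are still only accounted as mlocked for locked vmas. An approximate sketch of that gup-side check (simplified, locking and surrounding code omitted, details may differ from the real function in mm/memory.c):

	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to the LRU */
			if (page->mapping)	/* re-check under the page lock */
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
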
@@ -310,13 +315,13 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
  * For vmas that pass the filters, merge/split as appropriate.
  */
 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
-       unsigned long start, unsigned long end, unsigned int newflags)
+       unsigned long start, unsigned long end, vm_flags_t newflags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgoff_t pgoff;
        int nr_pages;
        int ret = 0;
-       int lock = newflags & VM_LOCKED;
+       int lock = !!(newflags & VM_LOCKED);
 
        if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
            is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
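
The type change and the !! belong together: vm_flags_t is based on unsigned long, so on 64-bit kernels it can in principle carry flag bits above bit 31, and a bare "int lock = newflags & VM_LOCKED" would silently truncate such a bit to 0, while !!(...) always yields a clean 0/1. A standalone illustration with a hypothetical high flag bit (not a real VM_* flag, and not kernel code):

	#include <stdio.h>

	typedef unsigned long long vm_flags_t;		/* stand-in for a widened flag type */
	#define VM_HYPOTHETICAL_HIGH	(1ULL << 40)	/* made-up flag above bit 31 */

	int main(void)
	{
		vm_flags_t newflags = VM_HYPOTHETICAL_HIGH;

		int truncated  = newflags & VM_HYPOTHETICAL_HIGH;	/* 0 on typical ABIs: bit 40 is lost */
		int normalized = !!(newflags & VM_HYPOTHETICAL_HIGH);	/* 1: the boolean survives */

		printf("%d %d\n", truncated, normalized);
		return 0;
	}
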
@@ -388,7 +393,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
                prev = vma;
 
        for (nstart = start ; ; ) {
-               unsigned int newflags;
+               vm_flags_t newflags;
 
                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
@@ -527,7 +532,7 @@ static int do_mlockall(int flags)
                goto out;
 
        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
-               unsigned int newflags;
+               vm_flags_t newflags;
 
                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
@@ -552,7 +557,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
        if (!can_do_mlock())
                goto out;
 
-       lru_add_drain_all();    /* flush pagevec */
+       if (flags & MCL_CURRENT)
+               lru_add_drain_all();    /* flush pagevec */
 
        down_write(&current->mm->mmap_sem);
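
Making the drain conditional means a pure MCL_FUTURE call no longer pays for lru_add_drain_all(); presumably only MCL_CURRENT, which mlocks pages that are already resident, cares whether those pages are still sitting on the per-CPU pagevecs. A minimal userspace illustration of the two modes being distinguished (illustrative only, error handling kept trivial):

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Lock everything mapped right now: the kernel flushes pagevecs. */
		if (mlockall(MCL_CURRENT) != 0)
			perror("mlockall(MCL_CURRENT)");

		/* Only lock future mappings: no already-resident page is touched,
		 * which is the case the hunk above stops draining for. */
		if (mlockall(MCL_FUTURE) != 0)
			perror("mlockall(MCL_FUTURE)");

		return 0;
	}
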