mm: nommu: find vma using the sorted vma list
diff --git a/mm/mlock.c b/mm/mlock.c
index 04d5e74..516b2c2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -25,13 +25,12 @@ int can_do_mlock(void)
 {
        if (capable(CAP_IPC_LOCK))
                return 1;
-       if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
+       if (rlimit(RLIMIT_MEMLOCK) != 0)
                return 1;
        return 0;
 }
 EXPORT_SYMBOL(can_do_mlock);
 
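The rlimit() helper used above replaces the open-coded read of current->signal->rlim[]. As a point of reference, this is roughly how the helper was defined in include/linux/sched.h around this time (quoted from memory, so treat it as approximate):

    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                            unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }

    static inline unsigned long rlimit(unsigned int limit)
    {
            return task_rlimit(current, limit);
    }

The ACCESS_ONCE() wrapper is part of the point: the soft limit can be changed concurrently via setrlimit(), so the value should be read with a single, tear-free load rather than whatever the compiler makes of an open-coded struct access.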
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * Mlocked pages are marked with PageMlocked() flag for efficient testing
  * in vmscan and, possibly, the fault path; and to support semi-accurate
@@ -89,25 +88,22 @@ void mlock_vma_page(struct page *page)
        }
 }
 
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page - page to be unlocked
  *
- * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page.  We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock.  However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked.  If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it.  However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU.  If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked as we might not get another chance.  If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
        BUG_ON(!PageLocked(page));
 
@@ -118,18 +114,18 @@ static void munlock_vma_page(struct page *page)
                        /*
                         * did try_to_munlock() succeed or punt?
                         */
-                       if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+                       if (ret != SWAP_MLOCK)
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 
                        putback_lru_page(page);
                } else {
                        /*
-                        * We lost the race.  let try_to_unmap() deal
-                        * with it.  At least we get the page state and
-                        * mlock stats right.  However, page is still on
-                        * the noreclaim list.  We'll fix that up when
-                        * the page is eventually freed or we scan the
-                        * noreclaim list.
+                        * Some other task has removed the page from the LRU.
+                        * putback_lru_page() will take care of removing the
+                        * page from the unevictable list, if necessary.
+                        * vmscan [page_referenced()] will move the page back
+                        * to the unevictable list if some other vma has it
+                        * mlocked.
                         */
                        if (PageUnevictable(page))
                                count_vm_event(UNEVICTABLE_PGSTRANDED);
@@ -140,113 +136,50 @@ static void munlock_vma_page(struct page *page)
 }
 
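Only fragments of munlock_vma_page() appear in the hunks here, so a condensed sketch of the flow that the new kernel-doc above describes may help; the helper name is made up and details are elided, but every call in it is a real API from this era:

    /* Condensed illustration of munlock_vma_page(), not the literal body. */
    static void munlock_page_flow(struct page *page)
    {
            if (!TestClearPageMlocked(page))
                    return;                          /* lost the race, nothing to undo */
            dec_zone_page_state(page, NR_MLOCK);
            if (!isolate_lru_page(page)) {           /* 0 means we isolated it */
                    int ret = try_to_munlock(page);  /* rmap walk: other VM_LOCKED vmas? */
                    if (ret != SWAP_MLOCK)
                            count_vm_event(UNEVICTABLE_PGMUNLOCKED);
                    putback_lru_page(page);          /* re-evaluates evictability */
            } else if (PageUnevictable(page)) {
                    /* someone else holds it off the LRU; vmscan will fix it up */
                    count_vm_event(UNEVICTABLE_PGSTRANDED);
            }
    }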
 /**
- * __mlock_vma_pages_range() -  mlock/munlock a range of pages in the vma.
+ * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
  * @vma:   target vma
  * @start: start address
  * @end:   end address
- * @mlock: 0 indicate munlock, otherwise mlock.
  *
- * If @mlock == 0, unlock an mlocked range;
- * else mlock the range of pages.  This takes care of making the pages present ,
- * too.
+ * This takes care of making the pages present too.
  *
  * return 0 on success, negative error code on error.
  *
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
 static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
+                                   unsigned long start, unsigned long end,
+                                   int *nonblocking)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
-       struct page *pages[16]; /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
-       int ret = 0;
-       int gup_flags = 0;
+       int gup_flags;
 
        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(end   & ~PAGE_MASK);
        VM_BUG_ON(start < vma->vm_start);
        VM_BUG_ON(end   > vma->vm_end);
-       VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
-                 (atomic_read(&mm->mm_users) != 0));
+       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
+       gup_flags = FOLL_TOUCH | FOLL_MLOCK;
        /*
-        * mlock:   don't page populate if vma has PROT_NONE permission.
-        * munlock: always do munlock although the vma has PROT_NONE
-        *          permission, or SIGKILL is pending.
+        * We want to touch writable mappings with a write fault in order
+        * to break COW, except for shared mappings because these don't COW
+        * and we would not want to dirty them for nothing.
         */
-       if (!mlock)
-               gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
-                            GUP_FLAGS_IGNORE_SIGKILL;
-
-       if (vma->vm_flags & VM_WRITE)
-               gup_flags |= GUP_FLAGS_WRITE;
-
-       while (nr_pages > 0) {
-               int i;
-
-               cond_resched();
-
-               /*
-                * get_user_pages makes pages present if we are
-                * setting mlock. and this extra reference count will
-                * disable migration of this page.  However, page may
-                * still be truncated out from under us.
-                */
-               ret = __get_user_pages(current, mm, addr,
-                               min_t(int, nr_pages, ARRAY_SIZE(pages)),
-                               gup_flags, pages, NULL);
-               /*
-                * This can happen for, e.g., VM_NONLINEAR regions before
-                * a page has been allocated and mapped at a given offset,
-                * or for addresses that map beyond end of a file.
-                * We'll mlock the the pages if/when they get faulted in.
-                */
-               if (ret < 0)
-                       break;
-               if (ret == 0) {
-                       /*
-                        * We know the vma is there, so the only time
-                        * we cannot get a single page should be an
-                        * error (ret < 0) case.
-                        */
-                       WARN_ON(1);
-                       break;
-               }
-
-               lru_add_drain();        /* push cached pages to LRU */
-
-               for (i = 0; i < ret; i++) {
-                       struct page *page = pages[i];
-
-                       lock_page(page);
-                       /*
-                        * Because we lock page here and migration is blocked
-                        * by the elevated reference, we need only check for
-                        * page truncation (file-cache only).
-                        */
-                       if (page->mapping) {
-                               if (mlock)
-                                       mlock_vma_page(page);
-                               else
-                                       munlock_vma_page(page);
-                       }
-                       unlock_page(page);
-                       put_page(page);         /* ref from get_user_pages() */
+       if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+               gup_flags |= FOLL_WRITE;
 
-                       /*
-                        * here we assume that get_user_pages() has given us
-                        * a list of virtually contiguous pages.
-                        */
-                       addr += PAGE_SIZE;      /* for next get_user_pages() */
-                       nr_pages--;
-               }
-               ret = 0;
-       }
+       /*
+        * We want mlock to succeed for regions that have any permissions
+        * other than PROT_NONE.
+        */
+       if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+               gup_flags |= FOLL_FORCE;
 
-       return ret;     /* count entire vma as locked_vm */
+       return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+                               NULL, NULL, nonblocking);
 }
 
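The gup_flags chosen above are what make the new wrapper so short: FOLL_MLOCK asks the fault path to mark each page mlocked as it is brought in, FOLL_WRITE forces a write fault on private writable mappings so COW is broken up front, and FOLL_FORCE lets the fault proceed on any mapping that is not PROT_NONE. The outcomes for a few common cases, written out as a hypothetical helper (mlock_gup_flags() is not a real kernel function, just the same logic isolated for illustration):

    /*
     * PROT_READ|PROT_WRITE, MAP_PRIVATE -> FOLL_TOUCH|FOLL_MLOCK|FOLL_WRITE|FOLL_FORCE
     * PROT_READ|PROT_WRITE, MAP_SHARED  -> FOLL_TOUCH|FOLL_MLOCK|FOLL_FORCE (no COW to break)
     * PROT_NONE                         -> FOLL_TOUCH|FOLL_MLOCK (gup rejects the range)
     */
    static unsigned int mlock_gup_flags(unsigned long vm_flags)
    {
            unsigned int flags = FOLL_TOUCH | FOLL_MLOCK;

            if ((vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
                    flags |= FOLL_WRITE;
            if (vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
                    flags |= FOLL_FORCE;
            return flags;
    }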
 /*
@@ -261,27 +194,6 @@ static int __mlock_posix_error_return(long retval)
        return retval;
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-/*
- * Just make pages present if VM_LOCKED.  No-op if unlocking.
- */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-                                  unsigned long start, unsigned long end,
-                                  int mlock)
-{
-       if (mlock && (vma->vm_flags & VM_LOCKED))
-               return make_pages_present(start, end);
-       return 0;
-}
-
-static inline int __mlock_posix_error_return(long retval)
-{
-       return 0;
-}
-
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
 /**
  * mlock_vma_pages_range() - mlock pages in specified vma range.
  * @vma - the vma containing the specified address range
@@ -294,14 +206,10 @@ static inline int __mlock_posix_error_return(long retval)
  *
  * return number of pages [> 0] to be removed from locked_vm on success
  * of "special" vmas.
- *
- * return negative error if vma spanning @start-@range disappears while
- * mmap semaphore is dropped.  Unlikely?
  */
 long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
 {
-       struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -313,21 +221,12 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 
        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
-                       vma == get_gate_vma(current))) {
-               long error;
-               downgrade_write(&mm->mmap_sem);
+                       vma == get_gate_vma(current->mm))) {
 
-               error = __mlock_vma_pages_range(vma, start, end, 1);
+               __mlock_vma_pages_range(vma, start, end, NULL);
 
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               vma = find_vma(mm, start);
-               /* non-NULL vma must contain @start, but need to check @end */
-               if (!vma ||  end > vma->vm_end)
-                       return -ENOMEM;
-
-               return 0;       /* hide other errors from mmap(), et al */
+               /* Hide errors from mmap() and other callers */
+               return 0;
        }
 
        /*
@@ -345,7 +244,6 @@ no_mlock:
        return nr_pages;                /* error or pages NOT mlocked */
 }
 
-
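The return convention spelled out in the kernel-doc (0 for a vma that was actually mlocked, the full page count for a "special" vma that is left alone) lets callers in the mmap()/mremap() paths account locked_vm with a single subtraction. A sketch of the caller-side contract, not the exact mm/mmap.c code:

    if (vma->vm_flags & VM_LOCKED) {
            long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);

            /* special vmas report their pages so they are not counted as locked */
            mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
    }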
 /*
  * munlock_vma_pages_range() - munlock all pages in the vma range.
  * @vma - vma containing range to be munlock()ed.
@@ -365,10 +263,38 @@ no_mlock:
  * free them.  This will result in freeing mlocked pages.
  */
 void munlock_vma_pages_range(struct vm_area_struct *vma,
-                          unsigned long start, unsigned long end)
+                            unsigned long start, unsigned long end)
 {
+       unsigned long addr;
+
+       lru_add_drain();
        vma->vm_flags &= ~VM_LOCKED;
-       __mlock_vma_pages_range(vma, start, end, 0);
+
+       for (addr = start; addr < end; addr += PAGE_SIZE) {
+               struct page *page;
+               /*
+                * Although FOLL_DUMP is intended for get_dump_page(),
+                * it just so happens that its special treatment of the
+                * ZERO_PAGE (returning an error instead of doing get_page)
+                * suits munlock very well (and if somehow an abnormal page
+                * has sneaked into the range, we won't oops here: great).
+                */
+               page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+               if (page && !IS_ERR(page)) {
+                       lock_page(page);
+                       /*
+                        * Like in __mlock_vma_pages_range(),
+                        * because we lock page here and migration is
+                        * blocked by the elevated reference, we need
+                        * only check for file-cache page truncation.
+                        */
+                       if (page->mapping)
+                               munlock_vma_page(page);
+                       unlock_page(page);
+                       put_page(page);
+               }
+               cond_resched();
+       }
 }
 
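The munmap() and exit paths drive this loop over whole vmas through a thin wrapper in mm/internal.h, which reads approximately:

    /* mm/internal.h (approximate): munlock an entire vma before it is torn down */
    static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
    {
            munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
    }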
 /*
@@ -389,18 +315,10 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        int ret = 0;
        int lock = newflags & VM_LOCKED;
 
-       if (newflags == vma->vm_flags ||
-                       (vma->vm_flags & (VM_IO | VM_PFNMAP)))
+       if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
+           is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
                goto out;       /* don't set VM_LOCKED,  don't count */
 
-       if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
-                       is_vm_hugetlb_page(vma) ||
-                       vma == get_gate_vma(current)) {
-               if (lock)
-                       make_pages_present(start, end);
-               goto out;       /* don't set VM_LOCKED,  don't count */
-       }
-
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
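The VM_SPECIAL test above folds the old open-coded VM_IO / VM_PFNMAP checks together with the other "never mlock this" conditions. For reference, the mask in include/linux/mm.h at the time was along these lines:

    /* include/linux/mm.h (approximate definition at the time) */
    #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)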
@@ -435,46 +353,11 @@ success:
         * It's okay if try_to_unmap_one unmaps a page just after we
         * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
         */
-       vma->vm_flags = newflags;
-
-       if (lock) {
-               /*
-                * mmap_sem is currently held for write.  Downgrade the write
-                * lock to a read lock so that other faults, mmap scans, ...
-                * while we fault in all pages.
-                */
-               downgrade_write(&mm->mmap_sem);
-
-               ret = __mlock_vma_pages_range(vma, start, end, 1);
 
-               /*
-                * Need to reacquire mmap sem in write mode, as our callers
-                * expect this.  We have no support for atomically upgrading
-                * a sem to write, so we need to check for ranges while sem
-                * is unlocked.
-                */
-               up_read(&mm->mmap_sem);
-               /* vma can change or disappear */
-               down_write(&mm->mmap_sem);
-               *prev = find_vma(mm, start);
-               /* non-NULL *prev must contain @start, but need to check @end */
-               if (!(*prev) || end > (*prev)->vm_end)
-                       ret = -ENOMEM;
-               else if (ret > 0) {
-                       mm->locked_vm -= ret;
-                       ret = 0;
-               } else
-                       ret = __mlock_posix_error_return(ret); /* translate if needed */
-       } else {
-               /*
-                * TODO:  for unlocking, pages will already be resident, so
-                * we don't need to wait for allocations/reclaim/pagein, ...
-                * However, unlocking a very large region can still take a
-                * while.  Should we downgrade the semaphore for both lock
-                * AND unlock ?
-                */
-               __mlock_vma_pages_range(vma, start, end, 0);
-       }
+       if (lock)
+               vma->vm_flags = newflags;
+       else
+               munlock_vma_pages_range(vma, start, end);
 
 out:
        *prev = vma;
@@ -487,7 +370,8 @@ static int do_mlock(unsigned long start, size_t len, int on)
        struct vm_area_struct * vma, * prev;
        int error;
 
-       len = PAGE_ALIGN(len);
+       VM_BUG_ON(start & ~PAGE_MASK);
+       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
        if (end < start)
                return -EINVAL;
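With the VM_BUG_ONs above, do_mlock() now asserts the alignment instead of performing it, so the syscall entry points are expected to normalise the range first. The caller-side fixup in sys_mlock()/sys_munlock() looks roughly like this:

    /* expected by do_mlock(): page-align the requested range up front */
    len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
    start &= PAGE_MASK;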
@@ -530,6 +414,62 @@ static int do_mlock(unsigned long start, size_t len, int on)
        return error;
 }
 
+static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
+{
+       struct mm_struct *mm = current->mm;
+       unsigned long end, nstart, nend;
+       struct vm_area_struct *vma = NULL;
+       int locked = 0;
+       int ret = 0;
+
+       VM_BUG_ON(start & ~PAGE_MASK);
+       VM_BUG_ON(len != PAGE_ALIGN(len));
+       end = start + len;
+
+       for (nstart = start; nstart < end; nstart = nend) {
+               /*
+                * We want to fault in pages for [nstart; end) address range.
+                * Find first corresponding VMA.
+                */
+               if (!locked) {
+                       locked = 1;
+                       down_read(&mm->mmap_sem);
+                       vma = find_vma(mm, nstart);
+               } else if (nstart >= vma->vm_end)
+                       vma = vma->vm_next;
+               if (!vma || vma->vm_start >= end)
+                       break;
+               /*
+                * Set [nstart; nend) to intersection of desired address
+                * range with the first VMA. Also, skip undesirable VMA types.
+                */
+               nend = min(end, vma->vm_end);
+               if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+                       continue;
+               if (nstart < vma->vm_start)
+                       nstart = vma->vm_start;
+               /*
+                * Now fault in a range of pages. __mlock_vma_pages_range()
+                * double checks the vma flags, so that it won't mlock pages
+                * if the vma was already munlocked.
+                */
+               ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
+               if (ret < 0) {
+                       if (ignore_errors) {
+                               ret = 0;
+                               continue;       /* continue at next VMA */
+                       }
+                       ret = __mlock_posix_error_return(ret);
+                       break;
+               }
+               nend = nstart + ret * PAGE_SIZE;
+               ret = 0;
+       }
+       if (locked)
+               up_read(&mm->mmap_sem);
+       return ret;     /* 0 or negative error code */
+}
+
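The "locked" flag threaded through the loop exists because __get_user_pages(), when handed a non-NULL nonblocking pointer, is allowed to drop mmap_sem while a fault sleeps on I/O and reports that by clearing the flag. In sketch form (fault_result and pages_done are placeholder names, not the real gup variables):

    /* Inside __get_user_pages(), conceptually: */
    if (fault_result & VM_FAULT_RETRY) {
            /* the fault path released mmap_sem before sleeping */
            if (nonblocking)
                    *nonblocking = 0;
            return pages_done;      /* caller re-takes mmap_sem and continues */
    }

do_mlock_pages() copes with that by re-taking mmap_sem, re-looking up the vma with find_vma(), and resuming from wherever the previous call stopped.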
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
        unsigned long locked;
@@ -548,13 +488,15 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;
 
-       lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
 
        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
        up_write(&current->mm->mmap_sem);
+       if (!error)
+               error = do_mlock_pages(start, len, 0);
        return error;
 }
 
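Nothing changes for userspace except that a large mlock() no longer holds mmap_sem for write while every page is faulted in. For completeness, a minimal, self-contained usage example of the syscall pair (plain POSIX, nothing here depends on the kernel internals above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4 * 1024 * 1024;           /* 4 MiB */
            char *buf = malloc(len);

            if (!buf)
                    return 1;
            memset(buf, 0, len);                    /* touch the pages once */

            if (mlock(buf, len) != 0) {             /* counts against RLIMIT_MEMLOCK */
                    perror("mlock");
                    return 1;
            }
            /* buf is now guaranteed resident; do latency-sensitive work here */
            if (munlock(buf, len) != 0)
                    perror("munlock");
            free(buf);
            return 0;
    }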
@@ -595,7 +537,7 @@ out:
        return 0;
 }
 
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
 {
        unsigned long lock_limit;
        int ret = -EINVAL;
@@ -611,7 +553,7 @@ asmlinkage long sys_mlockall(int flags)
 
        down_write(&current->mm->mmap_sem);
 
-       lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
 
        ret = -ENOMEM;
@@ -619,11 +561,15 @@ asmlinkage long sys_mlockall(int flags)
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
        up_write(&current->mm->mmap_sem);
+       if (!ret && (flags & MCL_CURRENT)) {
+               /* Ignore errors */
+               do_mlock_pages(0, TASK_SIZE, 1);
+       }
 out:
        return ret;
 }
 
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
 {
        int ret;
 
@@ -645,7 +591,7 @@ int user_shm_lock(size_t size, struct user_struct *user)
        int allowed = 0;
 
        locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
        if (lock_limit == RLIM_INFINITY)
                allowed = 1;
        lock_limit >>= PAGE_SHIFT;
@@ -668,48 +614,3 @@ void user_shm_unlock(size_t size, struct user_struct *user)
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
 }
-
-void *alloc_locked_buffer(size_t size)
-{
-       unsigned long rlim, vm, pgsz;
-       void *buffer = NULL;
-
-       pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       down_write(&current->mm->mmap_sem);
-
-       rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->total_vm + pgsz;
-       if (rlim < vm)
-               goto out;
-
-       rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-       vm   = current->mm->locked_vm + pgsz;
-       if (rlim < vm)
-               goto out;
-
-       buffer = kzalloc(size, GFP_KERNEL);
-       if (!buffer)
-               goto out;
-
-       current->mm->total_vm  += pgsz;
-       current->mm->locked_vm += pgsz;
-
- out:
-       up_write(&current->mm->mmap_sem);
-       return buffer;
-}
-
-void free_locked_buffer(void *buffer, size_t size)
-{
-       unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       down_write(&current->mm->mmap_sem);
-
-       current->mm->total_vm  -= pgsz;
-       current->mm->locked_vm -= pgsz;
-
-       up_write(&current->mm->mmap_sem);
-
-       kfree(buffer);
-}