mm: vmscan: fix force-scanning small targets without swap
index 5d09d08..a65efd4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3,7 +3,7 @@
  *
  * Written by obz.
  *
- * Address space accounting code       <alan@redhat.com>
+ * Address space accounting code       <alan@lxorguk.ukuu.org.uk>
  */
 
 #include <linux/slab.h>
@@ -27,6 +27,9 @@
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
+#include <linux/perf_event.h>
+#include <linux/audit.h>
+#include <linux/khugepaged.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -81,10 +84,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
-int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
-int sysctl_overcommit_ratio = 50;      /* default is 50% */
+int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
+int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
+/*
+ * Make sure vm_committed_as is in its own cacheline and is not shared
+ * with other variables; it can be updated frequently by several CPUs.
+ */
+struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
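
For context, a percpu_counter batches per-CPU deltas into a shared s64, so
frequent updaters avoid cacheline ping-pong. A minimal usage sketch (the real
vm_committed_as is initialized elsewhere in this file's init path, not shown
in these hunks):

    #include <linux/percpu_counter.h>

    static int percpu_counter_example(void)
    {
            struct percpu_counter c;
            s64 total;

            if (percpu_counter_init(&c, 0))  /* allocates per-CPU slots; can fail */
                    return -ENOMEM;
            percpu_counter_add(&c, 128);     /* usually touches only this CPU's slot */
            total = percpu_counter_read_positive(&c); /* approximate, clamped >= 0 */
            (void)total;
            percpu_counter_destroy(&c);
            return 0;
    }
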
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -115,9 +122,17 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                return 0;
 
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
-               unsigned long n;
+               free = global_page_state(NR_FREE_PAGES);
+               free += global_page_state(NR_FILE_PAGES);
+
+               /*
+                * shmem pages shouldn't be counted as free in this
+                * case, they can't be purged, only swapped out, and
+                * that won't affect the overall amount of available
+                * memory in the system.
+                */
+               free -= global_page_state(NR_SHMEM);
 
-               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;
 
                /*
@@ -129,34 +144,18 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                free += global_page_state(NR_SLAB_RECLAIMABLE);
 
                /*
-                * Leave the last 3% for root
-                */
-               if (!cap_sys_admin)
-                       free -= free / 32;
-
-               if (free > pages)
-                       return 0;
-
-               /*
-                * nr_free_pages() is very expensive on large systems,
-                * only call if we're about to fail.
-                */
-               n = nr_free_pages();
-
-               /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
-               if (n <= totalreserve_pages)
+               if (free <= totalreserve_pages)
                        goto error;
                else
-                       n -= totalreserve_pages;
+                       free -= totalreserve_pages;
 
                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
-                       n -= n / 32;
-               free += n;
+                       free -= free / 32;
 
                if (free > pages)
                        return 0;
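
The OVERCOMMIT_NEVER branch that follows (unchanged by this patch) computes
allowed = total ram * overcommit_ratio / 100 + total swap. A hedged user-space
sketch of the same arithmetic read from /proc/meminfo (hugetlb pages are
ignored here, and the helper name meminfo_kb is ours):

    #include <stdio.h>
    #include <string.h>

    static unsigned long meminfo_kb(const char *key)
    {
            char line[128];
            unsigned long val = 0;
            FILE *f = fopen("/proc/meminfo", "r");

            if (!f)
                    return 0;
            while (fgets(line, sizeof(line), f)) {
                    if (!strncmp(line, key, strlen(key))) {
                            sscanf(line + strlen(key), " %lu", &val);
                            break;
                    }
            }
            fclose(f);
            return val;
    }

    int main(void)
    {
            unsigned long ram = meminfo_kb("MemTotal:");
            unsigned long swap = meminfo_kb("SwapTotal:");
            int ratio = 50; /* sysctl_overcommit_ratio default */

            printf("approx CommitLimit: %lu kB\n", ram * ratio / 100 + swap);
            return 0;
    }
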
@@ -175,13 +174,10 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 
        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
-       allowed -= mm->total_vm / 32;
+       if (mm)
+               allowed -= mm->total_vm / 32;
 
-       /*
-        * cast `allowed' as a signed long because vm_committed_space
-        * sometimes has a negative value
-        */
-       if (atomic_long_read(&vm_committed_space) < (long)allowed)
+       if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
 error:
        vm_unacct_memory(pages);
@@ -190,7 +186,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_lock
+ * Requires inode->i_mapping->i_mmap_mutex
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
@@ -218,9 +214,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
        if (file) {
                struct address_space *mapping = file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 }
 
@@ -244,7 +240,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        return next;
 }
 
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
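
SYSCALL_DEFINE1 replaces the open-coded asmlinkage prototype; ignoring the
tracing metadata and the CONFIG_SYSCALL_WRAPPERS sign-extension wrappers that
motivated the macros, the definition boils down to the old form:

    /* SYSCALL_DEFINE1(brk, unsigned long, brk) roughly expands to: */
    asmlinkage long sys_brk(unsigned long brk);
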
@@ -254,7 +250,15 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
        down_write(&mm->mmap_sem);
 
 #ifdef CONFIG_COMPAT_BRK
-       min_brk = mm->end_code;
+       /*
+        * CONFIG_COMPAT_BRK can still be overridden by setting
+        * randomize_va_space to 2, which will still cause mm->start_brk
+        * to be arbitrarily shifted
+        */
+       if (current->brk_randomized)
+               min_brk = mm->start_brk;
+       else
+               min_brk = mm->end_data;
 #else
        min_brk = mm->start_brk;
 #endif
@@ -267,7 +271,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
         * segment grow beyond its set limit in the case where the limit is
         * not page aligned -Ram Gupta
         */
-       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+       rlim = rlimit(RLIMIT_DATA);
        if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
                        (mm->end_data - mm->start_data) > rlim)
                goto out;
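
rlimit() is a small helper replacing the long ->signal->rlim dereference;
approximately, from include/linux/sched.h:

    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                    unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }

    static inline unsigned long rlimit(unsigned int limit)
    {
            return task_rlimit(current, limit);
    }
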
@@ -370,7 +374,7 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                if (vma_tmp->vm_end > addr) {
                        vma = vma_tmp;
                        if (vma_tmp->vm_start <= addr)
-                               return vma;
+                               break;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
@@ -386,23 +390,6 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
        return vma;
 }
 
-static inline void
-__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct vm_area_struct *prev, struct rb_node *rb_parent)
-{
-       if (prev) {
-               vma->vm_next = prev->vm_next;
-               prev->vm_next = vma;
-       } else {
-               mm->mmap = vma;
-               if (rb_parent)
-                       vma->vm_next = rb_entry(rb_parent,
-                                       struct vm_area_struct, vm_rb);
-               else
-                       vma->vm_next = NULL;
-       }
-}
-
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                struct rb_node **rb_link, struct rb_node *rb_parent)
 {
@@ -410,9 +397,9 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 }
 
-static inline void __vma_link_file(struct vm_area_struct *vma)
+static void __vma_link_file(struct vm_area_struct *vma)
 {
-       struct file * file;
+       struct file *file;
 
        file = vma->vm_file;
        if (file) {
@@ -439,7 +426,6 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
-       __anon_vma_link(vma);
 }
 
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -451,18 +437,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;
 
-       if (mapping) {
-               spin_lock(&mapping->i_mmap_lock);
-               vma->vm_truncate_count = mapping->truncate_count;
-       }
-       anon_vma_lock(vma);
+       if (mapping)
+               mutex_lock(&mapping->i_mmap_mutex);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
-       anon_vma_unlock(vma);
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
 
        mm->map_count++;
        validate_mm(mm);
@@ -473,11 +455,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  * insert vm structure into list and rbtree and anon_vma,
  * but it has already been inserted into prio_tree earlier.
  */
-static void
-__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct * __vma, * prev;
-       struct rb_node ** rb_link, * rb_parent;
+       struct vm_area_struct *__vma, *prev;
+       struct rb_node **rb_link, *rb_parent;
 
        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
        BUG_ON(__vma && __vma->vm_start < vma->vm_end);
@@ -489,7 +470,11 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       prev->vm_next = vma->vm_next;
+       struct vm_area_struct *next = vma->vm_next;
+
+       prev->vm_next = next;
+       if (next)
+               next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
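
Maintaining vm_prev here is what lets later hunks replace find_vma_prev()
with a direct pointer chase, as in the do_munmap() hunk below:

    vma = find_vma(mm, start);
    if (vma)
            prev = vma->vm_prev;    /* no rbtree walk needed */
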
@@ -502,7 +487,7 @@ __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
  * are necessary.  The "insert" vma (if any) is to be inserted
  * before we drop the necessary locks.
  */
-void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -510,12 +495,14 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
-       struct file *file = vma->vm_file;
        struct anon_vma *anon_vma = NULL;
+       struct file *file = vma->vm_file;
        long adjust_next = 0;
        int remove_next = 0;
 
        if (next && !insert) {
+               struct vm_area_struct *exporter = NULL;
+
                if (end >= next->vm_end) {
                        /*
                         * vma expands, overlapping all the next, and
@@ -523,7 +510,7 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
                         */
 again:                 remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
-                       anon_vma = next->anon_vma;
+                       exporter = next;
                        importer = vma;
                } else if (end > next->vm_start) {
                        /*
@@ -531,7 +518,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         * mprotect case 5 shifting the boundary up.
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
-                       anon_vma = next->anon_vma;
+                       exporter = next;
                        importer = vma;
                } else if (end < vma->vm_end) {
                        /*
@@ -540,26 +527,28 @@ again:                    remove_next = 1 + (end > next->vm_end);
                         * mprotect case 4 shifting the boundary down.
                         */
                        adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
-                       anon_vma = next->anon_vma;
+                       exporter = vma;
                        importer = next;
                }
+
+               /*
+                * Easily overlooked: when mprotect shifts the boundary,
+                * make sure the expanding vma has anon_vma set if the
+                * shrinking vma had, to cover any anon pages imported.
+                */
+               if (exporter && exporter->anon_vma && !importer->anon_vma) {
+                       if (anon_vma_clone(importer, exporter))
+                               return -ENOMEM;
+                       importer->anon_vma = exporter->anon_vma;
+               }
        }
 
        if (file) {
                mapping = file->f_mapping;
                if (!(vma->vm_flags & VM_NONLINEAR))
                        root = &mapping->i_mmap;
-               spin_lock(&mapping->i_mmap_lock);
-               if (importer &&
-                   vma->vm_truncate_count != next->vm_truncate_count) {
-                       /*
-                        * unmap_mapping_range might be in progress:
-                        * ensure that the expanding vma is rescanned.
-                        */
-                       importer->vm_truncate_count = 0;
-               }
+               mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
-                       insert->vm_truncate_count = vma->vm_truncate_count;
                        /*
                         * Put into prio_tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
@@ -570,23 +559,17 @@ again:                    remove_next = 1 + (end > next->vm_end);
                }
        }
 
+       vma_adjust_trans_huge(vma, start, end, adjust_next);
+
        /*
-        * When changing only vma->vm_end, we don't really need
-        * anon_vma lock: but is that case worth optimizing out?
+        * When changing only vma->vm_end, we don't really need anon_vma
+        * lock. This is a fairly rare case by itself, but the anon_vma
+        * lock may be shared between many sibling processes.  Skipping
+        * the lock for brk adjustments makes a difference sometimes.
         */
-       if (vma->anon_vma)
+       if (vma->anon_vma && (importer || start != vma->vm_start)) {
                anon_vma = vma->anon_vma;
-       if (anon_vma) {
-               spin_lock(&anon_vma->lock);
-               /*
-                * Easily overlooked: when mprotect shifts the boundary,
-                * make sure the expanding vma has anon_vma set if the
-                * shrinking vma had, to cover any anon pages imported.
-                */
-               if (importer && !importer->anon_vma) {
-                       importer->anon_vma = anon_vma;
-                       __anon_vma_link(importer);
-               }
+               anon_vma_lock(anon_vma);
        }
 
        if (root) {
@@ -619,8 +602,6 @@ again:                      remove_next = 1 + (end > next->vm_end);
                __vma_unlink(mm, next, vma);
                if (file)
                        __remove_shared_vm_struct(next, file, mapping);
-               if (next->anon_vma)
-                       __anon_vma_merge(vma, next);
        } else if (insert) {
                /*
                 * split_vma has split insert from vma, and needs
@@ -631,9 +612,9 @@ again:                      remove_next = 1 + (end > next->vm_end);
        }
 
        if (anon_vma)
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
 
        if (remove_next) {
                if (file) {
@@ -641,6 +622,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                        if (next->vm_flags & VM_EXECUTABLE)
                                removed_exe_file_vma(mm);
                }
+               if (next->anon_vma)
+                       anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
@@ -656,18 +639,19 @@ again:                    remove_next = 1 + (end > next->vm_end);
        }
 
        validate_mm(mm);
+
+       return 0;
 }
 
 /*
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
-
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
-       if (vma->vm_flags != vm_flags)
+       /* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
+       if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
                return 0;
        if (vma->vm_file != file)
                return 0;
@@ -677,9 +661,17 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
 }
 
 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
-                                       struct anon_vma *anon_vma2)
+                                       struct anon_vma *anon_vma2,
+                                       struct vm_area_struct *vma)
 {
-       return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
+       /*
+        * The list_is_singular() test is to avoid merging VMAs cloned from
+        * parents; this improves scalability by reducing anon_vma lock
+        * contention.
+        */
+       if ((!anon_vma1 || !anon_vma2) && (!vma ||
+               list_is_singular(&vma->anon_vma_chain)))
+               return 1;
+       return anon_vma1 == anon_vma2;
 }
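
list_is_singular() is true only for a list with exactly one entry; its
definition in include/linux/list.h is essentially:

    static inline int list_is_singular(const struct list_head *head)
    {
            return !list_empty(head) && (head->next == head->prev);
    }
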
 
 /*
@@ -698,7 +690,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
        if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
        }
@@ -717,7 +709,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
        if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
@@ -763,6 +755,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
+       int err;
 
        /*
         * We later require that vma->vm_flags == vm_flags,
@@ -794,13 +787,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen) &&
                                is_mergeable_anon_vma(prev->anon_vma,
-                                                     next->anon_vma)) {
+                                                     next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
-                       vma_adjust(prev, prev->vm_start,
+                       err = vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
                } else                                  /* cases 2, 5, 7 */
-                       vma_adjust(prev, prev->vm_start,
+                       err = vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(prev);
                return prev;
        }
 
@@ -812,11 +808,14 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
-                       vma_adjust(prev, prev->vm_start,
+                       err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
                else                                    /* cases 3, 8 */
-                       vma_adjust(area, addr, next->vm_end,
+                       err = vma_adjust(area, addr, next->vm_end,
                                next->vm_pgoff - pglen, NULL);
+               if (err)
+                       return NULL;
+               khugepaged_enter_vma_merge(area);
                return area;
        }
 
@@ -824,6 +823,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
 }
 
 /*
+ * Rough compatibility check to quickly see if it's even worth looking
+ * at sharing an anon_vma.
+ *
+ * They need to have the same vm_file, and the flags can only differ
+ * in things that mprotect may change.
+ *
+ * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
+ * we can merge the two vma's. For example, we refuse to merge a vma if
+ * there is a vm_ops->close() function, because that indicates that the
+ * driver is doing some kind of reference counting. But that doesn't
+ * really matter for the anon_vma sharing case.
+ */
+static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
+{
+       return a->vm_end == b->vm_start &&
+               mpol_equal(vma_policy(a), vma_policy(b)) &&
+               a->vm_file == b->vm_file &&
+               !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
+               b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
+}
+
+/*
+ * Do some basic sanity checking to see if we can re-use the anon_vma
+ * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
+ * the same as 'old', the other will be the new one that is trying
+ * to share the anon_vma.
+ *
+ * NOTE! This runs with mmap_sem held for reading, so it is possible that
+ * the anon_vma of 'old' is concurrently in the process of being set up
+ * by another page fault trying to merge _that_. But that's ok: if it
+ * is being set up, that automatically means that it will be a singleton
+ * acceptable for merging, so we can do all of this optimistically. But
+ * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
+ *
+ * IOW, the "list_is_singular()" test on the anon_vma_chain only
+ * matters for the 'stable anon_vma' case (ie the thing we want to avoid
+ * is to return an anon_vma that is "complex" due to having gone through
+ * a fork).
+ *
+ * We also make sure that the two vma's are compatible (adjacent,
+ * and with the same memory policies). That's all stable, even with just
+ * a read lock on the mmap_sem.
+ */
+static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
+{
+       if (anon_vma_compatible(a, b)) {
+               struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
+
+               if (anon_vma && list_is_singular(&old->anon_vma_chain))
+                       return anon_vma;
+       }
+       return NULL;
+}
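
ACCESS_ONCE() forces a single load of old->anon_vma, so the compiler cannot
re-read the pointer after the check; its definition in
include/linux/compiler.h is:

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
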
+
+/*
  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
  * neighbouring vmas for a suitable anon_vma, before it goes off
  * to allocate a new anon_vma.  It checks because a repetitive
@@ -833,48 +887,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  */
 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
+       struct anon_vma *anon_vma;
        struct vm_area_struct *near;
-       unsigned long vm_flags;
 
        near = vma->vm_next;
        if (!near)
                goto try_prev;
 
-       /*
-        * Since only mprotect tries to remerge vmas, match flags
-        * which might be mprotected into each other later on.
-        * Neither mlock nor madvise tries to remerge at present,
-        * so leave their flags as obstructing a merge.
-        */
-       vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-       vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-       if (near->anon_vma && vma->vm_end == near->vm_start &&
-                       mpol_equal(vma_policy(vma), vma_policy(near)) &&
-                       can_vma_merge_before(near, vm_flags,
-                               NULL, vma->vm_file, vma->vm_pgoff +
-                               ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
-               return near->anon_vma;
+       anon_vma = reusable_anon_vma(near, vma, near);
+       if (anon_vma)
+               return anon_vma;
 try_prev:
-       /*
-        * It is potentially slow to have to call find_vma_prev here.
-        * But it's only on the first write fault on the vma, not
-        * every time, and we could devise a way to avoid it later
-        * (e.g. stash info in next's anon_vma_node when assigning
-        * an anon_vma, or when trying vma_merge).  Another time.
-        */
-       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+       near = vma->vm_prev;
        if (!near)
                goto none;
 
-       vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
-       vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
-
-       if (near->anon_vma && near->vm_end == vma->vm_start &&
-                       mpol_equal(vma_policy(near), vma_policy(vma)) &&
-                       can_vma_merge_after(near, vm_flags,
-                               NULL, vma->vm_file, vma->vm_pgoff))
-               return near->anon_vma;
+       anon_vma = reusable_anon_vma(near, near, vma);
+       if (anon_vma)
+               return anon_vma;
 none:
        /*
         * There's no absolute need to look only at touching neighbours:
@@ -906,18 +936,17 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 #endif /* CONFIG_PROC_FS */
 
 /*
- * The caller must hold down_write(current->mm->mmap_sem).
+ * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
+unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
-       unsigned int vm_flags;
+       vm_flags_t vm_flags;
        int error;
-       int accountable = 1;
        unsigned long reqprot = prot;
 
        /*
@@ -936,13 +965,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);
 
-       error = arch_mmap_check(addr, len, flags);
-       if (error)
-               return error;
-
        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
-       if (!len || len > TASK_SIZE)
+       if (!len)
                return -ENOMEM;
 
        /* offset overflow? */
@@ -967,17 +992,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
-       if (flags & MAP_LOCKED) {
+       if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;
-               vm_flags |= VM_LOCKED;
-       }
+
        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
-               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
@@ -1017,8 +1041,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }
-                       if (is_file_hugepages(file))
-                               accountable = 0;
 
                        if (!file->f_op || !file->f_op->mmap)
                                return -ENODEV;
@@ -1030,6 +1052,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
+                       /*
+                        * Ignore pgoff.
+                        */
+                       pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
@@ -1047,11 +1073,75 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        if (error)
                return error;
 
-       return mmap_region(file, addr, len, flags, vm_flags, pgoff,
-                          accountable);
+       return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+               unsigned long, prot, unsigned long, flags,
+               unsigned long, fd, unsigned long, pgoff)
+{
+       struct file *file = NULL;
+       unsigned long retval = -EBADF;
+
+       if (!(flags & MAP_ANONYMOUS)) {
+               audit_mmap_fd(fd, flags);
+               if (unlikely(flags & MAP_HUGETLB))
+                       return -EINVAL;
+               file = fget(fd);
+               if (!file)
+                       goto out;
+       } else if (flags & MAP_HUGETLB) {
+               struct user_struct *user = NULL;
+               /*
+                * VM_NORESERVE is used because the reservations will be
+                * taken when vm_ops->mmap() is called.
+                * A dummy user value is used because we are not locking
+                * memory, so no accounting is necessary.
+                */
+               len = ALIGN(len, huge_page_size(&default_hstate));
+               file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+                                               &user, HUGETLB_ANONHUGE_INODE);
+               if (IS_ERR(file))
+                       return PTR_ERR(file);
+       }
+
+       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+       down_write(&current->mm->mmap_sem);
+       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+       up_write(&current->mm->mmap_sem);
+
+       if (file)
+               fput(file);
+out:
+       return retval;
+}
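
A hedged user-space sketch of the new MAP_HUGETLB path (no fd is needed for
anonymous huge mappings; the call fails unless huge pages have been reserved,
e.g. via /proc/sys/vm/nr_hugepages, and older libc headers may lack the flag):

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGETLB
    #define MAP_HUGETLB 0x40000     /* value on x86 */
    #endif

    int main(void)
    {
            size_t len = 2UL << 20; /* one 2 MiB huge page on x86 */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap"); /* e.g. no huge pages reserved */
                    return 1;
            }
            munmap(p, len);
            return 0;
    }
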
+
+#ifdef __ARCH_WANT_SYS_OLD_MMAP
+struct mmap_arg_struct {
+       unsigned long addr;
+       unsigned long len;
+       unsigned long prot;
+       unsigned long flags;
+       unsigned long fd;
+       unsigned long offset;
+};
+
+SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
+{
+       struct mmap_arg_struct a;
+
+       if (copy_from_user(&a, arg, sizeof(a)))
+               return -EFAULT;
+       if (a.offset & ~PAGE_MASK)
+               return -EINVAL;
+
+       return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+                             a.offset >> PAGE_SHIFT);
+}
+#endif /* __ARCH_WANT_SYS_OLD_MMAP */
+
 /*
 * Some shared mappings will want the pages marked read-only
  * to track write events. If so, we'll downgrade vm_page_prot
@@ -1060,7 +1150,7 @@ EXPORT_SYMBOL(do_mmap_pgoff);
  */
 int vma_wants_writenotify(struct vm_area_struct *vma)
 {
-       unsigned int vm_flags = vma->vm_flags;
+       vm_flags_t vm_flags = vma->vm_flags;
 
        /* If it was private or non-writable, the write bit is already clear */
        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
@@ -1084,10 +1174,25 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * not hugepages and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
+{
+       /*
+        * hugetlb has its own accounting separate from the core VM.
+        * VM_HUGETLB may not be set yet, so we cannot check for that flag.
+        */
+       if (file && is_file_hugepages(file))
+               return 0;
+
+       return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
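
The flag test reads as a small charge matrix (a sketch of the intended cases):

    /*
     *  vm_flags                        accountable_mapping()
     *  VM_WRITE (private writable)     1  -> VM_ACCOUNT set, charged
     *  VM_WRITE|VM_SHARED              0  (shmem does its own check)
     *  VM_WRITE|VM_NORESERVE           0
     *  read-only private               0
     *  hugetlb file (any flags)        0  (hugetlb accounting)
     */
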
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
-                         unsigned int vm_flags, unsigned long pgoff,
-                         int accountable)
+                         vm_flags_t vm_flags, unsigned long pgoff)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
@@ -1111,33 +1216,35 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       if (flags & MAP_NORESERVE)
-               vm_flags |= VM_NORESERVE;
+       /*
+        * Set 'VM_NORESERVE' if we should not account for the
+        * memory use of this mapping.
+        */
+       if ((flags & MAP_NORESERVE)) {
+               /* We honor MAP_NORESERVE if allowed to overcommit */
+               if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+                       vm_flags |= VM_NORESERVE;
 
-       if (accountable && (!(flags & MAP_NORESERVE) ||
-                           sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-               if (vm_flags & VM_SHARED) {
-                       /* Check memory availability in shmem_file_setup? */
-                       vm_flags |= VM_ACCOUNT;
-               } else if (vm_flags & VM_WRITE) {
-                       /*
-                        * Private writable mapping: check memory availability
-                        */
-                       charged = len >> PAGE_SHIFT;
-                       if (security_vm_enough_memory(charged))
-                               return -ENOMEM;
-                       vm_flags |= VM_ACCOUNT;
-               }
+               /* hugetlb applies strict overcommit unless MAP_NORESERVE */
+               if (file && is_file_hugepages(file))
+                       vm_flags |= VM_NORESERVE;
        }
 
        /*
-        * Can we just expand an old private anonymous mapping?
-        * The VM_SHARED test is necessary because shmem_zero_setup
-        * will create the file object for a shared anonymous map below.
+        * Private writable mapping: check memory availability
         */
-       if (!file && !(vm_flags & VM_SHARED) &&
-           vma_merge(mm, prev, addr, addr + len, vm_flags,
-                                       NULL, NULL, pgoff, NULL))
+       if (accountable_mapping(file, vm_flags)) {
+               charged = len >> PAGE_SHIFT;
+               if (security_vm_enough_memory(charged))
+                       return -ENOMEM;
+               vm_flags |= VM_ACCOUNT;
+       }
+
+       /*
+        * Can we just expand an old mapping?
+        */
+       vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+       if (vma)
                goto out;
 
        /*
@@ -1157,6 +1264,7 @@ munmap_back:
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        if (file) {
                error = -EINVAL;
@@ -1175,55 +1283,51 @@ munmap_back:
                        goto unmap_and_free_vma;
                if (vm_flags & VM_EXECUTABLE)
                        added_exe_file_vma(mm);
+
+               /* Can addr have changed??
+                *
+                * Answer: Yes, several device drivers can do it in their
+                *         f_op->mmap method. -DaveM
+                */
+               addr = vma->vm_start;
+               pgoff = vma->vm_pgoff;
+               vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
        }
 
-       /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-        * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-        * that memory reservation must be checked; but that reservation
-        * belongs to shared memory object, not to vma: so now clear it.
-        */
-       if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-               vma->vm_flags &= ~VM_ACCOUNT;
-
-       /* Can addr have changed??
-        *
-        * Answer: Yes, several device drivers can do it in their
-        *         f_op->mmap method. -DaveM
-        */
-       addr = vma->vm_start;
-       pgoff = vma->vm_pgoff;
-       vm_flags = vma->vm_flags;
+       if (vma_wants_writenotify(vma)) {
+               pgprot_t pprot = vma->vm_page_prot;
 
-       if (vma_wants_writenotify(vma))
+               /* Can vma->vm_page_prot have changed??
+                *
+                * Answer: Yes, drivers may have changed it in their
+                *         f_op->mmap method.
+                *
+                * Ensures that vmas marked as uncached stay that way.
+                */
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-
-       if (file && vma_merge(mm, prev, addr, vma->vm_end,
-                       vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               mpol_put(vma_policy(vma));
-               kmem_cache_free(vm_area_cachep, vma);
-               fput(file);
-               if (vm_flags & VM_EXECUTABLE)
-                       removed_exe_file_vma(mm);
-       } else {
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               file = vma->vm_file;
+               if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        }
 
+       vma_link(mm, vma, prev, rb_link, rb_parent);
+       file = vma->vm_file;
+
        /* Once vma denies write, undo our temporary denial count */
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
 out:
+       perf_event_mmap(vma);
+
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
-       }
-       if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+               if (!mlock_vma_pages_range(vma, addr, addr + len))
+                       mm->locked_vm += (len >> PAGE_SHIFT);
+       } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);
        return addr;
 
@@ -1436,6 +1540,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);
 
+       unsigned long error = arch_mmap_check(addr, len, flags);
+       if (error)
+               return error;
+
+       /* Careful about overflows.. */
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
        get_area = current->mm->get_unmapped_area;
        if (file && file->f_op && file->f_op->get_unmapped_area)
                get_area = file->f_op->get_unmapped_area;
@@ -1454,7 +1566,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
-struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
@@ -1497,7 +1609,7 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
        struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node * rb_node;
+       struct rb_node *rb_node;
        if (!mm)
                goto out;
 
@@ -1531,7 +1643,7 @@ out:
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
@@ -1542,7 +1654,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > rlim[RLIMIT_STACK].rlim_cur)
+       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -1550,7 +1662,8 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
                unsigned long locked;
                unsigned long limit;
                locked = mm->locked_vm + grow;
-               limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+               limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+               limit >>= PAGE_SHIFT;
                if (locked > limit && !capable(CAP_IPC_LOCK))
                        return -ENOMEM;
        }
@@ -1565,7 +1678,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
         * Overcommit..  This must be the final test, as it will
         * update security statistics.
         */
-       if (security_vm_enough_memory(grow))
+       if (security_vm_enough_memory_mm(mm, grow))
                return -ENOMEM;
 
        /* Ok, everything looks good - let it rip */
@@ -1581,9 +1694,6 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
  * vma is the last one with address > vma->vm_end.  Have to extend vma.
  */
-#ifndef CONFIG_IA64
-static inline
-#endif
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        int error;
@@ -1597,7 +1707,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1608,7 +1718,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (address < PAGE_ALIGN(address+4))
                address = PAGE_ALIGN(address+4);
        else {
-               anon_vma_unlock(vma);
+               vma_unlock_anon_vma(vma);
                return -ENOMEM;
        }
        error = 0;
@@ -1620,11 +1730,17 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                size = address - vma->vm_start;
                grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error)
-                       vma->vm_end = address;
+               error = -ENOMEM;
+               if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               vma->vm_end = address;
+                               perf_event_mmap(vma);
+                       }
+               }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
+       khugepaged_enter_vma_merge(vma);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1632,7 +1748,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-static inline int expand_downwards(struct vm_area_struct *vma,
+int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        int error;
@@ -1649,7 +1765,7 @@ static inline int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1664,21 +1780,21 @@ static inline int expand_downwards(struct vm_area_struct *vma,
                size = vma->vm_end - address;
                grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error) {
-                       vma->vm_start = address;
-                       vma->vm_pgoff -= grow;
+               error = -ENOMEM;
+               if (grow <= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               vma->vm_start = address;
+                               vma->vm_pgoff -= grow;
+                               perf_event_mmap(vma);
+                       }
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
+       khugepaged_enter_vma_merge(vma);
        return error;
 }
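
The new grow <= vma->vm_pgoff test guards the unsigned update below it;
without the check the subtraction would wrap, e.g.:

    pgoff_t pgoff = 2;          /* pgoff_t is unsigned long */
    unsigned long grow = 5;

    pgoff -= grow;              /* wraps to ULONG_MAX - 2, not -3 */
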
 
-int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
-{
-       return expand_downwards(vma, address);
-}
-
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1696,8 +1812,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!prev || expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED)
-               make_pages_present(addr, prev->vm_end);
+       if (prev->vm_flags & VM_LOCKED) {
+               mlock_vma_pages_range(prev, addr, prev->vm_end);
+       }
        return prev;
 }
 #else
@@ -1723,8 +1840,9 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED)
-               make_pages_present(addr, start);
+       if (vma->vm_flags & VM_LOCKED) {
+               mlock_vma_pages_range(vma, addr, start);
+       }
        return vma;
 }
 #endif
@@ -1743,8 +1861,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
                long nrpages = vma_pages(vma);
 
                mm->total_vm -= nrpages;
-               if (vma->vm_flags & VM_LOCKED)
-                       mm->locked_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
@@ -1761,17 +1877,17 @@ static void unmap_region(struct mm_struct *mm,
                unsigned long start, unsigned long end)
 {
        struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
-       struct mmu_gather *tlb;
+       struct mmu_gather tlb;
        unsigned long nr_accounted = 0;
 
        lru_add_drain();
-       tlb = tlb_gather_mmu(mm, 0);
+       tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
-                                next? next->vm_start: 0);
-       tlb_finish_mmu(tlb, start, end);
+       free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+                                next ? next->vm_start : 0);
+       tlb_finish_mmu(&tlb, start, end);
 }
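
The mmu_gather is now a caller-stack structure instead of a get_cpu_var()
pointer, which allows sleeping locks such as i_mmap_mutex to be held across
the unmap; the pattern is:

    struct mmu_gather tlb;

    tlb_gather_mmu(&tlb, mm, 0);        /* 0: partial unmap, not full-mm */
    /* ... unmap_vmas(&tlb, ...); free_pgtables(&tlb, ...); ... */
    tlb_finish_mmu(&tlb, start, end);
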
 
 /*
@@ -1787,6 +1903,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long addr;
 
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+       vma->vm_prev = NULL;
        do {
                rb_erase(&vma->vm_rb, &mm->mm_rb);
                mm->map_count--;
@@ -1794,6 +1911,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
+       if (vma)
+               vma->vm_prev = prev;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
@@ -1804,29 +1923,29 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Split a vma into two pieces at address 'addr', a new vma is allocated
- * either for the first part or the tail.
+ * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
+ * munmap path where it doesn't make sense to fail.
  */
-int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
              unsigned long addr, int new_below)
 {
        struct mempolicy *pol;
        struct vm_area_struct *new;
+       int err = -ENOMEM;
 
        if (is_vm_hugetlb_page(vma) && (addr &
                                        ~(huge_page_mask(hstate_vma(vma)))))
                return -EINVAL;
 
-       if (mm->map_count >= sysctl_max_map_count)
-               return -ENOMEM;
-
        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
-               return -ENOMEM;
+               goto out_err;
 
        /* most fields are the same, copy all, and then fixup */
        *new = *vma;
 
+       INIT_LIST_HEAD(&new->anon_vma_chain);
+
        if (new_below)
                new->vm_end = addr;
        else {
@@ -1836,11 +1955,14 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
        pol = mpol_dup(vma_policy(vma));
        if (IS_ERR(pol)) {
-               kmem_cache_free(vm_area_cachep, new);
-               return PTR_ERR(pol);
+               err = PTR_ERR(pol);
+               goto out_free_vma;
        }
        vma_set_policy(new, pol);
 
+       if (anon_vma_clone(new, vma))
+               goto out_free_mpol;
+
        if (new->vm_file) {
                get_file(new->vm_file);
                if (vma->vm_flags & VM_EXECUTABLE)
@@ -1851,12 +1973,43 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                new->vm_ops->open(new);
 
        if (new_below)
-               vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
+               err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
        else
-               vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
+               err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
-       return 0;
+       /* Success. */
+       if (!err)
+               return 0;
+
+       /* Clean everything up if vma_adjust failed. */
+       if (new->vm_ops && new->vm_ops->close)
+               new->vm_ops->close(new);
+       if (new->vm_file) {
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
+               fput(new->vm_file);
+       }
+       unlink_anon_vmas(new);
+ out_free_mpol:
+       mpol_put(pol);
+ out_free_vma:
+       kmem_cache_free(vm_area_cachep, new);
+ out_err:
+       return err;
+}
+
+/*
+ * Split a vma into two pieces at address 'addr', a new vma is allocated
+ * either for the first part or the tail.
+ */
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+             unsigned long addr, int new_below)
+{
+       if (mm->map_count >= sysctl_max_map_count)
+               return -ENOMEM;
+
+       return __split_vma(mm, vma, addr, new_below);
 }
 
 /* Munmap is split into 2 main parts -- this part which finds
@@ -1876,9 +2029,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                return -EINVAL;
 
        /* Find the first overlapping VMA */
-       vma = find_vma_prev(mm, start, &prev);
+       vma = find_vma(mm, start);
        if (!vma)
                return 0;
+       prev = vma->vm_prev;
        /* we have  start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
@@ -1894,7 +2048,17 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
         * places tmp vma above, and higher split_vma places tmp vma below.
         */
        if (start > vma->vm_start) {
-               int error = split_vma(mm, vma, start, 0);
+               int error;
+
+               /*
+                * Make sure that map_count on return from munmap() will
+                * not exceed its limit; but let map_count go just above
+                * its limit temporarily, to help free resources as expected.
+                */
+               if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
+                       return -ENOMEM;
+
+               error = __split_vma(mm, vma, start, 0);
                if (error)
                        return error;
                prev = vma;
@@ -1903,13 +2067,27 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        /* Does it split the last one? */
        last = find_vma(mm, end);
        if (last && end > last->vm_start) {
-               int error = split_vma(mm, last, end, 1);
+               int error = __split_vma(mm, last, end, 1);
                if (error)
                        return error;
        }
        vma = prev? prev->vm_next: mm->mmap;
 
        /*
+        * unlock any mlock()ed ranges before detaching vmas
+        */
+       if (mm->locked_vm) {
+               struct vm_area_struct *tmp = vma;
+               while (tmp && tmp->vm_start < end) {
+                       if (tmp->vm_flags & VM_LOCKED) {
+                               mm->locked_vm -= vma_pages(tmp);
+                               munlock_vma_pages_all(tmp);
+                       }
+                       tmp = tmp->vm_next;
+               }
+       }
+
+       /*
         * Remove the vma's, and unmap the actual pages
         */
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
@@ -1923,7 +2101,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
 EXPORT_SYMBOL(do_munmap);
 
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
        int ret;
        struct mm_struct *mm = current->mm;
@@ -1964,20 +2142,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       if ((addr + len) > TASK_SIZE || (addr + len) < addr)
-               return -EINVAL;
-
-       if (is_hugepage_only_range(mm, addr, len))
-               return -EINVAL;
-
        error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
        if (error)
                return error;
 
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
-       error = arch_mmap_check(addr, len, flags);
-       if (error)
+       error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+       if (error & ~PAGE_MASK)
                return error;
 
        /*
@@ -1987,7 +2159,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
-               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
@@ -2021,8 +2193,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
-       if (vma_merge(mm, prev, addr, addr + len, flags,
-                                       NULL, NULL, pgoff, NULL))
+       vma = vma_merge(mm, prev, addr, addr + len, flags,
+                                       NULL, NULL, pgoff, NULL);
+       if (vma)
                goto out;
 
        /*
@@ -2034,6 +2207,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
                return -ENOMEM;
        }
 
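+       /* empty for now; anon_vma_prepare() links an anon_vma at first fault */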
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
@@ -2042,10 +2216,11 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_page_prot = vm_get_page_prot(flags);
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
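+       /* report the new mapping to perf so profiles can resolve its addresses */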
+       perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED) {
-               mm->locked_vm += len >> PAGE_SHIFT;
-               make_pages_present(addr, addr + len);
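+               /*
+                * mlock_vma_pages_range() returns 0 on success; only then
+                * do the pages count towards locked_vm.
+                */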
+               if (!mlock_vma_pages_range(vma, addr, addr + len))
+                       mm->locked_vm += (len >> PAGE_SHIFT);
        }
        return addr;
 }
@@ -2055,24 +2230,39 @@ EXPORT_SYMBOL(do_brk);
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
 {
-       struct mmu_gather *tlb;
-       struct vm_area_struct *vma = mm->mmap;
+       struct mmu_gather tlb;
+       struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
        unsigned long end;
 
        /* mm's last user has gone, and it's about to be pulled down */
-       arch_exit_mmap(mm);
        mmu_notifier_release(mm);
 
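+       /*
+        * Munlock every VM_LOCKED vma up front, so no page still marked
+        * mlocked reaches the freeing path below.
+        */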
+       if (mm->locked_vm) {
+               vma = mm->mmap;
+               while (vma) {
+                       if (vma->vm_flags & VM_LOCKED)
+                               munlock_vma_pages_all(vma);
+                       vma = vma->vm_next;
+               }
+       }
+
+       arch_exit_mmap(mm);
+
+       vma = mm->mmap;
+       if (!vma)       /* Can happen if dup_mmap() received an OOM */
+               return;
+
        lru_add_drain();
        flush_cache_mm(mm);
-       tlb = tlb_gather_mmu(mm, 1);
-       /* Don't update_hiwater_rss(mm) here, do_exit already did */
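+       /* the mmu_gather lives on the stack now; "1" requests a full-mm teardown */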
+       tlb_gather_mmu(&tlb, mm, 1);
+       /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
-       tlb_finish_mmu(tlb, 0, end);
+
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       tlb_finish_mmu(&tlb, 0, end);
 
        /*
         * Walk the list again, actually closing and freeing it,
@@ -2086,7 +2276,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_lock is taken here.
+ * then i_mmap_mutex is taken here.
  */
 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
@@ -2155,10 +2345,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                if (new_vma) {
                        *new_vma = *vma;
                        pol = mpol_dup(vma_policy(vma));
-                       if (IS_ERR(pol)) {
-                               kmem_cache_free(vm_area_cachep, new_vma);
-                               return NULL;
-                       }
+                       if (IS_ERR(pol))
+                               goto out_free_vma;
+                       INIT_LIST_HEAD(&new_vma->anon_vma_chain);
+                       if (anon_vma_clone(new_vma, vma))
+                               goto out_free_mempol;
                        vma_set_policy(new_vma, pol);
                        new_vma->vm_start = addr;
                        new_vma->vm_end = addr + len;
@@ -2174,6 +2365,12 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                }
        }
        return new_vma;
+
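+       /* unwind in reverse order: drop the dup'ed mempolicy, then the vma copy */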
+ out_free_mempol:
+       mpol_put(pol);
+ out_free_vma:
+       kmem_cache_free(vm_area_cachep, new_vma);
+       return NULL;
 }
 
 /*
@@ -2185,7 +2382,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
        unsigned long cur = mm->total_vm;       /* pages */
        unsigned long lim;
 
-       lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+       lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
 
        if (cur + npages > lim)
                return 0;
@@ -2227,7 +2424,7 @@ static void special_mapping_close(struct vm_area_struct *vma)
 {
 }
 
-static struct vm_operations_struct special_mapping_vmops = {
+static const struct vm_operations_struct special_mapping_vmops = {
        .close = special_mapping_close,
        .fault = special_mapping_fault,
 };
@@ -2245,12 +2442,14 @@ int install_special_mapping(struct mm_struct *mm,
                            unsigned long addr, unsigned long len,
                            unsigned long vm_flags, struct page **pages)
 {
+       int ret;
        struct vm_area_struct *vma;
 
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (unlikely(vma == NULL))
                return -ENOMEM;
 
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
@@ -2261,37 +2460,46 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       if (unlikely(insert_vm_struct(mm, vma))) {
-               kmem_cache_free(vm_area_cachep, vma);
-               return -ENOMEM;
-       }
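+       /* addr_only == 1: the LSM checks just the address, e.g. against mmap_min_addr */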
+       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
+       if (ret)
+               goto out;
+
+       ret = insert_vm_struct(mm, vma);
+       if (ret)
+               goto out;
 
        mm->total_vm += len >> PAGE_SHIFT;
 
+       perf_event_mmap(vma);
+
        return 0;
+
+out:
+       kmem_cache_free(vm_area_cachep, vma);
+       return ret;
 }
 
 static DEFINE_MUTEX(mm_all_locks_mutex);
 
 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
-       if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+               mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->lock. If some other vma in this mm shares
+                * anon_vma->root->mutex. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->lock.
+                * anon_vma->root->mutex.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
-                                      &anon_vma->head.next))
+                                      &anon_vma->root->head.next))
                        BUG();
        }
 }
@@ -2310,7 +2518,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+               mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
        }
 }
 
@@ -2337,7 +2545,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2349,6 +2557,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 int mm_take_all_locks(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
+       struct anon_vma_chain *avc;
        int ret = -EINTR;
 
        BUG_ON(down_read_trylock(&mm->mmap_sem));
@@ -2358,11 +2567,18 @@ int mm_take_all_locks(struct mm_struct *mm)
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (signal_pending(current))
                        goto out_unlock;
-               if (vma->anon_vma)
-                       vm_lock_anon_vma(mm, vma->anon_vma);
                if (vma->vm_file && vma->vm_file->f_mapping)
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
        }
+
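+       /*
+        * Take the anon_vma locks in a second pass, after every i_mmap
+        * lock, to match the i_mmap -> anon_vma ordering used elsewhere.
+        */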
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               if (signal_pending(current))
+                       goto out_unlock;
+               if (vma->anon_vma)
+                       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                               vm_lock_anon_vma(mm, avc->anon_vma);
+       }
+
        ret = 0;
 
 out_unlock:
@@ -2374,7 +2590,7 @@ out_unlock:
 
 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-       if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change to 0 from under
                 * us because we hold the mm_all_locks_mutex.
@@ -2385,12 +2601,12 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->lock.
+                * anon_vma->root->mutex.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
-                                         &anon_vma->head.next))
+                                         &anon_vma->root->head.next))
                        BUG();
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
 }
 
@@ -2401,7 +2617,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                 * AS_MM_ALL_LOCKS can't change to 0 from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                        &mapping->flags))
                        BUG();
@@ -2415,16 +2631,29 @@ static void vm_unlock_mapping(struct address_space *mapping)
 void mm_drop_all_locks(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
+       struct anon_vma_chain *avc;
 
        BUG_ON(down_read_trylock(&mm->mmap_sem));
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->anon_vma)
-                       vm_unlock_anon_vma(vma->anon_vma);
+                       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                               vm_unlock_anon_vma(avc->anon_vma);
                if (vma->vm_file && vma->vm_file->f_mapping)
                        vm_unlock_mapping(vma->vm_file->f_mapping);
        }
 
        mutex_unlock(&mm_all_locks_mutex);
 }
+
+/*
+ * initialise the percpu counter for VM
+ */
+void __init mmap_init(void)
+{
+       int ret;
+
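+       /* percpu_counter_init() fails only on allocation failure, which is fatal this early */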
+       ret = percpu_counter_init(&vm_committed_as, 0);
+       VM_BUG_ON(ret);
+}