nv-tegra.nvidia Code Review - linux-3.10.git/blobdiff - mm/mmap.c
mm: make do_mmap_pgoff return populate as a size in bytes, not as a bool
index 8c05e5b43b69c0f82f218a5f18476e6e7f514aa5..44bb4d86988456d0fb806a62445a798a7f307400 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
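
In short, do_mmap_pgoff() now reports, through a new out-parameter, how many bytes the caller should fault in after dropping mmap_sem, rather than a boolean. A minimal sketch of the intended calling pattern, modeled on the brk() and do_mmap_pgoff() hunks below (the wrapper name here is hypothetical; the real caller in this series is vm_mmap_pgoff(), whose body lives outside this file):

/*
 * Illustrative sketch only, not part of this diff: how a caller is
 * expected to consume the new *populate out-parameter.
 */
static unsigned long example_mmap_caller(struct file *file, unsigned long addr,
                                         unsigned long len, unsigned long prot,
                                         unsigned long flags, unsigned long pgoff)
{
        struct mm_struct *mm = current->mm;
        unsigned long ret, populate;

        down_write(&mm->mmap_sem);
        ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &populate);
        up_write(&mm->mmap_sem);

        /* populate is a byte count now, not a bool */
        if (!IS_ERR_VALUE(ret) && populate)
                mm_populate(ret, populate);
        return ret;
}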
@@ -22,7 +22,7 @@
 #include <linux/security.h>
 #include <linux/hugetlb.h>
 #include <linux/profile.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
@@ -30,6 +30,9 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/uprobes.h>
+#include <linux/rbtree_augmented.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -50,12 +53,6 @@ static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
  * behavior is in parens:
@@ -84,10 +81,28 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
-int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
-int sysctl_overcommit_ratio = 50;      /* default is 50% */
+int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
+int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-struct percpu_counter vm_committed_as;
+/*
+ * Make sure vm_committed_as is in its own cacheline and not shared with
+ * other variables, since it can be updated frequently by several CPUs.
+ */
+struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
+
+/*
+ * The global memory commitment made in the system can be a metric
+ * that can be used to drive ballooning decisions when Linux is hosted
+ * as a guest. On Hyper-V, the host implements a policy engine for dynamically
+ * balancing memory across competing virtual machines that are hosted.
+ * Several metrics drive this policy engine including the guest reported
+ * memory commitment.
+ */
+unsigned long vm_memory_committed(void)
+{
+       return percpu_counter_read_positive(&vm_committed_as);
+}
+EXPORT_SYMBOL_GPL(vm_memory_committed);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
@@ -118,9 +133,17 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                return 0;
 
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
-               unsigned long n;
+               free = global_page_state(NR_FREE_PAGES);
+               free += global_page_state(NR_FILE_PAGES);
+
+               /*
+                * shmem pages shouldn't be counted as free in this
+                * case, they can't be purged, only swapped out, and
+                * that won't affect the overall amount of available
+                * memory in the system.
+                */
+               free -= global_page_state(NR_SHMEM);
 
-               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;
 
                /*
@@ -131,35 +154,19 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);
 
-               /*
-                * Leave the last 3% for root
-                */
-               if (!cap_sys_admin)
-                       free -= free / 32;
-
-               if (free > pages)
-                       return 0;
-
-               /*
-                * nr_free_pages() is very expensive on large systems,
-                * only call if we're about to fail.
-                */
-               n = nr_free_pages();
-
                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
-               if (n <= totalreserve_pages)
+               if (free <= totalreserve_pages)
                        goto error;
                else
-                       n -= totalreserve_pages;
+                       free -= totalreserve_pages;
 
                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
-                       n -= n / 32;
-               free += n;
+                       free -= free / 32;
 
                if (free > pages)
                        return 0;
@@ -190,7 +197,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_lock
+ * Requires inode->i_mapping->i_mmap_mutex
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
@@ -202,14 +209,14 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 
        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
-               list_del_init(&vma->shared.vm_set.list);
+               list_del_init(&vma->shared.nonlinear);
        else
-               vma_prio_tree_remove(vma, &mapping->i_mmap);
+               vma_interval_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
 }
 
 /*
- * Unlink a file-based vm structure from its prio_tree, to hide
+ * Unlink a file-based vm structure from its interval tree, to hide
  * vma from rmap and vmtruncate before freeing its page tables.
  */
 void unlink_file_vma(struct vm_area_struct *vma)
@@ -218,9 +225,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
        if (file) {
                struct address_space *mapping = file->f_mapping;
-               spin_lock(&mapping->i_mmap_lock);
+               mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
        }
 }
 
@@ -234,22 +241,22 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
-       if (vma->vm_file) {
+       if (vma->vm_file)
                fput(vma->vm_file);
-               if (vma->vm_flags & VM_EXECUTABLE)
-                       removed_exe_file_vma(vma->vm_mm);
-       }
        mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
 
+static unsigned long do_brk(unsigned long addr, unsigned long len);
+
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
        unsigned long min_brk;
+       bool populate;
 
        down_write(&mm->mmap_sem);
 
@@ -259,7 +266,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
-       if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+       if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
@@ -299,75 +306,217 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
                goto out;
+
 set_brk:
        mm->brk = brk;
+       populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
+       up_write(&mm->mmap_sem);
+       if (populate)
+               mm_populate(oldbrk, newbrk - oldbrk);
+       return brk;
+
 out:
        retval = mm->brk;
        up_write(&mm->mmap_sem);
        return retval;
 }
 
-#ifdef DEBUG_MM_RB
+static long vma_compute_subtree_gap(struct vm_area_struct *vma)
+{
+       unsigned long max, subtree_gap;
+       max = vma->vm_start;
+       if (vma->vm_prev)
+               max -= vma->vm_prev->vm_end;
+       if (vma->vm_rb.rb_left) {
+               subtree_gap = rb_entry(vma->vm_rb.rb_left,
+                               struct vm_area_struct, vm_rb)->rb_subtree_gap;
+               if (subtree_gap > max)
+                       max = subtree_gap;
+       }
+       if (vma->vm_rb.rb_right) {
+               subtree_gap = rb_entry(vma->vm_rb.rb_right,
+                               struct vm_area_struct, vm_rb)->rb_subtree_gap;
+               if (subtree_gap > max)
+                       max = subtree_gap;
+       }
+       return max;
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
-       int i = 0, j;
+       int i = 0, j, bug = 0;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;
 
        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               if (vma->vm_start < prev)
-                       printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
-               if (vma->vm_start < pend)
+               if (vma->vm_start < prev) {
+                       printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       bug = 1;
+               }
+               if (vma->vm_start < pend) {
                        printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
-               if (vma->vm_start > vma->vm_end)
-                       printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
+                       bug = 1;
+               }
+               if (vma->vm_start > vma->vm_end) {
+                       printk("vm_end %lx < vm_start %lx\n",
+                               vma->vm_end, vma->vm_start);
+                       bug = 1;
+               }
+               if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
+                       printk("free gap %lx, correct %lx\n",
+                              vma->rb_subtree_gap,
+                              vma_compute_subtree_gap(vma));
+                       bug = 1;
+               }
                i++;
                pn = nd;
                prev = vma->vm_start;
                pend = vma->vm_end;
        }
        j = 0;
-       for (nd = pn; nd; nd = rb_prev(nd)) {
+       for (nd = pn; nd; nd = rb_prev(nd))
                j++;
+       if (i != j) {
+               printk("backwards %d, forwards %d\n", j, i);
+               bug = 1;
+       }
+       return bug ? -1 : i;
+}
+
+static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
+{
+       struct rb_node *nd;
+
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+               struct vm_area_struct *vma;
+               vma = rb_entry(nd, struct vm_area_struct, vm_rb);
+               BUG_ON(vma != ignore &&
+                      vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
        }
-       if (i != j)
-               printk("backwards %d, forwards %d\n", j, i), i = 0;
-       return i;
 }
 
 void validate_mm(struct mm_struct *mm)
 {
        int bug = 0;
        int i = 0;
-       struct vm_area_struct *tmp = mm->mmap;
-       while (tmp) {
-               tmp = tmp->vm_next;
+       unsigned long highest_address = 0;
+       struct vm_area_struct *vma = mm->mmap;
+       while (vma) {
+               struct anon_vma_chain *avc;
+               vma_lock_anon_vma(vma);
+               list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                       anon_vma_interval_tree_verify(avc);
+               vma_unlock_anon_vma(vma);
+               highest_address = vma->vm_end;
+               vma = vma->vm_next;
                i++;
        }
-       if (i != mm->map_count)
-               printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
+       if (i != mm->map_count) {
+               printk("map_count %d vm_next %d\n", mm->map_count, i);
+               bug = 1;
+       }
+       if (highest_address != mm->highest_vm_end) {
+               printk("mm->highest_vm_end %lx, found %lx\n",
+                      mm->highest_vm_end, highest_address);
+               bug = 1;
+       }
        i = browse_rb(&mm->mm_rb);
-       if (i != mm->map_count)
-               printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
+       if (i != mm->map_count) {
+               printk("map_count %d rb %d\n", mm->map_count, i);
+               bug = 1;
+       }
        BUG_ON(bug);
 }
 #else
+#define validate_mm_rb(root, ignore) do { } while (0)
 #define validate_mm(mm) do { } while (0)
 #endif
 
-static struct vm_area_struct *
-find_vma_prepare(struct mm_struct *mm, unsigned long addr,
-               struct vm_area_struct **pprev, struct rb_node ***rb_link,
-               struct rb_node ** rb_parent)
+RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
+                    unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
+
+/*
+ * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
+ * vma->vm_prev->vm_end values changed, without modifying the vma's position
+ * in the rbtree.
+ */
+static void vma_gap_update(struct vm_area_struct *vma)
 {
-       struct vm_area_struct * vma;
-       struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
+       /*
+        * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
+        * function that does exactly what we want.
+        */
+       vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
+}
+
+static inline void vma_rb_insert(struct vm_area_struct *vma,
+                                struct rb_root *root)
+{
+       /* All rb_subtree_gap values must be consistent prior to insertion */
+       validate_mm_rb(root, NULL);
+
+       rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
+}
+
+static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
+{
+       /*
+        * All rb_subtree_gap values must be consistent prior to erase,
+        * with the possible exception of the vma being erased.
+        */
+       validate_mm_rb(root, vma);
+
+       /*
+        * Note rb_erase_augmented is a fairly large inline function,
+        * so make sure we instantiate it only once with our desired
+        * augmented rbtree callbacks.
+        */
+       rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
+}
+
+/*
+ * vma has some anon_vma assigned, and is already inserted on that
+ * anon_vma's interval trees.
+ *
+ * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
+ * vma must be removed from the anon_vma's interval trees using
+ * anon_vma_interval_tree_pre_update_vma().
+ *
+ * After the update, the vma will be reinserted using
+ * anon_vma_interval_tree_post_update_vma().
+ *
+ * The entire update must be protected by exclusive mmap_sem and by
+ * the root anon_vma's mutex.
+ */
+static inline void
+anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
+{
+       struct anon_vma_chain *avc;
+
+       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+               anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
+}
+
+static inline void
+anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
+{
+       struct anon_vma_chain *avc;
+
+       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+               anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
+}
+
+static int find_vma_links(struct mm_struct *mm, unsigned long addr,
+               unsigned long end, struct vm_area_struct **pprev,
+               struct rb_node ***rb_link, struct rb_node **rb_parent)
+{
+       struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
        __rb_link = &mm->mm_rb.rb_node;
        rb_prev = __rb_parent = NULL;
-       vma = NULL;
 
        while (*__rb_link) {
                struct vm_area_struct *vma_tmp;
@@ -376,9 +525,9 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
 
                if (vma_tmp->vm_end > addr) {
-                       vma = vma_tmp;
-                       if (vma_tmp->vm_start <= addr)
-                               break;
+                       /* Fail if an existing vma overlaps the area */
+                       if (vma_tmp->vm_start < end)
+                               return -ENOMEM;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
@@ -391,37 +540,31 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
        *rb_link = __rb_link;
        *rb_parent = __rb_parent;
-       return vma;
-}
-
-static inline void
-__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
-               struct vm_area_struct *prev, struct rb_node *rb_parent)
-{
-       struct vm_area_struct *next;
-
-       vma->vm_prev = prev;
-       if (prev) {
-               next = prev->vm_next;
-               prev->vm_next = vma;
-       } else {
-               mm->mmap = vma;
-               if (rb_parent)
-                       next = rb_entry(rb_parent,
-                                       struct vm_area_struct, vm_rb);
-               else
-                       next = NULL;
-       }
-       vma->vm_next = next;
-       if (next)
-               next->vm_prev = vma;
+       return 0;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                struct rb_node **rb_link, struct rb_node *rb_parent)
 {
+       /* Update tracking information for the gap following the new vma. */
+       if (vma->vm_next)
+               vma_gap_update(vma->vm_next);
+       else
+               mm->highest_vm_end = vma->vm_end;
+
+       /*
+        * vma->vm_prev wasn't known when we followed the rbtree to find the
+        * correct insertion point for that vma. As a result, we could not
+        * update the vma vm_rb parents rb_subtree_gap values on the way down.
+        * So, we first insert the vma with a zero rb_subtree_gap value
+        * (to be consistent with what we did on the way down), and then
+        * immediately update the gap to the correct value. Finally we
+        * rebalance the rbtree after all augmented values have been set.
+        */
        rb_link_node(&vma->vm_rb, rb_parent, rb_link);
-       rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+       vma->rb_subtree_gap = 0;
+       vma_gap_update(vma);
+       vma_rb_insert(vma, &mm->mm_rb);
 }
 
 static void __vma_link_file(struct vm_area_struct *vma)
@@ -441,7 +584,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                else
-                       vma_prio_tree_insert(vma, &mapping->i_mmap);
+                       vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }
 }
@@ -464,33 +607,31 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;
 
-       if (mapping) {
-               spin_lock(&mapping->i_mmap_lock);
-               vma->vm_truncate_count = mapping->truncate_count;
-       }
+       if (mapping)
+               mutex_lock(&mapping->i_mmap_mutex);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
 
        mm->map_count++;
        validate_mm(mm);
 }
 
 /*
- * Helper for vma_adjust in the split_vma insert case:
- * insert vm structure into list and rbtree and anon_vma,
- * but it has already been inserted into prio_tree earlier.
+ * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
+ * mm's list and rbtree.  It has already been inserted into the interval tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *__vma, *prev;
+       struct vm_area_struct *prev;
        struct rb_node **rb_link, *rb_parent;
 
-       __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-       BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+       if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                          &prev, &rb_link, &rb_parent))
+               BUG();
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
 }
@@ -499,12 +640,12 @@ static inline void
 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
 {
-       struct vm_area_struct *next = vma->vm_next;
+       struct vm_area_struct *next;
 
-       prev->vm_next = next;
+       vma_rb_erase(vma, &mm->mm_rb);
+       prev->vm_next = next = vma->vm_next;
        if (next)
                next->vm_prev = prev;
-       rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
 }
@@ -523,9 +664,10 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *next = vma->vm_next;
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
-       struct prio_tree_root *root = NULL;
+       struct rb_root *root = NULL;
        struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
+       bool start_changed = false, end_changed = false;
        long adjust_next = 0;
        int remove_next = 0;
 
@@ -574,21 +716,19 @@ again:                    remove_next = 1 + (end > next->vm_end);
 
        if (file) {
                mapping = file->f_mapping;
-               if (!(vma->vm_flags & VM_NONLINEAR))
+               if (!(vma->vm_flags & VM_NONLINEAR)) {
                        root = &mapping->i_mmap;
-               spin_lock(&mapping->i_mmap_lock);
-               if (importer &&
-                   vma->vm_truncate_count != next->vm_truncate_count) {
-                       /*
-                        * unmap_mapping_range might be in progress:
-                        * ensure that the expanding vma is rescanned.
-                        */
-                       importer->vm_truncate_count = 0;
+                       uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+
+                       if (adjust_next)
+                               uprobe_munmap(next, next->vm_start,
+                                                       next->vm_end);
                }
+
+               mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
-                       insert->vm_truncate_count = vma->vm_truncate_count;
                        /*
-                        * Put into prio_tree now, so instantiated pages
+                        * Put into interval tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
@@ -599,26 +739,33 @@ again:                    remove_next = 1 + (end > next->vm_end);
 
        vma_adjust_trans_huge(vma, start, end, adjust_next);
 
-       /*
-        * When changing only vma->vm_end, we don't really need anon_vma
-        * lock. This is a fairly rare case by itself, but the anon_vma
-        * lock may be shared between many sibling processes.  Skipping
-        * the lock for brk adjustments makes a difference sometimes.
-        */
-       if (vma->anon_vma && (insert || importer || start != vma->vm_start)) {
-               anon_vma = vma->anon_vma;
-               anon_vma_lock(anon_vma);
+       anon_vma = vma->anon_vma;
+       if (!anon_vma && adjust_next)
+               anon_vma = next->anon_vma;
+       if (anon_vma) {
+               VM_BUG_ON(adjust_next && next->anon_vma &&
+                         anon_vma != next->anon_vma);
+               anon_vma_lock_write(anon_vma);
+               anon_vma_interval_tree_pre_update_vma(vma);
+               if (adjust_next)
+                       anon_vma_interval_tree_pre_update_vma(next);
        }
 
        if (root) {
                flush_dcache_mmap_lock(mapping);
-               vma_prio_tree_remove(vma, root);
+               vma_interval_tree_remove(vma, root);
                if (adjust_next)
-                       vma_prio_tree_remove(next, root);
+                       vma_interval_tree_remove(next, root);
        }
 
-       vma->vm_start = start;
-       vma->vm_end = end;
+       if (start != vma->vm_start) {
+               vma->vm_start = start;
+               start_changed = true;
+       }
+       if (end != vma->vm_end) {
+               vma->vm_end = end;
+               end_changed = true;
+       }
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next << PAGE_SHIFT;
@@ -627,8 +774,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
 
        if (root) {
                if (adjust_next)
-                       vma_prio_tree_insert(next, root);
-               vma_prio_tree_insert(vma, root);
+                       vma_interval_tree_insert(next, root);
+               vma_interval_tree_insert(vma, root);
                flush_dcache_mmap_unlock(mapping);
        }
 
@@ -647,18 +794,37 @@ again:                    remove_next = 1 + (end > next->vm_end);
                 * (it may either follow vma or precede it).
                 */
                __insert_vm_struct(mm, insert);
+       } else {
+               if (start_changed)
+                       vma_gap_update(vma);
+               if (end_changed) {
+                       if (!next)
+                               mm->highest_vm_end = end;
+                       else if (!adjust_next)
+                               vma_gap_update(next);
+               }
        }
 
-       if (anon_vma)
+       if (anon_vma) {
+               anon_vma_interval_tree_post_update_vma(vma);
+               if (adjust_next)
+                       anon_vma_interval_tree_post_update_vma(next);
                anon_vma_unlock(anon_vma);
+       }
        if (mapping)
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
+
+       if (root) {
+               uprobe_mmap(vma);
+
+               if (adjust_next)
+                       uprobe_mmap(next);
+       }
 
        if (remove_next) {
                if (file) {
+                       uprobe_munmap(next, next->vm_start, next->vm_end);
                        fput(file);
-                       if (next->vm_flags & VM_EXECUTABLE)
-                               removed_exe_file_vma(mm);
                }
                if (next->anon_vma)
                        anon_vma_merge(vma, next);
@@ -670,11 +836,16 @@ again:                    remove_next = 1 + (end > next->vm_end);
                 * we must remove another next too. It would clutter
                 * up the code too much to do both in one go.
                 */
-               if (remove_next == 2) {
-                       next = vma->vm_next;
+               next = vma->vm_next;
+               if (remove_next == 2)
                        goto again;
-               }
+               else if (next)
+                       vma_gap_update(next);
+               else
+                       mm->highest_vm_end = end;
        }
+       if (insert && file)
+               uprobe_mmap(insert);
 
        validate_mm(mm);
 
@@ -688,8 +859,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
-       /* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
-       if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
+       if (vma->vm_flags ^ vm_flags)
                return 0;
        if (vma->vm_file != file)
                return 0;
@@ -699,9 +869,17 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
 }
 
 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
-                                       struct anon_vma *anon_vma2)
+                                       struct anon_vma *anon_vma2,
+                                       struct vm_area_struct *vma)
 {
-       return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
+       /*
+        * The list_is_singular() test is to avoid merging VMA cloned from
+        * parents. This can improve scalability caused by anon_vma lock.
+        */
+       if ((!anon_vma1 || !anon_vma2) && (!vma ||
+               list_is_singular(&vma->anon_vma_chain)))
+               return 1;
+       return anon_vma1 == anon_vma2;
 }
 
 /*
@@ -720,7 +898,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
        if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
        }
@@ -739,7 +917,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 {
        if (is_mergeable_vma(vma, file, vm_flags) &&
-           is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
+           is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
@@ -817,7 +995,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen) &&
                                is_mergeable_anon_vma(prev->anon_vma,
-                                                     next->anon_vma)) {
+                                                     next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
                        err = vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
@@ -928,14 +1106,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
        if (anon_vma)
                return anon_vma;
 try_prev:
-       /*
-        * It is potentially slow to have to call find_vma_prev here.
-        * But it's only on the first write fault on the vma, not
-        * every time, and we could devise a way to avoid it later
-        * (e.g. stash info in next's anon_vma_node when assigning
-        * an anon_vma, or when trying vma_merge).  Another time.
-        */
-       BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+       near = vma->vm_prev;
        if (!near)
                goto none;
 
@@ -961,30 +1132,44 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
 
+       mm->total_vm += pages;
+
        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                        mm->exec_vm += pages;
        } else if (flags & stack_flags)
                mm->stack_vm += pages;
-       if (flags & (VM_RESERVED|VM_IO))
-               mm->reserved_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
 
+/*
+ * If a hint addr is less than mmap_min_addr change hint to be as
+ * low as possible but still greater than mmap_min_addr
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+       hint &= PAGE_MASK;
+       if (((void *)hint != NULL) &&
+           (hint < mmap_min_addr))
+               return PAGE_ALIGN(mmap_min_addr);
+       return hint;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
 
 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
-                       unsigned long flags, unsigned long pgoff)
+                       unsigned long flags, unsigned long pgoff,
+                       unsigned long *populate)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
-       unsigned int vm_flags;
-       int error;
-       unsigned long reqprot = prot;
+       vm_flags_t vm_flags;
+
+       *populate = 0;
 
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1106,13 +1291,25 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                }
        }
 
-       error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
-       if (error)
-               return error;
+       /*
+        * Set 'VM_NORESERVE' if we should not account for the
+        * memory use of this mapping.
+        */
+       if (flags & MAP_NORESERVE) {
+               /* We honor MAP_NORESERVE if allowed to overcommit */
+               if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+                       vm_flags |= VM_NORESERVE;
+
+               /* hugetlb applies strict overcommit unless MAP_NORESERVE */
+               if (file && is_file_hugepages(file))
+                       vm_flags |= VM_NORESERVE;
+       }
 
-       return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+       addr = mmap_region(file, addr, len, vm_flags, pgoff);
+       if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+               *populate = len;
+       return addr;
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
@@ -1136,19 +1333,17 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                 * A dummy user value is used because we are not locking
                 * memory so no accounting is necessary
                 */
-               len = ALIGN(len, huge_page_size(&default_hstate));
-               file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-                                               &user, HUGETLB_ANONHUGE_INODE);
+               file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
+                               VM_NORESERVE,
+                               &user, HUGETLB_ANONHUGE_INODE,
+                               (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }
 
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
+       retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
 out:
@@ -1187,7 +1382,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
  */
 int vma_wants_writenotify(struct vm_area_struct *vma)
 {
-       unsigned int vm_flags = vma->vm_flags;
+       vm_flags_t vm_flags = vma->vm_flags;
 
        /* If it was private or non-writable, the write bit is already clear */
        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
@@ -1203,7 +1398,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                return 0;
 
        /* Specialty mapping? */
-       if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
+       if (vm_flags & VM_PFNMAP)
                return 0;
 
        /* Can the mapping track the dirty pages? */
@@ -1215,7 +1410,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  * We account for memory if it's a private writeable mapping,
  * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 {
        /*
         * hugetlb has its own accounting separate from the core VM
@@ -1228,8 +1423,7 @@ static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
 }
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
-                         unsigned long len, unsigned long flags,
-                         unsigned int vm_flags, unsigned long pgoff)
+               unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
@@ -1242,8 +1436,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        /* Clear old maps */
        error = -ENOMEM;
 munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
+       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
@@ -1253,26 +1446,12 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       /*
-        * Set 'VM_NORESERVE' if we should not account for the
-        * memory use of this mapping.
-        */
-       if ((flags & MAP_NORESERVE)) {
-               /* We honor MAP_NORESERVE if allowed to overcommit */
-               if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-                       vm_flags |= VM_NORESERVE;
-
-               /* hugetlb applies strict overcommit unless MAP_NORESERVE */
-               if (file && is_file_hugepages(file))
-                       vm_flags |= VM_NORESERVE;
-       }
-
        /*
         * Private writable mapping: check memory availability
         */
        if (accountable_mapping(file, vm_flags)) {
                charged = len >> PAGE_SHIFT;
-               if (security_vm_enough_memory(charged))
+               if (security_vm_enough_memory_mm(mm, charged))
                        return -ENOMEM;
                vm_flags |= VM_ACCOUNT;
        }
@@ -1303,8 +1482,9 @@ munmap_back:
        vma->vm_pgoff = pgoff;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+       error = -EINVAL;        /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
        if (file) {
-               error = -EINVAL;
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
@@ -1313,23 +1493,26 @@ munmap_back:
                                goto free_vma;
                        correct_wcount = 1;
                }
-               vma->vm_file = file;
-               get_file(file);
+               vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
-               if (vm_flags & VM_EXECUTABLE)
-                       added_exe_file_vma(mm);
 
                /* Can addr have changed??
                 *
                 * Answer: Yes, several device drivers can do it in their
                 *         f_op->mmap method. -DaveM
+                * Bug: If addr is changed, prev, rb_link, rb_parent should
+                *      be updated for vma_link()
                 */
+               WARN_ON_ONCE(addr != vma->vm_start);
+
                addr = vma->vm_start;
                pgoff = vma->vm_pgoff;
                vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {
+               if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+                       goto free_vma;
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
@@ -1359,13 +1542,18 @@ munmap_back:
 out:
        perf_event_mmap(vma);
 
-       mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               if (!mlock_vma_pages_range(vma, addr, addr + len))
+               if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
+                                       vma == get_gate_vma(current->mm)))
                        mm->locked_vm += (len >> PAGE_SHIFT);
-       } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
-               make_pages_present(addr, addr + len);
+               else
+                       vma->vm_flags &= ~VM_LOCKED;
+       }
+
+       if (file)
+               uprobe_mmap(vma);
+
        return addr;
 
 unmap_and_free_vma:
@@ -1385,6 +1573,206 @@ unacct_error:
        return error;
 }
 
+unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+{
+       /*
+        * We implement the search by looking for an rbtree node that
+        * immediately follows a suitable gap. That is,
+        * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
+        * - gap_end   = vma->vm_start        >= info->low_limit  + length;
+        * - gap_end - gap_start >= length
+        */
+
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+       /* Adjust search length to account for worst case alignment overhead */
+       length = info->length + info->align_mask;
+       if (length < info->length)
+               return -ENOMEM;
+
+       /* Adjust search limits by the desired length */
+       if (info->high_limit < length)
+               return -ENOMEM;
+       high_limit = info->high_limit - length;
+
+       if (info->low_limit > high_limit)
+               return -ENOMEM;
+       low_limit = info->low_limit + length;
+
+       /* Check if rbtree root looks promising */
+       if (RB_EMPTY_ROOT(&mm->mm_rb))
+               goto check_highest;
+       vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+       if (vma->rb_subtree_gap < length)
+               goto check_highest;
+
+       while (true) {
+               /* Visit left subtree if it looks promising */
+               gap_end = vma->vm_start;
+               if (gap_end >= low_limit && vma->vm_rb.rb_left) {
+                       struct vm_area_struct *left =
+                               rb_entry(vma->vm_rb.rb_left,
+                                        struct vm_area_struct, vm_rb);
+                       if (left->rb_subtree_gap >= length) {
+                               vma = left;
+                               continue;
+                       }
+               }
+
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+check_current:
+               /* Check if current node has a suitable gap */
+               if (gap_start > high_limit)
+                       return -ENOMEM;
+               if (gap_end >= low_limit && gap_end - gap_start >= length)
+                       goto found;
+
+               /* Visit right subtree if it looks promising */
+               if (vma->vm_rb.rb_right) {
+                       struct vm_area_struct *right =
+                               rb_entry(vma->vm_rb.rb_right,
+                                        struct vm_area_struct, vm_rb);
+                       if (right->rb_subtree_gap >= length) {
+                               vma = right;
+                               continue;
+                       }
+               }
+
+               /* Go back up the rbtree to find next candidate node */
+               while (true) {
+                       struct rb_node *prev = &vma->vm_rb;
+                       if (!rb_parent(prev))
+                               goto check_highest;
+                       vma = rb_entry(rb_parent(prev),
+                                      struct vm_area_struct, vm_rb);
+                       if (prev == vma->vm_rb.rb_left) {
+                               gap_start = vma->vm_prev->vm_end;
+                               gap_end = vma->vm_start;
+                               goto check_current;
+                       }
+               }
+       }
+
+check_highest:
+       /* Check highest gap, which does not precede any rbtree node */
+       gap_start = mm->highest_vm_end;
+       gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
+       if (gap_start > high_limit)
+               return -ENOMEM;
+
+found:
+       /* We found a suitable gap. Clip it with the original low_limit. */
+       if (gap_start < info->low_limit)
+               gap_start = info->low_limit;
+
+       /* Adjust gap address to the desired alignment */
+       gap_start += (info->align_offset - gap_start) & info->align_mask;
+
+       VM_BUG_ON(gap_start + info->length > info->high_limit);
+       VM_BUG_ON(gap_start + info->length > gap_end);
+       return gap_start;
+}
+
+unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long length, low_limit, high_limit, gap_start, gap_end;
+
+       /* Adjust search length to account for worst case alignment overhead */
+       length = info->length + info->align_mask;
+       if (length < info->length)
+               return -ENOMEM;
+
+       /*
+        * Adjust search limits by the desired length.
+        * See implementation comment at top of unmapped_area().
+        */
+       gap_end = info->high_limit;
+       if (gap_end < length)
+               return -ENOMEM;
+       high_limit = gap_end - length;
+
+       if (info->low_limit > high_limit)
+               return -ENOMEM;
+       low_limit = info->low_limit + length;
+
+       /* Check highest gap, which does not precede any rbtree node */
+       gap_start = mm->highest_vm_end;
+       if (gap_start <= high_limit)
+               goto found_highest;
+
+       /* Check if rbtree root looks promising */
+       if (RB_EMPTY_ROOT(&mm->mm_rb))
+               return -ENOMEM;
+       vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
+       if (vma->rb_subtree_gap < length)
+               return -ENOMEM;
+
+       while (true) {
+               /* Visit right subtree if it looks promising */
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               if (gap_start <= high_limit && vma->vm_rb.rb_right) {
+                       struct vm_area_struct *right =
+                               rb_entry(vma->vm_rb.rb_right,
+                                        struct vm_area_struct, vm_rb);
+                       if (right->rb_subtree_gap >= length) {
+                               vma = right;
+                               continue;
+                       }
+               }
+
+check_current:
+               /* Check if current node has a suitable gap */
+               gap_end = vma->vm_start;
+               if (gap_end < low_limit)
+                       return -ENOMEM;
+               if (gap_start <= high_limit && gap_end - gap_start >= length)
+                       goto found;
+
+               /* Visit left subtree if it looks promising */
+               if (vma->vm_rb.rb_left) {
+                       struct vm_area_struct *left =
+                               rb_entry(vma->vm_rb.rb_left,
+                                        struct vm_area_struct, vm_rb);
+                       if (left->rb_subtree_gap >= length) {
+                               vma = left;
+                               continue;
+                       }
+               }
+
+               /* Go back up the rbtree to find next candidate node */
+               while (true) {
+                       struct rb_node *prev = &vma->vm_rb;
+                       if (!rb_parent(prev))
+                               return -ENOMEM;
+                       vma = rb_entry(rb_parent(prev),
+                                      struct vm_area_struct, vm_rb);
+                       if (prev == vma->vm_rb.rb_right) {
+                               gap_start = vma->vm_prev ?
+                                       vma->vm_prev->vm_end : 0;
+                               goto check_current;
+                       }
+               }
+       }
+
+found:
+       /* We found a suitable gap. Clip it with the original high_limit. */
+       if (gap_end > info->high_limit)
+               gap_end = info->high_limit;
+
+found_highest:
+       /* Compute highest gap address at the desired alignment */
+       gap_end -= info->length;
+       gap_end -= (gap_end - info->align_offset) & info->align_mask;
+
+       VM_BUG_ON(gap_end < info->low_limit);
+       VM_BUG_ON(gap_end < gap_start);
+       return gap_end;
+}
+
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -1403,7 +1791,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       unsigned long start_addr;
+       struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE)
                return -ENOMEM;
@@ -1418,40 +1806,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
-       if (len > mm->cached_hole_size) {
-               start_addr = addr = mm->free_area_cache;
-       } else {
-               start_addr = addr = TASK_UNMAPPED_BASE;
-               mm->cached_hole_size = 0;
-       }
 
-full_search:
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               /* At this point:  (!vma || addr < vma->vm_end). */
-               if (TASK_SIZE - len < addr) {
-                       /*
-                        * Start a new search - just in case we missed
-                        * some holes.
-                        */
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               addr = TASK_UNMAPPED_BASE;
-                               start_addr = addr;
-                               mm->cached_hole_size = 0;
-                               goto full_search;
-                       }
-                       return -ENOMEM;
-               }
-               if (!vma || addr + len <= vma->vm_start) {
-                       /*
-                        * Remember the place where we stopped the search:
-                        */
-                       mm->free_area_cache = addr + len;
-                       return addr;
-               }
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
-               addr = vma->vm_end;
-       }
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = 0;
+       return vm_unmapped_area(&info);
 }
 #endif 
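
For illustration, the conversion above passes align_mask = 0, but unmapped_area() also honours align_offset. A hedged sketch of how an architecture with cache-colouring constraints might fill the same structure (hypothetical helper; the SHMLBA colouring rule and the do_align parameter are assumptions, not part of this diff):

/*
 * Illustrative only: colour shared mappings to SHMLBA so that aliases of
 * the same page land in the same cache set.
 */
static unsigned long example_coloured_unmapped_area(unsigned long len,
                                                    unsigned long pgoff,
                                                    int do_align)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}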
 
@@ -1460,10 +1821,8 @@ void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
        /*
         * Is this a new hole at the lowest possible address?
         */
-       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+       if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
                mm->free_area_cache = addr;
-               mm->cached_hole_size = ~0UL;
-       }
 }
 
 /*
@@ -1479,6 +1838,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
+       struct vm_unmapped_area_info info;
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
@@ -1496,62 +1856,26 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        return addr;
        }
 
-       /* check if free_area_cache is useful for us */
-       if (len <= mm->cached_hole_size) {
-               mm->cached_hole_size = 0;
-               mm->free_area_cache = mm->mmap_base;
-       }
-
-       /* either no address requested or can't fit in requested address hole */
-       addr = mm->free_area_cache;
-
-       /* make sure it can fit in the remaining address space */
-       if (addr > len) {
-               vma = find_vma(mm, addr-len);
-               if (!vma || addr <= vma->vm_start)
-                       /* remember the address as a hint for next time */
-                       return (mm->free_area_cache = addr-len);
-       }
-
-       if (mm->mmap_base < len)
-               goto bottomup;
-
-       addr = mm->mmap_base-len;
+       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+       info.length = len;
+       info.low_limit = PAGE_SIZE;
+       info.high_limit = mm->mmap_base;
+       info.align_mask = 0;
+       addr = vm_unmapped_area(&info);
 
-       do {
-               /*
-                * Lookup failure means no vma is above this address,
-                * else if new region fits below vma->vm_start,
-                * return with success:
-                */
-               vma = find_vma(mm, addr);
-               if (!vma || addr+len <= vma->vm_start)
-                       /* remember the address as a hint for next time */
-                       return (mm->free_area_cache = addr);
-
-               /* remember the largest hole we saw so far */
-               if (addr + mm->cached_hole_size < vma->vm_start)
-                       mm->cached_hole_size = vma->vm_start - addr;
-
-               /* try just below the current vma->vm_start */
-               addr = vma->vm_start-len;
-       } while (len < vma->vm_start);
-
-bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
-       mm->cached_hole_size = ~0UL;
-       mm->free_area_cache = TASK_UNMAPPED_BASE;
-       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-       /*
-        * Restore the topdown base:
-        */
-       mm->free_area_cache = mm->mmap_base;
-       mm->cached_hole_size = ~0UL;
+       if (addr & ~PAGE_MASK) {
+               VM_BUG_ON(addr != -ENOMEM);
+               info.flags = 0;
+               info.low_limit = TASK_UNMAPPED_BASE;
+               info.high_limit = TASK_SIZE;
+               addr = vm_unmapped_area(&info);
+       }
 
        return addr;
 }
@@ -1597,7 +1921,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return arch_rebalance_pgtables(addr, len);
+       addr = arch_rebalance_pgtables(addr, len);
+       error = security_mmap_addr(addr);
+       return error ? error : addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
@@ -1607,72 +1933,61 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma = NULL;
 
-       if (mm) {
-               /* Check the cache first. */
-               /* (Cache hit rate is typically around 35%.) */
-               vma = mm->mmap_cache;
-               if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
-                       struct rb_node * rb_node;
-
-                       rb_node = mm->mm_rb.rb_node;
-                       vma = NULL;
-
-                       while (rb_node) {
-                               struct vm_area_struct * vma_tmp;
-
-                               vma_tmp = rb_entry(rb_node,
-                                               struct vm_area_struct, vm_rb);
-
-                               if (vma_tmp->vm_end > addr) {
-                                       vma = vma_tmp;
-                                       if (vma_tmp->vm_start <= addr)
-                                               break;
-                                       rb_node = rb_node->rb_left;
-                               } else
-                                       rb_node = rb_node->rb_right;
-                       }
-                       if (vma)
-                               mm->mmap_cache = vma;
+       if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
+               return NULL;
+
+       /* Check the cache first. */
+       /* (Cache hit rate is typically around 35%.) */
+       vma = mm->mmap_cache;
+       if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = mm->mm_rb.rb_node;
+               vma = NULL;
+
+               while (rb_node) {
+                       struct vm_area_struct *vma_tmp;
+
+                       vma_tmp = rb_entry(rb_node,
+                                          struct vm_area_struct, vm_rb);
+
+                       if (vma_tmp->vm_end > addr) {
+                               vma = vma_tmp;
+                               if (vma_tmp->vm_start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
                }
+               if (vma)
+                       mm->mmap_cache = vma;
        }
        return vma;
 }
 
 EXPORT_SYMBOL(find_vma);
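Note: find_vma() returns the first vma with vm_end > addr, which may lie entirely above addr, so callers that need containment still test vm_start. A hedged usage sketch (the caller is assumed to hold mmap_sem):

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr) {
                /* addr lies inside vma */
        } else {
                /* addr is unmapped; vma, if non-NULL, starts above addr */
        }
        up_read(&mm->mmap_sem);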
 
-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
+/*
+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+ */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
                        struct vm_area_struct **pprev)
 {
-       struct vm_area_struct *vma = NULL, *prev = NULL;
-       struct rb_node *rb_node;
-       if (!mm)
-               goto out;
-
-       /* Guard against addr being lower than the first VMA */
-       vma = mm->mmap;
-
-       /* Go through the RB tree quickly. */
-       rb_node = mm->mm_rb.rb_node;
-
-       while (rb_node) {
-               struct vm_area_struct *vma_tmp;
-               vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+       struct vm_area_struct *vma;
 
-               if (addr < vma_tmp->vm_end) {
-                       rb_node = rb_node->rb_left;
-               } else {
-                       prev = vma_tmp;
-                       if (!prev->vm_next || (addr < prev->vm_next->vm_end))
-                               break;
+       vma = find_vma(mm, addr);
+       if (vma) {
+               *pprev = vma->vm_prev;
+       } else {
+               struct rb_node *rb_node = mm->mm_rb.rb_node;
+               *pprev = NULL;
+               while (rb_node) {
+                       *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
                        rb_node = rb_node->rb_right;
                }
        }
-
-out:
-       *pprev = prev;
-       return prev ? prev->vm_next : vma;
+       return vma;
 }
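Note: with every vma carrying a vm_prev pointer, the previous vma is available in O(1) once find_vma() has located the covering one; the rb-tree walk above is only needed when addr lies beyond the last vma, in which case *pprev ends up as the rightmost vma. The same pattern is open-coded in do_munmap() later in this diff:

        vma = find_vma(mm, start);
        if (vma)
                prev = vma->vm_prev;    /* doubly linked list, no second tree walk */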
 
 /*
@@ -1719,7 +2034,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
                return -ENOMEM;
 
        /* Ok, everything looks good - let it rip */
-       mm->total_vm += grow;
        if (vma->vm_flags & VM_LOCKED)
                mm->locked_vm += grow;
        vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
@@ -1767,14 +2081,38 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                size = address - vma->vm_start;
                grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error) {
-                       vma->vm_end = address;
-                       perf_event_mmap(vma);
+               error = -ENOMEM;
+               if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               /*
+                                * vma_gap_update() doesn't support concurrent
+                                * updates, but we only hold a shared mmap_sem
+                                * lock here, so we need to protect against
+                                * concurrent vma expansions.
+                                * vma_lock_anon_vma() doesn't help here, as
+                                * we don't guarantee that all growable vmas
+                                * in a mm share the same root anon vma.
+                                * So, we reuse mm->page_table_lock to guard
+                                * against concurrent vma expansions.
+                                */
+                               spin_lock(&vma->vm_mm->page_table_lock);
+                               anon_vma_interval_tree_pre_update_vma(vma);
+                               vma->vm_end = address;
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               if (vma->vm_next)
+                                       vma_gap_update(vma->vm_next);
+                               else
+                                       vma->vm_mm->highest_vm_end = address;
+                               spin_unlock(&vma->vm_mm->page_table_lock);
+
+                               perf_event_mmap(vma);
+                       }
                }
        }
        vma_unlock_anon_vma(vma);
        khugepaged_enter_vma_merge(vma);
+       validate_mm(vma->vm_mm);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
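Note: the growth path now keeps the anon_vma interval trees and the augmented rb-tree gap information consistent while only a shared mmap_sem is held, using page_table_lock as the serializer. Condensed from the hunk above; the same sequence recurs in expand_downwards() below, with vm_start and vm_pgoff adjusted instead of vm_end:

        spin_lock(&vma->vm_mm->page_table_lock);
        anon_vma_interval_tree_pre_update_vma(vma);     /* take vma out of its anon_vma trees */
        vma->vm_end = address;                          /* grow the vma */
        anon_vma_interval_tree_post_update_vma(vma);    /* re-insert with the new extent */
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);           /* refresh the neighbour's cached gap */
        else
                vma->vm_mm->highest_vm_end = address;
        spin_unlock(&vma->vm_mm->page_table_lock);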
@@ -1782,7 +2120,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 /*
  * vma is the first one with address < vma->vm_start.  Have to extend vma.
  */
-static int expand_downwards(struct vm_area_struct *vma,
+int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
        int error;
@@ -1795,7 +2133,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
+       error = security_mmap_addr(address);
        if (error)
                return error;
 
@@ -1818,22 +2156,35 @@ static int expand_downwards(struct vm_area_struct *vma,
                if (grow <= vma->vm_pgoff) {
                        error = acct_stack_growth(vma, size, grow);
                        if (!error) {
+                               /*
+                                * vma_gap_update() doesn't support concurrent
+                                * updates, but we only hold a shared mmap_sem
+                                * lock here, so we need to protect against
+                                * concurrent vma expansions.
+                                * vma_lock_anon_vma() doesn't help here, as
+                                * we don't guarantee that all growable vmas
+                                * in a mm share the same root anon vma.
+                                * So, we reuse mm->page_table_lock to guard
+                                * against concurrent vma expansions.
+                                */
+                               spin_lock(&vma->vm_mm->page_table_lock);
+                               anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
+                               anon_vma_interval_tree_post_update_vma(vma);
+                               vma_gap_update(vma);
+                               spin_unlock(&vma->vm_mm->page_table_lock);
+
                                perf_event_mmap(vma);
                        }
                }
        }
        vma_unlock_anon_vma(vma);
        khugepaged_enter_vma_merge(vma);
+       validate_mm(vma->vm_mm);
        return error;
 }
 
-int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
-{
-       return expand_downwards(vma, address);
-}
-
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
@@ -1851,9 +2202,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!prev || expand_stack(prev, addr))
                return NULL;
-       if (prev->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(prev, addr, prev->vm_end);
-       }
+       if (prev->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
        return prev;
 }
 #else
@@ -1879,9 +2229,8 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
-       if (vma->vm_flags & VM_LOCKED) {
-               mlock_vma_pages_range(vma, addr, start);
-       }
+       if (vma->vm_flags & VM_LOCKED)
+               __mlock_vma_pages_range(vma, addr, start, NULL);
        return vma;
 }
 #endif
@@ -1894,15 +2243,19 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+       unsigned long nr_accounted = 0;
+
        /* Update high watermark before we lower total_vm */
        update_hiwater_vm(mm);
        do {
                long nrpages = vma_pages(vma);
 
-               mm->total_vm -= nrpages;
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
+       vm_unacct_memory(nr_accounted);
        validate_mm(mm);
 }
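Note: the VM_ACCOUNT charge is no longer handed back by unmap_vmas(); remove_vma_list() sums the accounted pages while tearing the list down and releases them in a single vm_unacct_memory() call. The charge side stays as in do_brk() further down, a sketch of the pairing:

        /* charged when the mapping is created ... */
        if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;

        /* ... and released in one batch once the vmas are gone */
        vm_unacct_memory(nr_accounted);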
 
@@ -1916,17 +2269,15 @@ static void unmap_region(struct mm_struct *mm,
                unsigned long start, unsigned long end)
 {
        struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
-       struct mmu_gather *tlb;
-       unsigned long nr_accounted = 0;
+       struct mmu_gather tlb;
 
        lru_add_drain();
-       tlb = tlb_gather_mmu(mm, 0);
+       tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
-       unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
-       free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
-                                next? next->vm_start: 0);
-       tlb_finish_mmu(tlb, start, end);
+       unmap_vmas(&tlb, vma, start, end);
+       free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+                                next ? next->vm_start : 0);
+       tlb_finish_mmu(&tlb, start, end);
 }
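Note: the mmu_gather state is now an on-stack structure passed by address rather than a per-CPU pointer returned by tlb_gather_mmu(). The calling convention as used in this hunk, with the third argument distinguishing a partial unmap from the full-mm teardown done in exit_mmap() below (floor/ceiling stand for the prev/next bounds shown above):

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 0);               /* 0 = partial unmap, 1 = whole mm */
        unmap_vmas(&tlb, vma, start, end);
        free_pgtables(&tlb, vma, floor, ceiling);
        tlb_finish_mmu(&tlb, start, end);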
 
 /*
@@ -1944,14 +2295,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
        vma->vm_prev = NULL;
        do {
-               rb_erase(&vma->vm_rb, &mm->mm_rb);
+               vma_rb_erase(vma, &mm->mm_rb);
                mm->map_count--;
                tail_vma = vma;
                vma = vma->vm_next;
        } while (vma && vma->vm_start < end);
        *insertion_point = vma;
-       if (vma)
+       if (vma) {
                vma->vm_prev = prev;
+               vma_gap_update(vma);
+       } else
+               mm->highest_vm_end = prev ? prev->vm_end : 0;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;
@@ -2002,11 +2356,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        if (anon_vma_clone(new, vma))
                goto out_free_mpol;
 
-       if (new->vm_file) {
+       if (new->vm_file)
                get_file(new->vm_file);
-               if (vma->vm_flags & VM_EXECUTABLE)
-                       added_exe_file_vma(mm);
-       }
 
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
@@ -2024,11 +2375,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
        /* Clean everything up if vma_adjust failed. */
        if (new->vm_ops && new->vm_ops->close)
                new->vm_ops->close(new);
-       if (new->vm_file) {
-               if (vma->vm_flags & VM_EXECUTABLE)
-                       removed_exe_file_vma(mm);
+       if (new->vm_file)
                fput(new->vm_file);
-       }
        unlink_anon_vmas(new);
  out_free_mpol:
        mpol_put(pol);
@@ -2068,9 +2416,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                return -EINVAL;
 
        /* Find the first overlapping VMA */
-       vma = find_vma_prev(mm, start, &prev);
+       vma = find_vma(mm, start);
        if (!vma)
                return 0;
+       prev = vma->vm_prev;
        /* we have  start < vma->vm_end  */
 
        /* if it doesn't overlap, we have nothing.. */
@@ -2137,20 +2486,23 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        return 0;
 }
 
-EXPORT_SYMBOL(do_munmap);
-
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long start, size_t len)
 {
        int ret;
        struct mm_struct *mm = current->mm;
 
-       profile_munmap(addr);
-
        down_write(&mm->mmap_sem);
-       ret = do_munmap(mm, addr, len);
+       ret = do_munmap(mm, start, len);
        up_write(&mm->mmap_sem);
        return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+       profile_munmap(addr);
+       return vm_munmap(addr, len);
+}
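Note: do_munmap() loses its EXPORT_SYMBOL; in-kernel users are expected to go through vm_munmap(), which takes the mmap_sem write lock on current->mm itself, so a driver-side call reduces to a sketch like:

        ret = vm_munmap(start, len);    /* locks and unlocks current->mm->mmap_sem internally */
        if (ret)
                return ret;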
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
@@ -2167,7 +2519,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-unsigned long do_brk(unsigned long addr, unsigned long len)
+static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
        struct mm_struct * mm = current->mm;
        struct vm_area_struct * vma, * prev;
@@ -2180,10 +2532,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (!len)
                return addr;
 
-       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
-       if (error)
-               return error;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2213,8 +2561,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
         * Clear old maps.  this also does some error checking for us
         */
  munmap_back:
-       vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-       if (vma && vma->vm_start < addr + len) {
+       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
@@ -2227,7 +2574,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;
 
-       if (security_vm_enough_memory(len >> PAGE_SHIFT))
+       if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        /* Can we just expand an old private anonymous mapping? */
@@ -2256,22 +2603,33 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
        perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
-       if (flags & VM_LOCKED) {
-               if (!mlock_vma_pages_range(vma, addr, addr + len))
-                       mm->locked_vm += (len >> PAGE_SHIFT);
-       }
+       if (flags & VM_LOCKED)
+               mm->locked_vm += (len >> PAGE_SHIFT);
        return addr;
 }
 
-EXPORT_SYMBOL(do_brk);
+unsigned long vm_brk(unsigned long addr, unsigned long len)
+{
+       struct mm_struct *mm = current->mm;
+       unsigned long ret;
+       bool populate;
+
+       down_write(&mm->mmap_sem);
+       ret = do_brk(addr, len);
+       populate = ((mm->def_flags & VM_LOCKED) != 0);
+       up_write(&mm->mmap_sem);
+       if (populate)
+               mm_populate(addr, len);
+       return ret;
+}
+EXPORT_SYMBOL(vm_brk);
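Note: do_brk() becomes static and vm_brk() is the exported wrapper; it takes mmap_sem, creates the anonymous mapping, and only after dropping the lock faults the range in via mm_populate() when mm->def_flags carries VM_LOCKED (mlockall(MCL_FUTURE)), so population happens without holding mmap_sem. Caller sketch:

        addr = vm_brk(addr, len);
        if (addr & ~PAGE_MASK)          /* a negative errno comes back non-page-aligned */
                return addr;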
 
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
 {
-       struct mmu_gather *tlb;
+       struct mmu_gather tlb;
        struct vm_area_struct *vma;
        unsigned long nr_accounted = 0;
-       unsigned long end;
 
        /* mm's last user has gone, and it's about to be pulled down */
        mmu_notifier_release(mm);
@@ -2293,33 +2651,36 @@ void exit_mmap(struct mm_struct *mm)
 
        lru_add_drain();
        flush_cache_mm(mm);
-       tlb = tlb_gather_mmu(mm, 1);
+       tlb_gather_mmu(&tlb, mm, 1);
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
-       end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-       vm_unacct_memory(nr_accounted);
+       unmap_vmas(&tlb, vma, 0, -1);
 
-       free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
-       tlb_finish_mmu(tlb, 0, end);
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       tlb_finish_mmu(&tlb, 0, -1);
 
        /*
         * Walk the list again, actually closing and freeing it,
         * with preemption enabled, without holding any MM locks.
         */
-       while (vma)
+       while (vma) {
+               if (vma->vm_flags & VM_ACCOUNT)
+                       nr_accounted += vma_pages(vma);
                vma = remove_vma(vma);
+       }
+       vm_unacct_memory(nr_accounted);
 
-       BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+       WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_lock is taken here.
+ * then i_mmap_mutex is taken here.
  */
-int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct * __vma, * prev;
-       struct rb_node ** rb_link, * rb_parent;
+       struct vm_area_struct *prev;
+       struct rb_node **rb_link, *rb_parent;
 
        /*
         * The vm_pgoff of a purely anonymous vma should be irrelevant
@@ -2337,12 +2698,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
                BUG_ON(vma->anon_vma);
                vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
        }
-       __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
-       if (__vma && __vma->vm_start < vma->vm_end)
+       if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                          &prev, &rb_link, &rb_parent))
                return -ENOMEM;
        if ((vma->vm_flags & VM_ACCOUNT) &&
             security_vm_enough_memory_mm(mm, vma_pages(vma)))
                return -ENOMEM;
+
        vma_link(mm, vma, prev, rb_link, rb_parent);
        return 0;
 }
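Note: find_vma_prepare() plus the explicit overlap test is folded into find_vma_links(), which returns non-zero when [addr, end) overlaps an existing vma and otherwise fills in the list predecessor and the rb-tree insertion point, as used here and in do_brk() above:

        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                return -ENOMEM;                         /* range overlaps an existing vma */
        vma_link(mm, vma, prev, rb_link, rb_parent);    /* otherwise link at the computed spot */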
@@ -2352,7 +2714,8 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  * prior to moving page table entries, to effect an mremap move.
  */
 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
-       unsigned long addr, unsigned long len, pgoff_t pgoff)
+       unsigned long addr, unsigned long len, pgoff_t pgoff,
+       bool *need_rmap_locks)
 {
        struct vm_area_struct *vma = *vmap;
        unsigned long vma_start = vma->vm_start;
@@ -2360,46 +2723,63 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
        struct vm_area_struct *new_vma, *prev;
        struct rb_node **rb_link, *rb_parent;
        struct mempolicy *pol;
+       bool faulted_in_anon_vma = true;
 
        /*
         * If anonymous vma has not yet been faulted, update new pgoff
         * to match new location, to increase its chance of merging.
         */
-       if (!vma->vm_file && !vma->anon_vma)
+       if (unlikely(!vma->vm_file && !vma->anon_vma)) {
                pgoff = addr >> PAGE_SHIFT;
+               faulted_in_anon_vma = false;
+       }
 
-       find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+       if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+               return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
                 */
-               if (vma_start >= new_vma->vm_start &&
-                   vma_start < new_vma->vm_end)
-                       *vmap = new_vma;
+               if (unlikely(vma_start >= new_vma->vm_start &&
+                            vma_start < new_vma->vm_end)) {
+                       /*
+                        * The only way we can get a vma_merge with
+                        * self during an mremap is if the vma hasn't
+                        * been faulted in yet and we were allowed to
+                        * reset the dst vma->vm_pgoff to the
+                        * destination address of the mremap to allow
+                        * the merge to happen. mremap must change the
+                        * vm_pgoff linearity between src and dst vmas
+                        * (in turn preventing a vma_merge) to be
+                        * safe. It is only safe to keep the vm_pgoff
+                        * linear if there are no pages mapped yet.
+                        */
+                       VM_BUG_ON(faulted_in_anon_vma);
+                       *vmap = vma = new_vma;
+               }
+               *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
        } else {
                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
+                       new_vma->vm_start = addr;
+                       new_vma->vm_end = addr + len;
+                       new_vma->vm_pgoff = pgoff;
                        pol = mpol_dup(vma_policy(vma));
                        if (IS_ERR(pol))
                                goto out_free_vma;
+                       vma_set_policy(new_vma, pol);
                        INIT_LIST_HEAD(&new_vma->anon_vma_chain);
                        if (anon_vma_clone(new_vma, vma))
                                goto out_free_mempol;
-                       vma_set_policy(new_vma, pol);
-                       new_vma->vm_start = addr;
-                       new_vma->vm_end = addr + len;
-                       new_vma->vm_pgoff = pgoff;
-                       if (new_vma->vm_file) {
+                       if (new_vma->vm_file)
                                get_file(new_vma->vm_file);
-                               if (vma->vm_flags & VM_EXECUTABLE)
-                                       added_exe_file_vma(mm);
-                       }
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        vma_link(mm, new_vma, prev, rb_link, rb_parent);
+                       *need_rmap_locks = false;
                }
        }
        return new_vma;
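Note: copy_vma() now tells its caller whether the rmap locks must be held while the page tables are moved: when the copy is not placed at a strictly higher vm_pgoff, an rmap walker scanning the interval tree in order could reach the destination vma before the source and miss entries, hence *need_rmap_locks. A hedged sketch of the caller side as it looks in mm/mremap.c of the same series (not part of this file):

        bool need_rmap_locks;

        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;
        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);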
@@ -2498,10 +2878,6 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_ops = &special_mapping_vmops;
        vma->vm_private_data = pages;
 
-       ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
-       if (ret)
-               goto out;
-
        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;
@@ -2521,23 +2897,23 @@ static DEFINE_MUTEX(mm_all_locks_mutex);
 
 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
-       if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
+       if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
+               down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->root->lock. If some other vma in this mm shares
+                * anon_vma->root->rwsem. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->root->lock.
+                * anon_vma->root->rwsem.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
-                                      &anon_vma->root->head.next))
+                                      &anon_vma->root->rb_root.rb_node))
                        BUG();
        }
 }
@@ -2556,7 +2932,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
-               spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+               mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
        }
 }
 
@@ -2578,12 +2954,12 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * A single task can't take more than one mm_take_all_locks() in a row
  * or it would deadlock.
  *
- * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
+ * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
  * mapping->flags avoid to take the same lock twice, if more than one
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2596,7 +2972,6 @@ int mm_take_all_locks(struct mm_struct *mm)
 {
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
-       int ret = -EINTR;
 
        BUG_ON(down_read_trylock(&mm->mmap_sem));
 
@@ -2617,32 +2992,30 @@ int mm_take_all_locks(struct mm_struct *mm)
                                vm_lock_anon_vma(mm, avc->anon_vma);
        }
 
-       ret = 0;
+       return 0;
 
 out_unlock:
-       if (ret)
-               mm_drop_all_locks(mm);
-
-       return ret;
+       mm_drop_all_locks(mm);
+       return -EINTR;
 }
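Note: mm_take_all_locks() now returns 0 or -EINTR directly instead of carrying the result in a local. A hedged caller sketch (the -EINTR path is taken when a pending signal interrupts the vma walk, which this hunk does not show):

        ret = mm_take_all_locks(mm);
        if (ret)
                return ret;             /* -EINTR */
        /* ... e.g. publish an mmu notifier while no rmap walk can run ... */
        mm_drop_all_locks(mm);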
 
 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-       if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
+       if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
                /*
                 * The LSB of head.next can't change to 0 from under
                 * us because we hold the mm_all_locks_mutex.
                 *
                 * We must however clear the bitflag before unlocking
-                * the vma so the users using the anon_vma->head will
+                * the vma so the users using the anon_vma->rb_root will
                 * never see our bitflag.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->root->lock.
+                * anon_vma->root->rwsem.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
-                                         &anon_vma->root->head.next))
+                                         &anon_vma->root->rb_root.rb_node))
                        BUG();
                anon_vma_unlock(anon_vma);
        }
@@ -2655,7 +3028,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                 * AS_MM_ALL_LOCKS can't change to 0 from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_unlock(&mapping->i_mmap_lock);
+               mutex_unlock(&mapping->i_mmap_mutex);
                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                        &mapping->flags))
                        BUG();