mm: don't access vm_flags as 'int'
diff --git a/mm/memory.c b/mm/memory.c
index 4b5200f..fc24f7d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -56,6 +56,7 @@
 #include <linux/kallsyms.h>
 #include <linux/swapops.h>
 #include <linux/elf.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -108,6 +109,286 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
+unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+       zero_pfn = page_to_pfn(ZERO_PAGE(0));
+       return 0;
+}
+core_initcall(init_zero_pfn);
+
+
+#if defined(SPLIT_RSS_COUNTING)
+
+static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
+{
+       int i;
+
+       for (i = 0; i < NR_MM_COUNTERS; i++) {
+               if (task->rss_stat.count[i]) {
+                       add_mm_counter(mm, i, task->rss_stat.count[i]);
+                       task->rss_stat.count[i] = 0;
+               }
+       }
+       task->rss_stat.events = 0;
+}
+
+static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
+{
+       struct task_struct *task = current;
+
+       if (likely(task->mm == mm))
+               task->rss_stat.count[member] += val;
+       else
+               add_mm_counter(mm, member, val);
+}
+#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
+#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
+
+/* sync counter once per 64 page faults */
+#define TASK_RSS_EVENTS_THRESH (64)
+static void check_sync_rss_stat(struct task_struct *task)
+{
+       if (unlikely(task != current))
+               return;
+       if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
+               __sync_task_rss_stat(task, task->mm);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+       long val = 0;
+
+       /*
+        * Don't use task->mm here, to avoid having to pin it with
+        * get_task_mm(); the caller must guarantee that mm stays valid.
+        */
+       val = atomic_long_read(&mm->rss_stat.count[member]);
+       /*
+        * The counter is updated asynchronously and may temporarily go
+        * negative, which is never a meaningful value for users.
+        */
+       if (val < 0)
+               return 0;
+       return (unsigned long)val;
+}
+
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+       __sync_task_rss_stat(task, mm);
+}
+#else /* SPLIT_RSS_COUNTING */
+
+#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
+#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
+
+static void check_sync_rss_stat(struct task_struct *task)
+{
+}
+
+#endif /* SPLIT_RSS_COUNTING */
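
For orientation, a minimal sketch (not part of the patch; the helper name is invented) of how a fault path is expected to use these counters: bump the cheap per-task counter, then let the periodic check fold everything back into mm->rss_stat.

static void example_account_anon_fault(struct mm_struct *mm)
{
	inc_mm_counter_fast(mm, MM_ANONPAGES);	/* per-task count when current->mm == mm */
	check_sync_rss_stat(current);		/* folds counters back every ~64 events */
}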
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static int tlb_next_batch(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
+
+       batch = tlb->active;
+       if (batch->next) {
+               tlb->active = batch->next;
+               return 1;
+       }
+
+       batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+       if (!batch)
+               return 0;
+
+       batch->next = NULL;
+       batch->nr   = 0;
+       batch->max  = MAX_GATHER_BATCH;
+
+       tlb->active->next = batch;
+       tlb->active = batch;
+
+       return 1;
+}
+
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. The @fullmm argument is used when @mm is without
+ *     users and we're going to destroy the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+{
+       tlb->mm = mm;
+
+       tlb->fullmm     = fullmm;
+       tlb->need_flush = 0;
+       tlb->fast_mode  = (num_possible_cpus() == 1);
+       tlb->local.next = NULL;
+       tlb->local.nr   = 0;
+       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+       tlb->active     = &tlb->local;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb->batch = NULL;
+#endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
+
+       if (!tlb->need_flush)
+               return;
+       tlb->need_flush = 0;
+       tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb_table_flush(tlb);
+#endif
+
+       if (tlb_fast_mode(tlb))
+               return;
+
+       for (batch = &tlb->local; batch; batch = batch->next) {
+               free_pages_and_swap_cache(batch->pages, batch->nr);
+               batch->nr = 0;
+       }
+       tlb->active = &tlb->local;
+}
+
+/* tlb_finish_mmu
+ *     Called at the end of the shootdown operation to free up any resources
+ *     that were required.
+ */
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+       struct mmu_gather_batch *batch, *next;
+
+       tlb_flush_mmu(tlb);
+
+       /* keep the page table cache within bounds */
+       check_pgt_cache();
+
+       for (batch = tlb->local.next; batch; batch = next) {
+               next = batch->next;
+               free_pages((unsigned long)batch, 0);
+       }
+       tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ *     Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ *     handling the additional races in SMP caused by other CPUs caching valid
+ *     mappings in their TLBs. Returns the number of free page slots left.
+ *     When out of page slots we must call tlb_flush_mmu().
+ */
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+       struct mmu_gather_batch *batch;
+
+       tlb->need_flush = 1;
+
+       if (tlb_fast_mode(tlb)) {
+               free_page_and_swap_cache(page);
+               return 1; /* avoid calling tlb_flush_mmu() */
+       }
+
+       batch = tlb->active;
+       batch->pages[batch->nr++] = page;
+       if (batch->nr == batch->max) {
+               if (!tlb_next_batch(tlb))
+                       return 0;
+       }
+       VM_BUG_ON(batch->nr > batch->max);
+
+       return batch->max - batch->nr;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
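
A minimal caller-side sketch of the on-stack gather lifecycle, mirroring what zap_page_range() does further down (the function name is invented):

static void example_unmap_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);		/* on-stack gather, not a full-mm teardown */
	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free batched pages */
}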
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+       /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+       /*
+        * This isn't an RCU grace period and hence the page-tables cannot be
+        * assumed to be actually RCU-freed.
+        *
+        * It is however sufficient for software page-table walkers that rely on
+        * IRQ disabling. See the comment near struct mmu_table_batch.
+        */
+       smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+       __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+       struct mmu_table_batch *batch;
+       int i;
+
+       batch = container_of(head, struct mmu_table_batch, rcu);
+
+       for (i = 0; i < batch->nr; i++)
+               __tlb_remove_table(batch->tables[i]);
+
+       free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       if (*batch) {
+               call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+               *batch = NULL;
+       }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       tlb->need_flush = 1;
+
+       /*
+        * When there are fewer than two users of this mm there cannot
+        * be a concurrent page-table walk.
+        */
+       if (atomic_read(&tlb->mm->mm_users) < 2) {
+               __tlb_remove_table(table);
+               return;
+       }
+
+       if (*batch == NULL) {
+               *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+               if (*batch == NULL) {
+                       tlb_remove_table_one(table);
+                       return;
+               }
+               (*batch)->nr = 0;
+       }
+       (*batch)->tables[(*batch)->nr++] = table;
+       if ((*batch)->nr == MAX_TABLE_BATCH)
+               tlb_table_flush(tlb);
+}
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
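
A hedged sketch of the architecture side, assuming a hypothetical arch: it supplies __tlb_remove_table() to do the real free and routes its page-table freeing through tlb_remove_table(), so software walkers that rely on IRQ disabling stay safe.

/* Hypothetical arch glue; helper names are illustrative only. */
void __tlb_remove_table(void *table)
{
	free_page((unsigned long)table);	/* the real free, once it is safe */
}

static inline void example_arch_free_pte_table(struct mmu_gather *tlb,
					       void *table)
{
	tlb_remove_table(tlb, table);		/* deferred via RCU or an IPI sync */
}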
 
 /*
  * If a p?d_bad entry is found while walking page tables, report
@@ -223,7 +504,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long start;
 
        /*
         * The next few lines have given us lots of grief...
@@ -267,7 +547,6 @@ void free_pgd_range(struct mmu_gather *tlb,
        if (addr > end - 1)
                return;
 
-       start = addr;
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -285,9 +564,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long addr = vma->vm_start;
 
                /*
-                * Hide vma from rmap and vmtruncate before freeing pgtables
+                * Hide vma from rmap and truncate_pagecache before freeing
+                * pgtables
                 */
-               anon_vma_unlink(vma);
+               unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
                if (is_vm_hugetlb_page(vma)) {
@@ -301,7 +581,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
-                               anon_vma_unlink(vma);
+                               unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
@@ -311,9 +591,11 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
        }
 }
 
-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+               pmd_t *pmd, unsigned long address)
 {
        pgtable_t new = pte_alloc_one(mm, address);
+       int wait_split_huge_page;
        if (!new)
                return -ENOMEM;
 
@@ -333,14 +615,18 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
        spin_lock(&mm->page_table_lock);
-       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
+       wait_split_huge_page = 0;
+       if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                mm->nr_ptes++;
                pmd_populate(mm, pmd, new);
                new = NULL;
-       }
+       } else if (unlikely(pmd_trans_splitting(*pmd)))
+               wait_split_huge_page = 1;
        spin_unlock(&mm->page_table_lock);
        if (new)
                pte_free(mm, new);
+       if (wait_split_huge_page)
+               wait_split_huge_page(vma->anon_vma, pmd);
        return 0;
 }
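
The new vma argument exists so the pte_alloc_map() wrapper in <linux/mm.h> can hand the anon_vma to wait_split_huge_page(); roughly, as a simplified sketch of that wrapper rather than its exact text:

#define example_pte_alloc_map(mm, vma, pmd, address)			\
	((unlikely(pmd_none(*(pmd))) &&					\
	  __pte_alloc(mm, vma, pmd, address)) ?				\
		NULL : pte_offset_map(pmd, address))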
 
@@ -353,22 +639,31 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        smp_wmb(); /* See comment in __pte_alloc */
 
        spin_lock(&init_mm.page_table_lock);
-       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
+       if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                pmd_populate_kernel(&init_mm, pmd, new);
                new = NULL;
-       }
+       } else
+               VM_BUG_ON(pmd_trans_splitting(*pmd));
        spin_unlock(&init_mm.page_table_lock);
        if (new)
                pte_free_kernel(&init_mm, new);
        return 0;
 }
 
-static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+static inline void init_rss_vec(int *rss)
+{
+       memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
 {
-       if (file_rss)
-               add_mm_counter(mm, file_rss, file_rss);
-       if (anon_rss)
-               add_mm_counter(mm, anon_rss, anon_rss);
+       int i;
+
+       if (current->mm == mm)
+               sync_mm_rss(current, mm);
+       for (i = 0; i < NR_MM_COUNTERS; i++)
+               if (rss[i])
+                       add_mm_counter(mm, i, rss[i]);
 }
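
The vector form replaces the old file_rss/anon_rss pair; a minimal sketch of the intended pattern, as copy_pte_range() and zap_pte_range() use it below (function name invented):

static void example_account_batch(struct mm_struct *mm)
{
	int rss[NR_MM_COUNTERS];

	init_rss_vec(rss);
	rss[MM_ANONPAGES]++;		/* e.g. one anonymous page added */
	rss[MM_FILEPAGES]--;		/* e.g. one file page removed */
	add_mm_rss_vec(mm, rss);	/* fold the whole vector in one pass */
}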
 
 /*
@@ -417,12 +712,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                current->comm,
                (long long)pte_val(pte), (long long)pmd_val(*pmd));
-       if (page) {
-               printk(KERN_ALERT
-               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
-               page, (void *)page->flags, page_count(page),
-               page_mapcount(page), page->mapping, page->index);
-       }
+       if (page)
+               dump_page(page);
        printk(KERN_ALERT
                "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
@@ -439,11 +730,25 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        add_taint(TAINT_BAD_PAGE);
 }
 
-static inline int is_cow_mapping(unsigned int flags)
+static inline int is_cow_mapping(vm_flags_t flags)
 {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+       return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+       return zero_pfn;
+}
+#endif
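
The #ifndef guards let an architecture with several (e.g. cache-coloured) zero pages override these helpers. Their typical use is building the special zero-page pte for anonymous read faults, roughly (sketch only, helper name invented):

static pte_t example_zero_page_pte(struct vm_area_struct *vma,
				   unsigned long address)
{
	/* special pte: vm_normal_page() sees is_zero_pfn() and returns NULL */
	return pte_mkspecial(pfn_pte(my_zero_pfn(address), vma->vm_page_prot));
}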
+
 /*
  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
@@ -499,7 +804,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
-               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+               if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+                       return NULL;
+               if (!is_zero_pfn(pfn))
                        print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
@@ -521,6 +828,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
+       if (is_zero_pfn(pfn))
+               return NULL;
 check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
@@ -541,7 +850,7 @@ out:
  * covered by this vma.
  */
 
-static inline void
+static inline unsigned long
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
                unsigned long addr, int *rss)
@@ -555,7 +864,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       swap_duplicate(entry);
+                       if (swap_duplicate(entry) < 0)
+                               return entry.val;
+
                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
@@ -564,7 +875,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
-                       if (is_write_migration_entry(entry) &&
+                       if (likely(!non_swap_entry(entry)))
+                               rss[MM_SWAPENTS]++;
+                       else if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both parent
@@ -599,30 +912,39 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (page) {
                get_page(page);
                page_dup_rmap(page);
-               rss[PageAnon(page)]++;
+               if (PageAnon(page))
+                       rss[MM_ANONPAGES]++;
+               else
+                       rss[MM_FILEPAGES]++;
        }
 
 out_set_pte:
        set_pte_at(dst_mm, addr, dst_pte, pte);
+       return 0;
 }
 
-static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-               pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
-               unsigned long addr, unsigned long end)
+int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+                  pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
+                  unsigned long addr, unsigned long end)
 {
+       pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
-       int rss[2];
+       int rss[NR_MM_COUNTERS];
+       swp_entry_t entry = (swp_entry_t){0};
 
 again:
-       rss[1] = rss[0] = 0;
+       init_rss_vec(rss);
+
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
-       src_pte = pte_offset_map_nested(src_pmd, addr);
+       src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+       orig_src_pte = src_pte;
+       orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();
 
        do {
@@ -640,16 +962,25 @@ again:
                        progress++;
                        continue;
                }
-               copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+               entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+                                                       vma, addr, rss);
+               if (entry.val)
+                       break;
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
-       pte_unmap_nested(src_pte - 1);
-       add_mm_rss(dst_mm, rss[0], rss[1]);
-       pte_unmap_unlock(dst_pte - 1, dst_ptl);
+       pte_unmap(orig_src_pte);
+       add_mm_rss_vec(dst_mm, rss);
+       pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
+
+       if (entry.val) {
+               if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+                       return -ENOMEM;
+               progress = 0;
+       }
        if (addr != end)
                goto again;
        return 0;
@@ -668,6 +999,17 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               if (pmd_trans_huge(*src_pmd)) {
+                       int err;
+                       VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
+                       err = copy_huge_pmd(dst_mm, src_mm,
+                                           dst_pmd, src_pmd, addr, vma);
+                       if (err == -ENOMEM)
+                               return -ENOMEM;
+                       if (!err)
+                               continue;
+                       /* fall through */
+               }
                if (pmd_none_or_clear_bad(src_pmd))
                        continue;
                if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
@@ -764,25 +1106,24 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        struct mm_struct *mm = tlb->mm;
-       pte_t *pte;
+       int force_flush = 0;
+       int rss[NR_MM_COUNTERS];
        spinlock_t *ptl;
-       int file_rss = 0;
-       int anon_rss = 0;
+       pte_t *pte;
 
+again:
+       init_rss_vec(rss);
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = *pte;
                if (pte_none(ptent)) {
-                       (*zap_work)--;
                        continue;
                }
 
-               (*zap_work) -= PAGE_SIZE;
-
                if (pte_present(ptent)) {
                        struct page *page;
 
@@ -816,19 +1157,21 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                set_pte_at(mm, addr, pte,
                                           pgoff_to_pte(page->index));
                        if (PageAnon(page))
-                               anon_rss--;
+                               rss[MM_ANONPAGES]--;
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent) &&
                                    likely(!VM_SequentialReadHint(vma)))
                                        mark_page_accessed(page);
-                               file_rss--;
+                               rss[MM_FILEPAGES]--;
                        }
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
                                print_bad_pte(vma, addr, ptent, page);
-                       tlb_remove_page(tlb, page);
+                       force_flush = !__tlb_remove_page(tlb, page);
+                       if (force_flush)
+                               break;
                        continue;
                }
                /*
@@ -840,23 +1183,40 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                if (pte_file(ptent)) {
                        if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
                                print_bad_pte(vma, addr, ptent, NULL);
-               } else if
-                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-                       print_bad_pte(vma, addr, ptent, NULL);
+               } else {
+                       swp_entry_t entry = pte_to_swp_entry(ptent);
+
+                       if (!non_swap_entry(entry))
+                               rss[MM_SWAPENTS]--;
+                       if (unlikely(!free_swap_and_cache(entry)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
-       } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
+       } while (pte++, addr += PAGE_SIZE, addr != end);
 
-       add_mm_rss(mm, file_rss, anon_rss);
+       add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
+       /*
+        * If mmu_gather ran out of room to batch pages, break out of the
+        * PTE lock to avoid doing the potentially expensive TLB invalidate
+        * and page-free while holding it.
+        */
+       if (force_flush) {
+               force_flush = 0;
+               tlb_flush_mmu(tlb);
+               if (addr != end)
+                       goto again;
+       }
+
        return addr;
 }
 
 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -864,13 +1224,19 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd)) {
-                       (*zap_work)--;
-                       continue;
+               if (pmd_trans_huge(*pmd)) {
+                       if (next-addr != HPAGE_PMD_SIZE) {
+                               VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
+                               split_huge_page_pmd(vma->vm_mm, pmd);
+                       } else if (zap_huge_pmd(tlb, vma, pmd))
+                               continue;
+                       /* fall through */
                }
-               next = zap_pte_range(tlb, vma, pmd, addr, next,
-                                               zap_work, details);
-       } while (pmd++, addr = next, (addr != end && *zap_work > 0));
+               if (pmd_none_or_clear_bad(pmd))
+                       continue;
+               next = zap_pte_range(tlb, vma, pmd, addr, next, details);
+               cond_resched();
+       } while (pmd++, addr = next, addr != end);
 
        return addr;
 }
@@ -878,7 +1244,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pud_t *pud;
        unsigned long next;
@@ -886,13 +1252,10 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud)) {
-                       (*zap_work)--;
+               if (pud_none_or_clear_bad(pud))
                        continue;
-               }
-               next = zap_pmd_range(tlb, vma, pud, addr, next,
-                                               zap_work, details);
-       } while (pud++, addr = next, (addr != end && *zap_work > 0));
+               next = zap_pmd_range(tlb, vma, pud, addr, next, details);
+       } while (pud++, addr = next, addr != end);
 
        return addr;
 }
@@ -900,7 +1263,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -909,18 +1272,17 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                details = NULL;
 
        BUG_ON(addr >= end);
+       mem_cgroup_uncharge_start();
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd)) {
-                       (*zap_work)--;
+               if (pgd_none_or_clear_bad(pgd))
                        continue;
-               }
-               next = zap_pud_range(tlb, vma, pgd, addr, next,
-                                               zap_work, details);
-       } while (pgd++, addr = next, (addr != end && *zap_work > 0));
+               next = zap_pud_range(tlb, vma, pgd, addr, next, details);
+       } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
+       mem_cgroup_uncharge_end();
 
        return addr;
 }
@@ -958,17 +1320,12 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather **tlbp,
+unsigned long unmap_vmas(struct mmu_gather *tlb,
                struct vm_area_struct *vma, unsigned long start_addr,
                unsigned long end_addr, unsigned long *nr_accounted,
                struct zap_details *details)
 {
-       long zap_work = ZAP_BLOCK_SIZE;
-       unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
-       int tlb_start_valid = 0;
        unsigned long start = start_addr;
-       spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
-       int fullmm = (*tlbp)->fullmm;
        struct mm_struct *mm = vma->vm_mm;
 
        mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -989,11 +1346,6 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        untrack_pfn_vma(vma, 0, 0);
 
                while (start != end) {
-                       if (!tlb_start_valid) {
-                               tlb_start = start;
-                               tlb_start_valid = 1;
-                       }
-
                        if (unlikely(is_vm_hugetlb_page(vma))) {
                                /*
                                 * It is undesirable to test vma->vm_file as it
@@ -1006,39 +1358,15 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                                 * Since no pte has actually been setup, it is
                                 * safe to do nothing in this case.
                                 */
-                               if (vma->vm_file) {
+                               if (vma->vm_file)
                                        unmap_hugepage_range(vma, start, end, NULL);
-                                       zap_work -= (end - start) /
-                                       pages_per_huge_page(hstate_vma(vma));
-                               }
 
                                start = end;
                        } else
-                               start = unmap_page_range(*tlbp, vma,
-                                               start, end, &zap_work, details);
-
-                       if (zap_work > 0) {
-                               BUG_ON(start != end);
-                               break;
-                       }
-
-                       tlb_finish_mmu(*tlbp, tlb_start, start);
-
-                       if (need_resched() ||
-                               (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
-                               if (i_mmap_lock) {
-                                       *tlbp = NULL;
-                                       goto out;
-                               }
-                               cond_resched();
-                       }
-
-                       *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
-                       tlb_start_valid = 0;
-                       zap_work = ZAP_BLOCK_SIZE;
+                               start = unmap_page_range(tlb, vma, start, end, details);
                }
        }
-out:
+
        mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
@@ -1054,16 +1382,15 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *details)
 {
        struct mm_struct *mm = vma->vm_mm;
-       struct mmu_gather *tlb;
+       struct mmu_gather tlb;
        unsigned long end = address + size;
        unsigned long nr_accounted = 0;
 
        lru_add_drain();
-       tlb = tlb_gather_mmu(mm, 0);
+       tlb_gather_mmu(&tlb, mm, 0);
        update_hiwater_rss(mm);
        end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-       if (tlb)
-               tlb_finish_mmu(tlb, address, end);
+       tlb_finish_mmu(&tlb, address, end);
        return end;
 }
 
@@ -1090,8 +1417,17 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-/*
- * Do a quick page-table lookup for a single page.
+/**
+ * follow_page - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
  */
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
@@ -1118,7 +1454,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                goto no_page_table;
-       if (pud_huge(*pud)) {
+       if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
                BUG_ON(flags & FOLL_GET);
                page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
                goto out;
@@ -1129,11 +1465,32 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto no_page_table;
-       if (pmd_huge(*pmd)) {
+       if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
                BUG_ON(flags & FOLL_GET);
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
        }
+       if (pmd_trans_huge(*pmd)) {
+               if (flags & FOLL_SPLIT) {
+                       split_huge_page_pmd(mm, pmd);
+                       goto split_fallthrough;
+               }
+               spin_lock(&mm->page_table_lock);
+               if (likely(pmd_trans_huge(*pmd))) {
+                       if (unlikely(pmd_trans_splitting(*pmd))) {
+                               spin_unlock(&mm->page_table_lock);
+                               wait_split_huge_page(vma->anon_vma, pmd);
+                       } else {
+                               page = follow_trans_huge_pmd(mm, address,
+                                                            pmd, flags);
+                               spin_unlock(&mm->page_table_lock);
+                               goto out;
+                       }
+               } else
+                       spin_unlock(&mm->page_table_lock);
+               /* fall through */
+       }
+split_fallthrough:
        if (unlikely(pmd_bad(*pmd)))
                goto no_page_table;
 
@@ -1144,9 +1501,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
+
        page = vm_normal_page(vma, address, pte);
-       if (unlikely(!page))
-               goto bad_page;
+       if (unlikely(!page)) {
+               if ((flags & FOLL_DUMP) ||
+                   !is_zero_pfn(pte_pfn(pte)))
+                       goto bad_page;
+               page = pte_page(pte);
+       }
 
        if (flags & FOLL_GET)
                get_page(page);
@@ -1161,6 +1523,28 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                 */
                mark_page_accessed(page);
        }
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+               /*
+                * The preliminary mapping check is mainly to avoid the
+                * pointless overhead of lock_page on the ZERO_PAGE
+                * which might bounce very badly if there is contention.
+                *
+                * If the page is already locked, we don't need to
+                * handle it now - vmscan will handle it later if and
+                * when it attempts to reclaim the page.
+                */
+               if (page->mapping && trylock_page(page)) {
+                       lru_add_drain();  /* push cached pages to LRU */
+                       /*
+                        * Because we lock page here and migration is
+                        * blocked by the pte's page reference, we need
+                        * only check for file-cache page truncation.
+                        */
+                       if (page->mapping)
+                               mlock_vma_page(page);
+                       unlock_page(page);
+               }
+       }
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
@@ -1174,75 +1558,113 @@ no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return page;
-       /* Fall through to ZERO_PAGE handling */
+
 no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
-        * has touched so far, we don't want to allocate page tables.
+        * has touched so far, we don't want to allocate unnecessary pages or
+        * page tables.  Return error instead of NULL to skip handle_mm_fault,
+        * then get_dump_page() will return NULL to leave a hole in the dump.
+        * But we can only make this optimization where a hole would surely
+        * be zero-filled if handle_mm_fault() actually did handle it.
         */
-       if (flags & FOLL_ANON) {
-               page = ZERO_PAGE(0);
-               if (flags & FOLL_GET)
-                       get_page(page);
-               BUG_ON(flags & FOLL_WRITE);
-       }
+       if ((flags & FOLL_DUMP) &&
+           (!vma->vm_ops || !vma->vm_ops->fault))
+               return ERR_PTR(-EFAULT);
        return page;
 }
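
Per the kerneldoc added above, follow_page() can also return an error pointer; a hedged usage sketch (caller holds mmap_sem; with FOLL_GET the reference must later be dropped with put_page(); function name invented):

static struct page *example_peek_user_page(struct vm_area_struct *vma,
					   unsigned long address)
{
	struct page *page;

	page = follow_page(vma, address, FOLL_GET | FOLL_TOUCH);
	if (IS_ERR_OR_NULL(page))
		return NULL;
	return page;			/* caller must put_page() when done */
}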
 
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       /*
-        * We don't want to optimize FOLL_ANON for make_pages_present()
-        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
-        * we want to get the page from the page tables to make sure
-        * that we serialize and update with any other user of that
-        * mapping.
-        */
-       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
-               return 0;
-       /*
-        * And if we have a fault routine, it's not an anonymous region.
-        */
-       return !vma->vm_ops || !vma->vm_ops->fault;
+       return stack_guard_page_start(vma, addr) ||
+              stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
-
-
+/**
+ * __get_user_pages() - pin user pages in memory
+ * @tsk:       task_struct of target task
+ * @mm:                mm_struct of target mm
+ * @start:     starting user address
+ * @nr_pages:  number of pages from start to pin
+ * @gup_flags: flags modifying pin behaviour
+ * @pages:     array that receives pointers to the pages pinned.
+ *             Should be at least nr_pages long. Or NULL, if caller
+ *             only intends to ensure the pages are faulted in.
+ * @vmas:      array of pointers to vmas corresponding to each page.
+ *             Or NULL if the caller does not require them.
+ * @nonblocking: whether waiting for disk IO or mmap_sem contention
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * __get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * __get_user_pages returns, and there may even be a completely different
+ * page there in some cases (e.g. if mmapped pagecache has been invalidated
+ * and subsequently re-faulted). However, it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
+ * the page is written to, set_page_dirty (or set_page_dirty_lock, as
+ * appropriate) must be called after the page is finished with, and
+ * before put_page is called.
+ *
+ * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
+ * or mmap_sem contention, and if waiting is needed to pin all pages,
+ * *@nonblocking will be set to 0.
+ *
+ * In most cases, get_user_pages or get_user_pages_fast should be used
+ * instead of __get_user_pages. __get_user_pages should be used only if
+ * you need some special @gup_flags.
+ */
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int nr_pages, int flags,
-                    struct page **pages, struct vm_area_struct **vmas)
+                    unsigned long start, int nr_pages, unsigned int gup_flags,
+                    struct page **pages, struct vm_area_struct **vmas,
+                    int *nonblocking)
 {
        int i;
-       unsigned int vm_flags = 0;
-       int write = !!(flags & GUP_FLAGS_WRITE);
-       int force = !!(flags & GUP_FLAGS_FORCE);
+       unsigned long vm_flags;
 
        if (nr_pages <= 0)
                return 0;
+
+       VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
        /* 
         * Require read or write permissions.
-        * If 'force' is set, we only require the "MAY" flags.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
-       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = (gup_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (gup_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0;
 
        do {
                struct vm_area_struct *vma;
-               unsigned int foll_flags;
 
                vma = find_extend_vma(mm, start);
-               if (!vma && in_gate_area(tsk, start)) {
+               if (!vma && in_gate_area(mm, start)) {
                        unsigned long pg = start & PAGE_MASK;
-                       struct vm_area_struct *gate_vma = get_gate_vma(tsk);
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
 
                        /* user gate pages are read-only */
-                       if (write)
+                       if (gup_flags & FOLL_WRITE)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1254,24 +1676,31 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pmd = pmd_offset(pud, pg);
                        if (pmd_none(*pmd))
                                return i ? : -EFAULT;
+                       VM_BUG_ON(pmd_trans_huge(*pmd));
                        pte = pte_offset_map(pmd, pg);
                        if (pte_none(*pte)) {
                                pte_unmap(pte);
                                return i ? : -EFAULT;
                        }
+                       vma = get_gate_vma(mm);
                        if (pages) {
-                               struct page *page = vm_normal_page(gate_vma, start, *pte);
+                               struct page *page;
+
+                               page = vm_normal_page(vma, start, *pte);
+                               if (!page) {
+                                       if (!(gup_flags & FOLL_DUMP) &&
+                                            is_zero_pfn(pte_pfn(*pte)))
+                                               page = pte_page(*pte);
+                                       else {
+                                               pte_unmap(pte);
+                                               return i ? : -EFAULT;
+                                       }
+                               }
                                pages[i] = page;
-                               if (page)
-                                       get_page(page);
+                               get_page(page);
                        }
                        pte_unmap(pte);
-                       if (vmas)
-                               vmas[i] = gate_vma;
-                       i++;
-                       start += PAGE_SIZE;
-                       nr_pages--;
-                       continue;
+                       goto next_page;
                }
 
                if (!vma ||
@@ -1281,18 +1710,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &nr_pages, i, write);
+                                       &start, &nr_pages, i, gup_flags);
                        continue;
                }
 
-               foll_flags = FOLL_TOUCH;
-               if (pages)
-                       foll_flags |= FOLL_GET;
-               if (!write && use_zero_page(vma))
-                       foll_flags |= FOLL_ANON;
-
                do {
                        struct page *page;
+                       unsigned int foll_flags = gup_flags;
 
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
@@ -1301,28 +1725,55 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
 
-                       if (write)
-                               foll_flags |= FOLL_WRITE;
-
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
+                               unsigned int fault_flags = 0;
+
+                               /* For mlock, just skip the stack guard page. */
+                               if (foll_flags & FOLL_MLOCK) {
+                                       if (stack_guard_page(vma, start))
+                                               goto next_page;
+                               }
+                               if (foll_flags & FOLL_WRITE)
+                                       fault_flags |= FAULT_FLAG_WRITE;
+                               if (nonblocking)
+                                       fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+                               if (foll_flags & FOLL_NOWAIT)
+                                       fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
 
                                ret = handle_mm_fault(mm, vma, start,
-                                       (foll_flags & FOLL_WRITE) ?
-                                       FAULT_FLAG_WRITE : 0);
+                                                       fault_flags);
 
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
-                                       else if (ret & VM_FAULT_SIGBUS)
+                                       if (ret & (VM_FAULT_HWPOISON |
+                                                  VM_FAULT_HWPOISON_LARGE)) {
+                                               if (i)
+                                                       return i;
+                                               else if (gup_flags & FOLL_HWPOISON)
+                                                       return -EHWPOISON;
+                                               else
+                                                       return -EFAULT;
+                                       }
+                                       if (ret & VM_FAULT_SIGBUS)
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
-                               if (ret & VM_FAULT_MAJOR)
-                                       tsk->maj_flt++;
-                               else
-                                       tsk->min_flt++;
+
+                               if (tsk) {
+                                       if (ret & VM_FAULT_MAJOR)
+                                               tsk->maj_flt++;
+                                       else
+                                               tsk->min_flt++;
+                               }
+
+                               if (ret & VM_FAULT_RETRY) {
+                                       if (nonblocking)
+                                               *nonblocking = 0;
+                                       return i;
+                               }
 
                                /*
                                 * The VM_FAULT_WRITE bit tells us that
@@ -1350,6 +1801,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
+next_page:
                        if (vmas)
                                vmas[i] = vma;
                        i++;
@@ -1359,10 +1811,12 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        } while (nr_pages);
        return i;
 }
+EXPORT_SYMBOL(__get_user_pages);
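
The new @nonblocking argument lets a caller notice that the fault handler dropped mmap_sem, so the lock is not released twice; a hedged caller sketch (helper name invented):

static int example_pin_one_page(struct mm_struct *mm, unsigned long address,
				struct page **page)
{
	int locked = 1;
	int ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(current, mm, address, 1,
			       FOLL_TOUCH | FOLL_GET, page, NULL, &locked);
	if (locked)			/* cleared if the fault dropped mmap_sem */
		up_read(&mm->mmap_sem);
	return ret;			/* 1 on success; caller put_page()s later */
}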
 
 /**
  * get_user_pages() - pin user pages in memory
- * @tsk:       task_struct of target task
+ * @tsk:       the task_struct to use for page fault accounting, or
+ *             NULL if faults are not to be recorded.
  * @mm:                mm_struct of target mm
  * @start:     starting user address
  * @nr_pages:  number of pages from start to pin
@@ -1414,27 +1868,60 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int nr_pages, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
 {
-       int flags = 0;
+       int flags = FOLL_TOUCH;
 
+       if (pages)
+               flags |= FOLL_GET;
        if (write)
-               flags |= GUP_FLAGS_WRITE;
+               flags |= FOLL_WRITE;
        if (force)
-               flags |= GUP_FLAGS_FORCE;
+               flags |= FOLL_FORCE;
 
-       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+                               NULL);
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
-pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+       struct vm_area_struct *vma;
+       struct page *page;
+
+       if (__get_user_pages(current, current->mm, addr, 1,
+                            FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+                            NULL) < 1)
+               return NULL;
+       flush_cache_page(vma, addr, page_to_pfn(page));
+       return page;
+}
+#endif /* CONFIG_ELF_CORE */
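
For reference, a minimal sketch of the usual get_user_pages() caller pattern under the signature above: pin, use, dirty and release each page (helper name invented):

static int example_pin_user_buffer(unsigned long start, int nr_pages,
				   struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, start, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... read or write the pinned pages ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);	/* pages may have been written */
		put_page(pages[i]);
	}
	return got;
}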
+
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
        pud_t * pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd_t * pmd = pmd_alloc(mm, pud, addr);
-               if (pmd)
+               if (pmd) {
+                       VM_BUG_ON(pmd_trans_huge(*pmd));
                        return pte_alloc_map_lock(mm, pmd, addr, ptl);
+               }
        }
        return NULL;
 }
@@ -1468,7 +1955,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
        /* Ok, finally just insert the thing.. */
        get_page(page);
-       inc_mm_counter(mm, file_rss);
+       inc_mm_counter_fast(mm, MM_FILEPAGES);
        page_add_file_rmap(page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -1534,7 +2021,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        /* Ok, finally just insert the thing.. */
        entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+       update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
@@ -1603,7 +2090,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * If we don't have pte special, then we have to use the pfn_valid()
         * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
         * refcount the page if pfn_valid is true (hence insert_page rather
-        * than insert_pfn).
+        * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
+        * without pte special, it would there be refcounted as a normal page.
         */
        if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
                struct page *page;
@@ -1652,6 +2140,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
+       VM_BUG_ON(pmd_trans_huge(*pmd));
        do {
                next = pmd_addr_end(addr, end);
                if (remap_pte_range(mm, pmd, addr, next,
@@ -1778,10 +2267,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        token = pmd_pgtable(*pmd);
 
        do {
-               err = fn(pte, token, addr, data);
+               err = fn(pte++, token, addr, data);
                if (err)
                        break;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();
 
@@ -1841,11 +2330,10 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long start = addr, end = addr + size;
+       unsigned long end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
-       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1853,7 +2341,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -1862,10 +2350,10 @@ EXPORT_SYMBOL_GPL(apply_to_page_range);
  * handle_pte_fault chooses page fault handler according to an entry
  * which was read non-atomically.  Before making any commitment, on
  * those architectures or configurations (e.g. i386 with PAE) which
- * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
  * must check under lock before unmapping the pte and proceeding
  * (but do_wp_page is only called after already making such a check;
- * and do_anonymous_page and do_no_page can safely check later on).
+ * and do_anonymous_page can safely check later on).
  */
 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
                                pte_t *page_table, pte_t orig_pte)
@@ -1883,19 +2371,6 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
        return same;
 }
 
-/*
- * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
- * servicing faults for write access.  In the normal case, do always want
- * pte_mkwrite.  But get_user_pages can cause write faults for mappings
- * that do not have writing enabled, when used by access_process_vm.
- */
-static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
-{
-       if (likely(vma->vm_flags & VM_WRITE))
-               pte = pte_mkwrite(pte);
-       return pte;
-}
-
 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
        /*
@@ -1915,7 +2390,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                 * zeroes.
                 */
                if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
-                       memset(kaddr, 0, PAGE_SIZE);
+                       clear_page(kaddr);
                kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(dst);
        } else
@@ -1943,10 +2418,11 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                spinlock_t *ptl, pte_t orig_pte)
+       __releases(ptl)
 {
        struct page *old_page, *new_page;
        pte_t entry;
-       int reuse = 0, ret = 0;
+       int ret = 0;
        int page_mkwrite = 0;
        struct page *dirty_page = NULL;
 
@@ -1978,12 +2454,20 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                                         &ptl);
                        if (!pte_same(*page_table, orig_pte)) {
                                unlock_page(old_page);
-                               page_cache_release(old_page);
                                goto unlock;
                        }
                        page_cache_release(old_page);
                }
-               reuse = reuse_swap_page(old_page);
+               if (reuse_swap_page(old_page)) {
+                       /*
+                        * The page is all ours.  Move it to our anon_vma so
+                        * the rmap code will not search our parent or siblings.
+                        * Protected against the rmap code by the page lock.
+                        */
+                       page_move_anon_rmap(old_page, vma, address);
+                       unlock_page(old_page);
+                       goto reuse;
+               }
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
@@ -2039,7 +2523,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                                         &ptl);
                        if (!pte_same(*page_table, orig_pte)) {
                                unlock_page(old_page);
-                               page_cache_release(old_page);
                                goto unlock;
                        }
 
@@ -2047,18 +2530,52 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                dirty_page = old_page;
                get_page(dirty_page);
-               reuse = 1;
-       }
 
-       if (reuse) {
 reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (ptep_set_access_flags(vma, address, page_table, entry,1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache(vma, address, page_table);
+               pte_unmap_unlock(page_table, ptl);
                ret |= VM_FAULT_WRITE;
-               goto unlock;
+
+               if (!dirty_page)
+                       return ret;
+
+               /*
+                * Yes, Virginia, this is actually required to prevent a race
+                * with clear_page_dirty_for_io() from clearing the page dirty
+                * bit after it has cleared all dirty ptes, but before a racing
+                * do_wp_page installs a dirty pte.
+                *
+                * __do_fault is protected similarly.
+                */
+               if (!page_mkwrite) {
+                       wait_on_page_locked(dirty_page);
+                       set_page_dirty_balance(dirty_page, page_mkwrite);
+               }
+               put_page(dirty_page);
+               if (page_mkwrite) {
+                       struct address_space *mapping = dirty_page->mapping;
+
+                       set_page_dirty(dirty_page);
+                       unlock_page(dirty_page);
+                       page_cache_release(dirty_page);
+                       if (mapping)    {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
+               return ret;
        }
 
        /*
@@ -2070,20 +2587,17 @@ gotten:
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       VM_BUG_ON(old_page == ZERO_PAGE(0));
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-       if (!new_page)
-               goto oom;
-       /*
-        * Don't let another task, with possibly unlocked vma,
-        * keep the mlocked page.
-        */
-       if ((vma->vm_flags & VM_LOCKED) && old_page) {
-               lock_page(old_page);    /* for LRU manipulation */
-               clear_page_mlock(old_page);
-               unlock_page(old_page);
+
+       if (is_zero_pfn(pte_pfn(orig_pte))) {
+               new_page = alloc_zeroed_user_highpage_movable(vma, address);
+               if (!new_page)
+                       goto oom;
+       } else {
+               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+               if (!new_page)
+                       goto oom;
+               cow_user_page(new_page, old_page, address, vma);
        }
-       cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
        if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
@@ -2096,11 +2610,11 @@ gotten:
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
                        if (!PageAnon(old_page)) {
-                               dec_mm_counter(mm, file_rss);
-                               inc_mm_counter(mm, anon_rss);
+                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+                               inc_mm_counter_fast(mm, MM_ANONPAGES);
                        }
                } else
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2118,7 +2632,7 @@ gotten:
                 * new page to be mapped directly into the secondary page table.
                 */
                set_pte_at_notify(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
@@ -2153,42 +2667,19 @@ gotten:
 
        if (new_page)
                page_cache_release(new_page);
-       if (old_page)
-               page_cache_release(old_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
-       if (dirty_page) {
+       if (old_page) {
                /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * do_no_page is protected similarly.
+                * Don't let another task, with possibly unlocked vma,
+                * keep the mlocked page.
                 */
-               if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page, page_mkwrite);
-               }
-               put_page(dirty_page);
-               if (page_mkwrite) {
-                       struct address_space *mapping = dirty_page->mapping;
-
-                       set_page_dirty(dirty_page);
-                       unlock_page(dirty_page);
-                       page_cache_release(dirty_page);
-                       if (mapping)    {
-                               /*
-                                * Some device drivers do not set page.mapping
-                                * but still dirty their pages
-                                */
-                               balance_dirty_pages_ratelimited(mapping);
-                       }
+               if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+                       lock_page(old_page);    /* LRU manipulation */
+                       munlock_vma_page(old_page);
+                       unlock_page(old_page);
                }
-
-               /* file_update_time outside page_lock */
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
+               page_cache_release(old_page);
        }
        return ret;
 oom_free_new:
@@ -2208,96 +2699,11 @@ unwritable_page:
        return ret;
 }
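
The shared-writable branch of do_wp_page() above cooperates with the vma's
->page_mkwrite() handler (tracked by the page_mkwrite flag in these hunks)
before it lets a page-cache page be dirtied through the mapping.  As a hedged
illustration of the contract that code relies on, modelled loosely on
filemap_page_mkwrite(); the name sketch_page_mkwrite() and the exact error
handling are illustrative, not part of this patch:

static int sketch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (page->mapping != vma->vm_file->f_mapping) {
		/* the page was truncated while we waited for the lock */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* VM_FAULT_LOCKED tells do_wp_page the page is returned locked */
	return VM_FAULT_LOCKED;
}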
 
-/*
- * Helper functions for unmap_mapping_range().
- *
- * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
- *
- * We have to restart searching the prio_tree whenever we drop the lock,
- * since the iterator is only valid while the lock is held, and anyway
- * a later vma might be split and reinserted earlier while lock dropped.
- *
- * The list of nonlinear vmas could be handled more efficiently, using
- * a placeholder, but handle it in the same way until a need is shown.
- * It is important to search the prio_tree before nonlinear list: a vma
- * may become nonlinear and be shifted from prio_tree to nonlinear list
- * while the lock is dropped; but never shifted from list to prio_tree.
- *
- * In order to make forward progress despite restarting the search,
- * vm_truncate_count is used to mark a vma as now dealt with, so we can
- * quickly skip it next time around.  Since the prio_tree search only
- * shows us those vmas affected by unmapping the range in question, we
- * can't efficiently keep all vmas in step with mapping->truncate_count:
- * so instead reset them all whenever it wraps back to 0 (then go to 1).
- * mapping->truncate_count and vma->vm_truncate_count are protected by
- * i_mmap_lock.
- *
- * In order to make forward progress despite repeatedly restarting some
- * large vma, note the restart_addr from unmap_vmas when it breaks out:
- * and restart from that address when we reach that vma again.  It might
- * have been split or merged, shrunk or extended, but never shifted: so
- * restart_addr remains valid so long as it remains in the vma's range.
- * unmap_mapping_range forces truncate_count to leap over page-aligned
- * values so we can save vma's restart_addr in its truncate_count field.
- */
-#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
-
-static void reset_vma_truncate_counts(struct address_space *mapping)
-{
-       struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
-
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
-               vma->vm_truncate_count = 0;
-       list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
-               vma->vm_truncate_count = 0;
-}
-
-static int unmap_mapping_range_vma(struct vm_area_struct *vma,
+static void unmap_mapping_range_vma(struct vm_area_struct *vma,
                unsigned long start_addr, unsigned long end_addr,
                struct zap_details *details)
 {
-       unsigned long restart_addr;
-       int need_break;
-
-       /*
-        * files that support invalidating or truncating portions of the
-        * file from under mmaped areas must have their ->fault function
-        * return a locked page (and set VM_FAULT_LOCKED in the return).
-        * This provides synchronisation against concurrent unmapping here.
-        */
-
-again:
-       restart_addr = vma->vm_truncate_count;
-       if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
-               start_addr = restart_addr;
-               if (start_addr >= end_addr) {
-                       /* Top of vma has been split off since last time */
-                       vma->vm_truncate_count = details->truncate_count;
-                       return 0;
-               }
-       }
-
-       restart_addr = zap_page_range(vma, start_addr,
-                                       end_addr - start_addr, details);
-       need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
-
-       if (restart_addr >= end_addr) {
-               /* We have now completed this vma: mark it so */
-               vma->vm_truncate_count = details->truncate_count;
-               if (!need_break)
-                       return 0;
-       } else {
-               /* Note restart_addr in vma's truncate_count field */
-               vma->vm_truncate_count = restart_addr;
-               if (!need_break)
-                       goto again;
-       }
-
-       spin_unlock(details->i_mmap_lock);
-       cond_resched();
-       spin_lock(details->i_mmap_lock);
-       return -EINTR;
+       zap_page_range(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
@@ -2307,12 +2713,8 @@ static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
        struct prio_tree_iter iter;
        pgoff_t vba, vea, zba, zea;
 
-restart:
        vma_prio_tree_foreach(vma, &iter, root,
                        details->first_index, details->last_index) {
-               /* Skip quickly over those we have already dealt with */
-               if (vma->vm_truncate_count == details->truncate_count)
-                       continue;
 
                vba = vma->vm_pgoff;
                vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
@@ -2324,11 +2726,10 @@ restart:
                if (zea > vea)
                        zea = vea;
 
-               if (unmap_mapping_range_vma(vma,
+               unmap_mapping_range_vma(vma,
                        ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
                        ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
-                               details) < 0)
-                       goto restart;
+                               details);
        }
 }
 
@@ -2343,15 +2744,9 @@ static inline void unmap_mapping_range_list(struct list_head *head,
         * across *all* the pages in each nonlinear VMA, not just the pages
         * whose virtual address lies outside the file truncation point.
         */
-restart:
        list_for_each_entry(vma, head, shared.vm_set.list) {
-               /* Skip quickly over those we have already dealt with */
-               if (vma->vm_truncate_count == details->truncate_count)
-                       continue;
                details->nonlinear_vma = vma;
-               if (unmap_mapping_range_vma(vma, vma->vm_start,
-                                       vma->vm_end, details) < 0)
-                       goto restart;
+               unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
        }
 }
 
@@ -2360,7 +2755,7 @@ restart:
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
- * boundary.  Note that this is different from vmtruncate(), which
+ * boundary.  Note that this is different from truncate_pagecache(), which
  * must keep the partial page.  In contrast, we must get rid of
  * partial pages.
  * @holelen: size of prospective hole in bytes.  This will be rounded
@@ -2390,84 +2785,17 @@ void unmap_mapping_range(struct address_space *mapping,
        details.last_index = hba + hlen - 1;
        if (details.last_index < details.first_index)
                details.last_index = ULONG_MAX;
-       details.i_mmap_lock = &mapping->i_mmap_lock;
 
-       spin_lock(&mapping->i_mmap_lock);
-
-       /* Protect against endless unmapping loops */
-       mapping->truncate_count++;
-       if (unlikely(is_restart_addr(mapping->truncate_count))) {
-               if (mapping->truncate_count == 0)
-                       reset_vma_truncate_counts(mapping);
-               mapping->truncate_count++;
-       }
-       details.truncate_count = mapping->truncate_count;
 
+       mutex_lock(&mapping->i_mmap_mutex);
        if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page.  Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
-       if (inode->i_size < offset) {
-               unsigned long limit;
-
-               limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-               if (limit != RLIM_INFINITY && offset > limit)
-                       goto out_sig;
-               if (offset > inode->i_sb->s_maxbytes)
-                       goto out_big;
-               i_size_write(inode, offset);
-       } else {
-               struct address_space *mapping = inode->i_mapping;
-
-               /*
-                * truncation of in-use swapfiles is disallowed - it would
-                * cause subsequent swapout to scribble on the now-freed
-                * blocks.
-                */
-               if (IS_SWAPFILE(inode))
-                       return -ETXTBSY;
-               i_size_write(inode, offset);
-
-               /*
-                * unmap_mapping_range is called twice, first simply for
-                * efficiency so that truncate_inode_pages does fewer
-                * single-page unmaps.  However after this first call, and
-                * before truncate_inode_pages finishes, it is possible for
-                * private pages to be COWed, which remain after
-                * truncate_inode_pages finishes, hence the second
-                * unmap_mapping_range call must be made for correctness.
-                */
-               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-               truncate_inode_pages(mapping, offset);
-               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       }
-
-       if (inode->i_op->truncate)
-               inode->i_op->truncate(inode);
-       return 0;
-
-out_sig:
-       send_sig(SIGXFSZ, current, 0);
-out_big:
-       return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
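
With vmtruncate() gone from this file, the unmap-and-truncate sequence it
performed is expected to live in the generic page-cache helpers instead.  The
sketch below merely restates the removed body with an illustrative name
(sketch_truncate_pagecache() is not a real symbol) to show why
unmap_mapping_range() brackets truncate_inode_pages() on both sides:

static void sketch_truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * The first unmap is only an optimisation so truncate_inode_pages()
	 * meets fewer mapped pages; private COW copies created while it runs
	 * make the second unmap necessary for correctness.
	 */
	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, newsize + PAGE_SIZE - 1, 0, 1);
}
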
 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 {
        struct address_space *mapping = inode->i_mapping;
@@ -2502,18 +2830,27 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
-       struct mem_cgroup *ptr = NULL;
+       int locked;
+       struct mem_cgroup *ptr;
+       int exclusive = 0;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                goto out;
 
        entry = pte_to_swp_entry(orig_pte);
-       if (is_migration_entry(entry)) {
-               migration_entry_wait(mm, pmd, address);
+       if (unlikely(non_swap_entry(entry))) {
+               if (is_migration_entry(entry)) {
+                       migration_entry_wait(mm, pmd, address);
+               } else if (is_hwpoison_entry(entry)) {
+                       ret = VM_FAULT_HWPOISON;
+               } else {
+                       print_bad_pte(vma, address, orig_pte, NULL);
+                       ret = VM_FAULT_SIGBUS;
+               }
                goto out;
        }
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
@@ -2537,10 +2874,43 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
+       } else if (PageHWPoison(page)) {
+               /*
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
+                */
+               ret = VM_FAULT_HWPOISON;
+               delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+               goto out_release;
        }
 
-       lock_page(page);
+       locked = lock_page_or_retry(page, mm, flags);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+       if (!locked) {
+               ret |= VM_FAULT_RETRY;
+               goto out_release;
+       }
+
+       /*
+        * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
+        * release the swapcache from under us.  The page pin, and pte_same
+        * test below, are not enough to exclude that.  Even if it is still
+        * swapcache, we need to check that the page's swap has not changed.
+        */
+       if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+               goto out_page;
+
+       if (ksm_might_need_to_copy(page, vma, address)) {
+               swapcache = page;
+               page = ksm_does_need_to_copy(page, vma, address);
+
+               if (unlikely(!page)) {
+                       ret = VM_FAULT_OOM;
+                       page = swapcache;
+                       swapcache = NULL;
+                       goto out_page;
+               }
+       }
 
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
@@ -2573,15 +2943,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * discarded at swap_free().
         */
 
-       inc_mm_counter(mm, anon_rss);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
+       dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
        if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                flags &= ~FAULT_FLAG_WRITE;
+               ret |= VM_FAULT_WRITE;
+               exclusive = 1;
        }
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
-       page_add_anon_rmap(page, vma, address);
+       do_page_add_anon_rmap(page, vma, address, exclusive);
        /* It's better to call commit-charge after rmap is established */
        mem_cgroup_commit_charge_swapin(page, ptr);
 
@@ -2589,6 +2962,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                try_to_free_swap(page);
        unlock_page(page);
+       if (swapcache) {
+               /*
+                * Hold the lock to prevent the swap entry from being reused
+                * until we take the PT lock for the pte_same() check
+                * (to avoid false positives from pte_same). For
+                * further safety release the lock after the swap_free
+                * so that the swap count won't change under a
+                * parallel locked swapcache.
+                */
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
 
        if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
@@ -2598,7 +2983,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, pte);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
 out:
@@ -2608,11 +2993,50 @@ out_nomap:
        pte_unmap_unlock(page_table, ptl);
 out_page:
        unlock_page(page);
+out_release:
        page_cache_release(page);
+       if (swapcache) {
+               unlock_page(swapcache);
+               page_cache_release(swapcache);
+       }
        return ret;
 }
 
 /*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+               struct vm_area_struct *prev = vma->vm_prev;
+
+               /*
+                * Is there a mapping abutting this one below?
+                *
+                * That's only ok if it's the same stack mapping
+                * that has gotten split..
+                */
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+               expand_downwards(vma, address - PAGE_SIZE);
+       }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+               struct vm_area_struct *next = vma->vm_next;
+
+               /* As VM_GROWSDOWN but s/below/above/ */
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+               expand_upwards(vma, address + PAGE_SIZE);
+       }
+       return 0;
+}
+
+/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2625,9 +3049,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       /* Allocate our own private page. */
        pte_unmap(page_table);
 
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
+               return VM_FAULT_SIGBUS;
+
+       /* Use the zero-page for reads */
+       if (!(flags & FAULT_FLAG_WRITE)) {
+               entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+                                               vma->vm_page_prot));
+               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+               if (!pte_none(*page_table))
+                       goto unlock;
+               goto setpte;
+       }
+
+       /* Allocate our own private page. */
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
        page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2639,18 +3077,20 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       if (vma->vm_flags & VM_WRITE)
+               entry = pte_mkwrite(pte_mkdirty(entry));
 
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!pte_none(*page_table))
                goto release;
 
-       inc_mm_counter(mm, anon_rss);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, address);
+setpte:
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
@@ -2698,9 +3138,16 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.page = NULL;
 
        ret = vma->vm_ops->fault(vma, &vmf);
-       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+                           VM_FAULT_RETRY)))
                return ret;
 
+       if (unlikely(PageHWPoison(vmf.page))) {
+               if (ret & VM_FAULT_LOCKED)
+                       unlock_page(vmf.page);
+               return VM_FAULT_HWPOISON;
+       }
+
        /*
         * For consistency in subsequent calls, make the faulted page always
         * locked.
@@ -2733,12 +3180,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                goto out;
                        }
                        charged = 1;
-                       /*
-                        * Don't let another task, with possibly unlocked vma,
-                        * keep the mlocked page.
-                        */
-                       if (vma->vm_flags & VM_LOCKED)
-                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
@@ -2792,10 +3233,10 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (anon) {
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                        page_add_new_anon_rmap(page, vma, address);
                } else {
-                       inc_mm_counter(mm, file_rss);
+                       inc_mm_counter_fast(mm, MM_FILEPAGES);
                        page_add_file_rmap(page);
                        if (flags & FAULT_FLAG_WRITE) {
                                dirty_page = page;
@@ -2805,7 +3246,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
        } else {
                if (charged)
                        mem_cgroup_uncharge_page(page);
@@ -2885,7 +3326,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 * Page table corrupted: show pte and kill process.
                 */
                print_bad_pte(vma, address, orig_pte, NULL);
-               return VM_FAULT_OOM;
+               return VM_FAULT_SIGBUS;
        }
 
        pgoff = pte_to_pgoff(orig_pte);
@@ -2905,9 +3346,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static inline int handle_pte_fault(struct mm_struct *mm,
-               struct vm_area_struct *vma, unsigned long address,
-               pte_t *pte, pmd_t *pmd, unsigned int flags)
+int handle_pte_fault(struct mm_struct *mm,
+                    struct vm_area_struct *vma, unsigned long address,
+                    pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
        pte_t entry;
        spinlock_t *ptl;
@@ -2942,7 +3383,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        }
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, pte);
        } else {
                /*
                 * This is needed only for protection faults but the arch code
@@ -2951,7 +3392,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                 * with threads.
                 */
                if (flags & FAULT_FLAG_WRITE)
-                       flush_tlb_page(vma, address);
+                       flush_tlb_fix_spurious_fault(vma, address);
        }
 unlock:
        pte_unmap_unlock(pte, ptl);
@@ -2973,6 +3414,9 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        count_vm_event(PGFAULT);
 
+       /* do counter updates before entering the really critical section. */
+       check_sync_rss_stat(current);
+
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
@@ -2983,9 +3427,40 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pmd = pmd_alloc(mm, pud, address);
        if (!pmd)
                return VM_FAULT_OOM;
-       pte = pte_alloc_map(mm, pmd, address);
-       if (!pte)
+       if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
+               if (!vma->vm_ops)
+                       return do_huge_pmd_anonymous_page(mm, vma, address,
+                                                         pmd, flags);
+       } else {
+               pmd_t orig_pmd = *pmd;
+               barrier();
+               if (pmd_trans_huge(orig_pmd)) {
+                       if (flags & FAULT_FLAG_WRITE &&
+                           !pmd_write(orig_pmd) &&
+                           !pmd_trans_splitting(orig_pmd))
+                               return do_huge_pmd_wp_page(mm, vma, address,
+                                                          pmd, orig_pmd);
+                       return 0;
+               }
+       }
+
+       /*
+        * Use __pte_alloc instead of pte_alloc_map, because we can't
+        * run pte_offset_map on the pmd if a huge pmd could
+        * materialize from under us from a different thread.
+        */
+       if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
                return VM_FAULT_OOM;
+       /* if a huge pmd materialized from under us, just retry later */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               return 0;
+       /*
+        * A regular pmd is established and it can't morph into a huge pmd
+        * from under us anymore at this point because we hold the mmap_sem
+        * in read mode and khugepaged takes it in write mode. So now it's
+        * safe to run pte_offset_map().
+        */
+       pte = pte_offset_map(pmd, address);
 
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
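
handle_mm_fault() is the entry point used by the architecture page-fault
handlers and by get_user_pages().  A minimal sketch of a caller follows; the
name sketch_fixup_fault() and the error handling are illustrative only, and a
real handler would also expand GROWSDOWN stacks and honour VM_FAULT_RETRY:

static int sketch_fixup_fault(struct mm_struct *mm, unsigned long address,
			      unsigned int flags)
{
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	fault = handle_mm_fault(mm, vma, address, flags);
	up_read(&mm->mmap_sem);

	if (fault & (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON))
		return -EFAULT;
	return 0;
}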
@@ -3051,7 +3526,12 @@ int make_pages_present(unsigned long addr, unsigned long end)
        vma = find_vma(current->mm, addr);
        if (!vma)
                return -ENOMEM;
-       write = (vma->vm_flags & VM_WRITE) != 0;
+       /*
+        * We want to touch writable mappings with a write fault in order
+        * to break COW, except for shared mappings because these don't COW
+        * and we would not want to dirty them for nothing.
+        */
+       write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
        len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
@@ -3086,7 +3566,7 @@ static int __init gate_vma_init(void)
 __initcall(gate_vma_init);
 #endif
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef AT_SYSINFO_EHDR
        return &gate_vma;
@@ -3095,7 +3575,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 #endif
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 #ifdef AT_SYSINFO_EHDR
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
@@ -3106,7 +3586,7 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
-static int follow_pte(struct mm_struct *mm, unsigned long address,
+static int __follow_pte(struct mm_struct *mm, unsigned long address,
                pte_t **ptepp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
@@ -3123,6 +3603,7 @@ static int follow_pte(struct mm_struct *mm, unsigned long address,
                goto out;
 
        pmd = pmd_offset(pud, address);
+       VM_BUG_ON(pmd_trans_huge(*pmd));
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;
 
@@ -3143,6 +3624,17 @@ out:
        return -EINVAL;
 }
 
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, spinlock_t **ptlp)
+{
+       int res;
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(*ptlp,
+                          !(res = __follow_pte(mm, address, ptepp, ptlp)));
+       return res;
+}
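
follow_pte() returns with the pte still mapped and its page-table lock held,
which is what the __cond_lock() annotation expresses for sparse.  A hedged
sketch of the calling convention, modelled on follow_pfn() below
(sketch_read_pfn() is illustrative only):

static int sketch_read_pfn(struct mm_struct *mm, unsigned long address,
			   unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(mm, address, &ptep, &ptl);
	if (ret)
		return ret;
	/* the pte lock is held here; it must be dropped by the caller */
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}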
+
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
@@ -3224,20 +3716,15 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 #endif
 
 /*
- * Access another process' address space.
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
+ * Access another process' address space as given in mm.  If non-NULL, use the
+ * given task for page fault accounting.
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long addr, void *buf, int len, int write)
 {
-       struct mm_struct *mm;
        struct vm_area_struct *vma;
        void *old_buf = buf;
 
-       mm = get_task_mm(tsk);
-       if (!mm)
-               return 0;
-
        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
@@ -3254,7 +3741,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
                         */
 #ifdef CONFIG_HAVE_IOREMAP_PROT
                        vma = find_vma(mm, addr);
-                       if (!vma)
+                       if (!vma || vma->vm_start > addr)
                                break;
                        if (vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
@@ -3286,11 +3773,47 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
-       mmput(mm);
 
        return buf - old_buf;
 }
 
+/**
+ * access_remote_vm - access another process' address space
+ * @mm:                the mm_struct of the target address space
+ * @addr:      start address to access
+ * @buf:       source or destination buffer
+ * @len:       number of bytes to transfer
+ * @write:     whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+               void *buf, int len, int write)
+{
+       return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be kernel space.
+ * Do not walk the page table directly; use get_user_pages().
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr,
+               void *buf, int len, int write)
+{
+       struct mm_struct *mm;
+       int ret;
+
+       mm = get_task_mm(tsk);
+       if (!mm)
+               return 0;
+
+       ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+       mmput(mm);
+
+       return ret;
+}
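
access_remote_vm() is the variant for callers that already hold a reference
on the mm, for example one taken with get_task_mm() as access_process_vm()
does above.  A hedged usage sketch; sketch_peek_remote() and its error
convention are illustrative only:

static int sketch_peek_remote(struct mm_struct *mm, unsigned long addr,
			      void *buf, int len)
{
	int copied;

	/* the final argument selects the direction: 0 reads, 1 writes */
	copied = access_remote_vm(mm, addr, buf, len, 0);
	return copied == len ? 0 : -EIO;
}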
+
 /*
  * Print the name of a VMA.
  */
@@ -3352,3 +3875,74 @@ void might_fault(void)
 }
 EXPORT_SYMBOL(might_fault);
 #endif
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+static void clear_gigantic_page(struct page *page,
+                               unsigned long addr,
+                               unsigned int pages_per_huge_page)
+{
+       int i;
+       struct page *p = page;
+
+       might_sleep();
+       for (i = 0; i < pages_per_huge_page;
+            i++, p = mem_map_next(p, page, i)) {
+               cond_resched();
+               clear_user_highpage(p, addr + i * PAGE_SIZE);
+       }
+}
+void clear_huge_page(struct page *page,
+                    unsigned long addr, unsigned int pages_per_huge_page)
+{
+       int i;
+
+       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+               clear_gigantic_page(page, addr, pages_per_huge_page);
+               return;
+       }
+
+       might_sleep();
+       for (i = 0; i < pages_per_huge_page; i++) {
+               cond_resched();
+               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
+       }
+}
+
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
+                                   unsigned long addr,
+                                   struct vm_area_struct *vma,
+                                   unsigned int pages_per_huge_page)
+{
+       int i;
+       struct page *dst_base = dst;
+       struct page *src_base = src;
+
+       for (i = 0; i < pages_per_huge_page; ) {
+               cond_resched();
+               copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+               i++;
+               dst = mem_map_next(dst, dst_base, i);
+               src = mem_map_next(src, src_base, i);
+       }
+}
+
+void copy_user_huge_page(struct page *dst, struct page *src,
+                        unsigned long addr, struct vm_area_struct *vma,
+                        unsigned int pages_per_huge_page)
+{
+       int i;
+
+       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
+               copy_user_gigantic_page(dst, src, addr, vma,
+                                       pages_per_huge_page);
+               return;
+       }
+
+       might_sleep();
+       for (i = 0; i < pages_per_huge_page; i++) {
+               cond_resched();
+               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
+       }
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
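
clear_huge_page() and copy_user_huge_page() above are the whole-huge-page
counterparts of clear_user_highpage() and copy_user_highpage(), intended for
the hugetlb and transparent-hugepage fault paths.  A hedged sketch of a
THP-style copy-on-write caller; the haddr/new_page/old_page arguments are
assumed to come from the surrounding fault code, and HPAGE_PMD_NR is the
pages-per-PMD-huge-page constant:

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Illustrative only: copy a whole PMD-sized page for a huge COW fault. */
static void sketch_huge_cow_copy(struct page *new_page, struct page *old_page,
				 unsigned long haddr,
				 struct vm_area_struct *vma)
{
	copy_user_huge_page(new_page, old_page, haddr, vma, HPAGE_PMD_NR);
}
#endif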