nv-tegra.nvidia Code Review - linux-2.6.git/blobdiff - mm/memory.c
mm: introduce dump_page() and print symbolic flag names
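Only the new call site shows up in the hunks below: print_bad_pte() drops its open-coded printk of page state in favour of dump_page(), which is added outside this file. As a hedged illustration of what "print symbolic flag names" means, here is a minimal userspace model of such a helper; the flag bit positions, the table contents and the output format are assumptions made for this sketch, not the kernel's actual implementation.

#include <stdio.h>

/* Assumed bit positions, for illustration only. */
#define PG_locked      0
#define PG_referenced  1
#define PG_uptodate    2
#define PG_dirty       3
#define PG_lru         4

static const struct {
        unsigned long mask;
        const char *name;
} flag_names[] = {
        { 1UL << PG_locked,     "locked"     },
        { 1UL << PG_referenced, "referenced" },
        { 1UL << PG_uptodate,   "uptodate"   },
        { 1UL << PG_dirty,      "dirty"      },
        { 1UL << PG_lru,        "lru"        },
};

/* Print a flags word both as hex and as a '|'-separated list of names. */
static void dump_flags(unsigned long flags)
{
        const char *sep = "";
        size_t i;

        printf("page flags: %#lx (", flags);
        for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++) {
                if (flags & flag_names[i].mask) {
                        printf("%s%s", sep, flag_names[i].name);
                        sep = "|";
                }
        }
        printf(")\n");
}

int main(void)
{
        dump_flags((1UL << PG_uptodate) | (1UL << PG_lru) | (1UL << PG_dirty));
        return 0;
}

The real helper also reports count, mapcount, mapping and index, the same fields the removed printk in print_bad_pte() used to show.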
index 7135d6b25995634b9387269b8f51d83497ee9378..5b7f2002e54b17e8d9b67e75cff0aaa06c9296b4 100644
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
@@ -56,6 +57,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include <asm/io.h>
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
@@ -106,6 +108,89 @@ static int __init disable_randmaps(char *s)
 }
 __setup("norandmaps", disable_randmaps);
 
+unsigned long zero_pfn __read_mostly;
+unsigned long highest_memmap_pfn __read_mostly;
+
+/*
+ * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
+ */
+static int __init init_zero_pfn(void)
+{
+       zero_pfn = page_to_pfn(ZERO_PAGE(0));
+       return 0;
+}
+core_initcall(init_zero_pfn);
+
+
+#if defined(SPLIT_RSS_COUNTING)
+
+void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
+{
+       int i;
+
+       for (i = 0; i < NR_MM_COUNTERS; i++) {
+               if (task->rss_stat.count[i]) {
+                       add_mm_counter(mm, i, task->rss_stat.count[i]);
+                       task->rss_stat.count[i] = 0;
+               }
+       }
+       task->rss_stat.events = 0;
+}
+
+static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
+{
+       struct task_struct *task = current;
+
+       if (likely(task->mm == mm))
+               task->rss_stat.count[member] += val;
+       else
+               add_mm_counter(mm, member, val);
+}
+#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
+#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
+
+/* sync counter once per 64 page faults */
+#define TASK_RSS_EVENTS_THRESH (64)
+static void check_sync_rss_stat(struct task_struct *task)
+{
+       if (unlikely(task != current))
+               return;
+       if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
+               __sync_task_rss_stat(task, task->mm);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+       long val = 0;
+
+       /*
+        * Don't go through task->mm here, to avoid needing get_task_mm();
+        * the caller must guarantee that mm remains valid.
+        */
+       val = atomic_long_read(&mm->rss_stat.count[member]);
+       /*
+        * The counter is updated asynchronously and may transiently go
+        * negative.  Callers never expect a negative value, so clamp to 0.
+        */
+       if (val < 0)
+               return 0;
+       return (unsigned long)val;
+}
+
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+       __sync_task_rss_stat(task, mm);
+}
+#else
+
+#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
+#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
+
+static void check_sync_rss_stat(struct task_struct *task)
+{
+}
+
+#endif
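To make the intent of the SPLIT_RSS_COUNTING block above concrete, here is a toy single-threaded userspace model, not kernel code; the names mirror the block above, but the three-counter layout and the driver loop are invented for the sketch. Each task accumulates RSS deltas in a private cache and folds them into the shared mm counters at most once per TASK_RSS_EVENTS_THRESH events, so the shared (normally atomic) counters see far less traffic on the fault path.

#include <stdio.h>

#define NR_MM_COUNTERS         3
#define TASK_RSS_EVENTS_THRESH 64

struct mm   { long rss[NR_MM_COUNTERS]; };               /* shared; atomic in the kernel */
struct task { int count[NR_MM_COUNTERS]; int events; };  /* private per-task cache */

/* Fold the private cache into the shared counters and reset it. */
static void sync_task_rss(struct task *t, struct mm *mm)
{
        for (int i = 0; i < NR_MM_COUNTERS; i++) {
                mm->rss[i] += t->count[i];
                t->count[i] = 0;
        }
        t->events = 0;
}

/* Fast path: touch only the private cache. */
static void add_fast(struct task *t, int member, int val)
{
        t->count[member] += val;
}

/* Called once per "fault": flush the batch only occasionally. */
static void check_sync(struct task *t, struct mm *mm)
{
        if (t->events++ > TASK_RSS_EVENTS_THRESH)
                sync_task_rss(t, mm);
}

int main(void)
{
        struct mm mm = { { 0 } };
        struct task t = { { 0 }, 0 };

        for (int fault = 0; fault < 200; fault++) {
                check_sync(&t, &mm);
                add_fast(&t, 0, 1);     /* e.g. one more anonymous page */
        }
        sync_task_rss(&t, &mm);         /* final flush, as on task exit/exec */
        printf("shared counter: %ld\n", mm.rss[0]);     /* 200 */
        return 0;
}

The real code only batches when current->mm == mm (see add_mm_counter_fast() above) and falls back to the shared counter otherwise, and readers such as get_mm_counter() clamp transiently negative values to zero.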
 
 /*
  * If a p?d_bad entry is found while walking page tables, report
@@ -135,11 +220,12 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                          unsigned long addr)
 {
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       pte_free_tlb(tlb, token);
+       pte_free_tlb(tlb, token, addr);
        tlb->mm->nr_ptes--;
 }
 
@@ -157,7 +243,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               free_pte_range(tlb, pmd);
+               free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);
 
        start &= PUD_MASK;
@@ -173,7 +259,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
-       pmd_free_tlb(tlb, pmd);
+       pmd_free_tlb(tlb, pmd, start);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -206,7 +292,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
-       pud_free_tlb(tlb, pud);
+       pud_free_tlb(tlb, pud, start);
 }
 
 /*
@@ -282,9 +368,10 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long addr = vma->vm_start;
 
                /*
-                * Hide vma from rmap and vmtruncate before freeing pgtables
+                * Hide vma from rmap and truncate_pagecache before freeing
+                * pgtables
                 */
-               anon_vma_unlink(vma);
+               unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
                if (is_vm_hugetlb_page(vma)) {
@@ -298,7 +385,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
-                               anon_vma_unlink(vma);
+                               unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
@@ -360,12 +447,20 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        return 0;
 }
 
-static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+static inline void init_rss_vec(int *rss)
 {
-       if (file_rss)
-               add_mm_counter(mm, file_rss, file_rss);
-       if (anon_rss)
-               add_mm_counter(mm, anon_rss, anon_rss);
+       memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+{
+       int i;
+
+       if (current->mm == mm)
+               sync_mm_rss(current, mm);
+       for (i = 0; i < NR_MM_COUNTERS; i++)
+               if (rss[i])
+                       add_mm_counter(mm, i, rss[i]);
 }
 
 /*
@@ -414,12 +509,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                current->comm,
                (long long)pte_val(pte), (long long)pmd_val(*pmd));
-       if (page) {
-               printk(KERN_ALERT
-               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
-               page, (void *)page->flags, page_count(page),
-               page_mapcount(page), page->mapping, page->index);
-       }
+       if (page)
+               dump_page(page);
        printk(KERN_ALERT
                "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
@@ -441,6 +532,20 @@ static inline int is_cow_mapping(unsigned int flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+#ifndef is_zero_pfn
+static inline int is_zero_pfn(unsigned long pfn)
+{
+       return pfn == zero_pfn;
+}
+#endif
+
+#ifndef my_zero_pfn
+static inline unsigned long my_zero_pfn(unsigned long addr)
+{
+       return zero_pfn;
+}
+#endif
+
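The #ifndef guards above let an architecture with more than one zero page (used for cache colouring on some MIPS configurations) provide its own definitions. A hypothetical override could look like the following; zero_page_base_pfn and NR_ZERO_PAGE_COLOURS are invented names for illustration, not any real architecture's code.

/* Hypothetical arch override: one zero page per cache colour. */
#define is_zero_pfn(pfn) \
        ((pfn) >= zero_page_base_pfn && \
         (pfn) < zero_page_base_pfn + NR_ZERO_PAGE_COLOURS)

#define my_zero_pfn(addr) \
        (zero_page_base_pfn + \
         (((addr) >> PAGE_SHIFT) & (NR_ZERO_PAGE_COLOURS - 1)))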
 /*
  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
@@ -496,7 +601,9 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
-               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+               if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+                       return NULL;
+               if (!is_zero_pfn(pfn))
                        print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
@@ -518,6 +625,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
+       if (is_zero_pfn(pfn))
+               return NULL;
 check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
@@ -538,7 +647,7 @@ out:
  * covered by this vma.
  */
 
-static inline void
+static inline unsigned long
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
                unsigned long addr, int *rss)
@@ -552,7 +661,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       swap_duplicate(entry);
+                       if (swap_duplicate(entry) < 0)
+                               return entry.val;
+
                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
@@ -561,7 +672,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
-                       if (is_write_migration_entry(entry) &&
+                       if (likely(!non_swap_entry(entry)))
+                               rss[MM_SWAPENTS]++;
+                       else if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both parent
@@ -595,31 +708,40 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        page = vm_normal_page(vma, addr, pte);
        if (page) {
                get_page(page);
-               page_dup_rmap(page, vma, addr);
-               rss[!!PageAnon(page)]++;
+               page_dup_rmap(page);
+               if (PageAnon(page))
+                       rss[MM_ANONPAGES]++;
+               else
+                       rss[MM_FILEPAGES]++;
        }
 
 out_set_pte:
        set_pte_at(dst_mm, addr, dst_pte, pte);
+       return 0;
 }
 
 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
                unsigned long addr, unsigned long end)
 {
+       pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
-       int rss[2];
+       int rss[NR_MM_COUNTERS];
+       swp_entry_t entry = (swp_entry_t){0};
 
 again:
-       rss[1] = rss[0] = 0;
+       init_rss_vec(rss);
+
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
        src_pte = pte_offset_map_nested(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+       orig_src_pte = src_pte;
+       orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();
 
        do {
@@ -637,16 +759,25 @@ again:
                        progress++;
                        continue;
                }
-               copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
+               entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+                                                       vma, addr, rss);
+               if (entry.val)
+                       break;
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
-       pte_unmap_nested(src_pte - 1);
-       add_mm_rss(dst_mm, rss[0], rss[1]);
-       pte_unmap_unlock(dst_pte - 1, dst_ptl);
+       pte_unmap_nested(orig_src_pte);
+       add_mm_rss_vec(dst_mm, rss);
+       pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
+
+       if (entry.val) {
+               if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
+                       return -ENOMEM;
+               progress = 0;
+       }
        if (addr != end)
                goto again;
        return 0;
@@ -766,8 +897,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        struct mm_struct *mm = tlb->mm;
        pte_t *pte;
        spinlock_t *ptl;
-       int file_rss = 0;
-       int anon_rss = 0;
+       int rss[NR_MM_COUNTERS];
+
+       init_rss_vec(rss);
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -813,14 +945,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                set_pte_at(mm, addr, pte,
                                           pgoff_to_pte(page->index));
                        if (PageAnon(page))
-                               anon_rss--;
+                               rss[MM_ANONPAGES]--;
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent) &&
                                    likely(!VM_SequentialReadHint(vma)))
                                        mark_page_accessed(page);
-                               file_rss--;
+                               rss[MM_FILEPAGES]--;
                        }
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
@@ -837,13 +969,18 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                if (pte_file(ptent)) {
                        if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
                                print_bad_pte(vma, addr, ptent, NULL);
-               } else if
-                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-                       print_bad_pte(vma, addr, ptent, NULL);
+               } else {
+                       swp_entry_t entry = pte_to_swp_entry(ptent);
+
+                       if (!non_swap_entry(entry))
+                               rss[MM_SWAPENTS]--;
+                       if (unlikely(!free_swap_and_cache(entry)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
-       add_mm_rss(mm, file_rss, anon_rss);
+       add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
@@ -906,6 +1043,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                details = NULL;
 
        BUG_ON(addr >= end);
+       mem_cgroup_uncharge_start();
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
@@ -918,6 +1056,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                                zap_work, details);
        } while (pgd++, addr = next, (addr != end && *zap_work > 0));
        tlb_end_vma(tlb, vma);
+       mem_cgroup_uncharge_end();
 
        return addr;
 }
@@ -1141,9 +1280,14 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
+
        page = vm_normal_page(vma, address, pte);
-       if (unlikely(!page))
-               goto bad_page;
+       if (unlikely(!page)) {
+               if ((flags & FOLL_DUMP) ||
+                   !is_zero_pfn(pte_pfn(pte)))
+                       goto bad_page;
+               page = pte_page(pte);
+       }
 
        if (flags & FOLL_GET)
                get_page(page);
@@ -1171,65 +1315,46 @@ no_page:
        pte_unmap_unlock(ptep, ptl);
        if (!pte_none(pte))
                return page;
-       /* Fall through to ZERO_PAGE handling */
+
 no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
-        * has touched so far, we don't want to allocate page tables.
+        * has touched so far, we don't want to allocate unnecessary pages or
+        * page tables.  Return error instead of NULL to skip handle_mm_fault,
+        * then get_dump_page() will return NULL to leave a hole in the dump.
+        * But we can only make this optimization where a hole would surely
+        * be zero-filled if handle_mm_fault() actually did handle it.
         */
-       if (flags & FOLL_ANON) {
-               page = ZERO_PAGE(0);
-               if (flags & FOLL_GET)
-                       get_page(page);
-               BUG_ON(flags & FOLL_WRITE);
-       }
+       if ((flags & FOLL_DUMP) &&
+           (!vma->vm_ops || !vma->vm_ops->fault))
+               return ERR_PTR(-EFAULT);
        return page;
 }
 
-/* Can we do the FOLL_ANON optimization? */
-static inline int use_zero_page(struct vm_area_struct *vma)
-{
-       /*
-        * We don't want to optimize FOLL_ANON for make_pages_present()
-        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
-        * we want to get the page from the page tables to make sure
-        * that we serialize and update with any other user of that
-        * mapping.
-        */
-       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
-               return 0;
-       /*
-        * And if we have a fault routine, it's not an anonymous region.
-        */
-       return !vma->vm_ops || !vma->vm_ops->fault;
-}
-
-
-
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                    unsigned long start, int len, int flags,
-               struct page **pages, struct vm_area_struct **vmas)
+                    unsigned long start, int nr_pages, unsigned int gup_flags,
+                    struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags = 0;
-       int write = !!(flags & GUP_FLAGS_WRITE);
-       int force = !!(flags & GUP_FLAGS_FORCE);
-       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
-       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
+       unsigned long vm_flags;
 
-       if (len <= 0)
+       if (nr_pages <= 0)
                return 0;
+
+       VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
        /* 
         * Require read or write permissions.
-        * If 'force' is set, we only require the "MAY" flags.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
         */
-       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = (gup_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (gup_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0;
 
        do {
                struct vm_area_struct *vma;
-               unsigned int foll_flags;
 
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(tsk, start)) {
@@ -1241,7 +1366,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pte_t *pte;
 
                        /* user gate pages are read-only */
-                       if (!ignore && write)
+                       if (gup_flags & FOLL_WRITE)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1269,53 +1394,45 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                vmas[i] = gate_vma;
                        i++;
                        start += PAGE_SIZE;
-                       len--;
+                       nr_pages--;
                        continue;
                }
 
                if (!vma ||
                    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
-                   (!ignore && !(vm_flags & vma->vm_flags)))
+                   !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &len, i, write);
+                                       &start, &nr_pages, i, gup_flags);
                        continue;
                }
 
-               foll_flags = FOLL_TOUCH;
-               if (pages)
-                       foll_flags |= FOLL_GET;
-               if (!write && use_zero_page(vma))
-                       foll_flags |= FOLL_ANON;
-
                do {
                        struct page *page;
+                       unsigned int foll_flags = gup_flags;
 
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
-                        * pages and potentially allocating memory, unless
-                        * current is handling munlock--e.g., on exit. In
-                        * that case, we are not allocating memory.  Rather,
-                        * we're only unlocking already resident/mapped pages.
+                        * pages and potentially allocating memory.
                         */
-                       if (unlikely(!ignore_sigkill &&
-                                       fatal_signal_pending(current)))
+                       if (unlikely(fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
 
-                       if (write)
-                               foll_flags |= FOLL_WRITE;
-
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
+
                                ret = handle_mm_fault(mm, vma, start,
-                                               foll_flags & FOLL_WRITE);
+                                       (foll_flags & FOLL_WRITE) ?
+                                       FAULT_FLAG_WRITE : 0);
+
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
-                                       else if (ret & VM_FAULT_SIGBUS)
+                                       if (ret &
+                                           (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
@@ -1354,9 +1471,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                vmas[i] = vma;
                        i++;
                        start += PAGE_SIZE;
-                       len--;
-               } while (len && start < vma->vm_end);
-       } while (len);
+                       nr_pages--;
+               } while (nr_pages && start < vma->vm_end);
+       } while (nr_pages);
        return i;
 }
 
@@ -1365,7 +1482,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:       task_struct of target task
  * @mm:                mm_struct of target mm
  * @start:     starting user address
- * @len:       number of pages from start to pin
+ * @nr_pages:  number of pages from start to pin
  * @write:     whether pages will be written to by the caller
  * @force:     whether to force write access even if user mapping is
  *             readonly. This will result in the page being COWed even
@@ -1377,7 +1494,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *             Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
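With the rename to nr_pages here and the switch to FOLL_* flags inside __get_user_pages() above, a typical in-kernel caller of the public interface still looks like the fragment below. This is a hedged sketch of kernel context, not a standalone program: uaddr is an assumed local, and error handling is trimmed.

        struct page *page;
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1 /* nr_pages */, 0 /* write */, 0 /* force */,
                             &page, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret == 1) {
                /* ... read from the pinned page ... */
                put_page(page);         /* release the pin taken by FOLL_GET */
        }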
@@ -1411,23 +1528,50 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
+               unsigned long start, int nr_pages, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
 {
-       int flags = 0;
+       int flags = FOLL_TOUCH;
 
+       if (pages)
+               flags |= FOLL_GET;
        if (write)
-               flags |= GUP_FLAGS_WRITE;
+               flags |= FOLL_WRITE;
        if (force)
-               flags |= GUP_FLAGS_FORCE;
+               flags |= FOLL_FORCE;
 
-       return __get_user_pages(tsk, mm,
-                               start, len, flags,
-                               pages, vmas);
+       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
-
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * get_dump_page() - pin user page in memory while writing it to core dump
+ * @addr: user address
+ *
+ * Returns struct page pointer of user page pinned for dump,
+ * to be freed afterwards by page_cache_release() or put_page().
+ *
+ * Returns NULL on any kind of failure - a hole must then be inserted into
+ * the corefile, to preserve alignment with its headers; and also returns
+ * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
+ * allowing a hole to be left in the corefile to save diskspace.
+ *
+ * Called without mmap_sem, but after all other threads have been killed.
+ */
+#ifdef CONFIG_ELF_CORE
+struct page *get_dump_page(unsigned long addr)
+{
+       struct vm_area_struct *vma;
+       struct page *page;
+
+       if (__get_user_pages(current, current->mm, addr, 1,
+                       FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+               return NULL;
+       flush_cache_page(vma, addr, page_to_pfn(page));
+       return page;
+}
+#endif /* CONFIG_ELF_CORE */
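For context, the intended consumer of get_dump_page() is the ELF core dumper; its per-vma loop has roughly the shape of the fragment below (illustrative only: the routine that writes bytes to the core file is elided, and a NULL return simply becomes a hole).

        for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);

                if (page) {
                        void *kaddr = kmap(page);
                        /* ... write PAGE_SIZE bytes from kaddr to the core file ... */
                        kunmap(page);
                        page_cache_release(page);       /* drop the pin */
                } else {
                        /* ... leave a PAGE_SIZE zero-filled hole instead ... */
                }
        }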
+
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
@@ -1470,7 +1614,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
        /* Ok, finally just insert the thing.. */
        get_page(page);
-       inc_mm_counter(mm, file_rss);
+       inc_mm_counter_fast(mm, MM_FILEPAGES);
        page_add_file_rmap(page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -1536,7 +1680,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        /* Ok, finally just insert the thing.. */
        entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+       update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
@@ -1605,7 +1749,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * If we don't have pte special, then we have to use the pfn_valid()
         * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
         * refcount the page if pfn_valid is true (hence insert_page rather
-        * than insert_pfn).
+        * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
+        * without pte special, it would there be refcounted as a normal page.
         */
        if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
                struct page *page;
@@ -1780,10 +1925,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        token = pmd_pgtable(*pmd);
 
        do {
-               err = fn(pte, token, addr, data);
+               err = fn(pte++, token, addr, data);
                if (err)
                        break;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (addr += PAGE_SIZE, addr != end);
 
        arch_leave_lazy_mmu_mode();
 
@@ -1971,7 +2116,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
-       if (PageAnon(old_page)) {
+       if (PageAnon(old_page) && !PageKsm(old_page)) {
                if (!trylock_page(old_page)) {
                        page_cache_get(old_page);
                        pte_unmap_unlock(page_table, ptl);
@@ -1986,6 +2131,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_release(old_page);
                }
                reuse = reuse_swap_page(old_page);
+               if (reuse)
+                       /*
+                        * The page is all ours.  Move it to our anon_vma so
+                        * the rmap code will not search our parent or siblings.
+                        * Protected against the rmap code by the page lock.
+                        */
+                       page_move_anon_rmap(old_page, vma, address);
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
@@ -2058,7 +2210,7 @@ reuse:
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (ptep_set_access_flags(vma, address, page_table, entry,1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache(vma, address, page_table);
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
@@ -2072,10 +2224,19 @@ gotten:
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       VM_BUG_ON(old_page == ZERO_PAGE(0));
-       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-       if (!new_page)
-               goto oom;
+
+       if (is_zero_pfn(pte_pfn(orig_pte))) {
+               new_page = alloc_zeroed_user_highpage_movable(vma, address);
+               if (!new_page)
+                       goto oom;
+       } else {
+               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+               if (!new_page)
+                       goto oom;
+               cow_user_page(new_page, old_page, address, vma);
+       }
+       __SetPageUptodate(new_page);
+
        /*
         * Don't let another task, with possibly unlocked vma,
         * keep the mlocked page.
@@ -2085,8 +2246,6 @@ gotten:
                clear_page_mlock(old_page);
                unlock_page(old_page);
        }
-       cow_user_page(new_page, old_page, address, vma);
-       __SetPageUptodate(new_page);
 
        if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
@@ -2098,11 +2257,11 @@ gotten:
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
                        if (!PageAnon(old_page)) {
-                               dec_mm_counter(mm, file_rss);
-                               inc_mm_counter(mm, anon_rss);
+                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+                               inc_mm_counter_fast(mm, MM_ANONPAGES);
                        }
                } else
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2112,10 +2271,15 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush_notify(vma, address, page_table);
+               ptep_clear_flush(vma, address, page_table);
                page_add_new_anon_rmap(new_page, vma, address);
-               set_pte_at(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
+               /*
+                * We call the notify macro here because, when using secondary
+                * mmu page tables (such as kvm shadow page tables), we want the
+                * new page to be mapped directly into the secondary page table.
+                */
+               set_pte_at_notify(mm, address, page_table, entry);
+               update_mmu_cache(vma, address, page_table);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
@@ -2357,7 +2521,7 @@ restart:
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
- * boundary.  Note that this is different from vmtruncate(), which
+ * boundary.  Note that this is different from truncate_pagecache(), which
  * must keep the partial page.  In contrast, we must get rid of
  * partial pages.
  * @holelen: size of prospective hole in bytes.  This will be rounded
@@ -2408,63 +2572,6 @@ void unmap_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
-/**
- * vmtruncate - unmap mappings "freed" by truncate() syscall
- * @inode: inode of the file used
- * @offset: file offset to start truncating
- *
- * NOTE! We have to be ready to update the memory sharing
- * between the file and the memory map for a potential last
- * incomplete page.  Ugly, but necessary.
- */
-int vmtruncate(struct inode * inode, loff_t offset)
-{
-       if (inode->i_size < offset) {
-               unsigned long limit;
-
-               limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-               if (limit != RLIM_INFINITY && offset > limit)
-                       goto out_sig;
-               if (offset > inode->i_sb->s_maxbytes)
-                       goto out_big;
-               i_size_write(inode, offset);
-       } else {
-               struct address_space *mapping = inode->i_mapping;
-
-               /*
-                * truncation of in-use swapfiles is disallowed - it would
-                * cause subsequent swapout to scribble on the now-freed
-                * blocks.
-                */
-               if (IS_SWAPFILE(inode))
-                       return -ETXTBSY;
-               i_size_write(inode, offset);
-
-               /*
-                * unmap_mapping_range is called twice, first simply for
-                * efficiency so that truncate_inode_pages does fewer
-                * single-page unmaps.  However after this first call, and
-                * before truncate_inode_pages finishes, it is possible for
-                * private pages to be COWed, which remain after
-                * truncate_inode_pages finishes, hence the second
-                * unmap_mapping_range call must be made for correctness.
-                */
-               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-               truncate_inode_pages(mapping, offset);
-               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       }
-
-       if (inode->i_op->truncate)
-               inode->i_op->truncate(inode);
-       return 0;
-
-out_sig:
-       send_sig(SIGXFSZ, current, 0);
-out_big:
-       return -EFBIG;
-}
-EXPORT_SYMBOL(vmtruncate);
-
 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 {
        struct address_space *mapping = inode->i_mapping;
@@ -2496,7 +2603,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
        struct page *page;
@@ -2509,14 +2616,21 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out;
 
        entry = pte_to_swp_entry(orig_pte);
-       if (is_migration_entry(entry)) {
-               migration_entry_wait(mm, pmd, address);
+       if (unlikely(non_swap_entry(entry))) {
+               if (is_migration_entry(entry)) {
+                       migration_entry_wait(mm, pmd, address);
+               } else if (is_hwpoison_entry(entry)) {
+                       ret = VM_FAULT_HWPOISON;
+               } else {
+                       print_bad_pte(vma, address, orig_pte, NULL);
+                       ret = VM_FAULT_SIGBUS;
+               }
                goto out;
        }
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
-               grab_swap_token(); /* Contend for token _before_ read-in */
+               grab_swap_token(mm); /* Contend for token _before_ read-in */
                page = swapin_readahead(entry,
                                        GFP_HIGHUSER_MOVABLE, vma, address);
                if (!page) {
@@ -2534,11 +2648,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
+       } else if (PageHWPoison(page)) {
+               /*
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
+                */
+               ret = VM_FAULT_HWPOISON;
+               delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+               goto out_release;
        }
 
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+       page = ksm_might_need_to_copy(page, vma, address);
+       if (!page) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                goto out_page;
@@ -2570,11 +2698,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * discarded at swap_free().
         */
 
-       inc_mm_counter(mm, anon_rss);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
+       dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && reuse_swap_page(page)) {
+       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-               write_access = 0;
+               flags &= ~FAULT_FLAG_WRITE;
        }
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2716,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                try_to_free_swap(page);
        unlock_page(page);
 
-       if (write_access) {
+       if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
                if (ret & VM_FAULT_ERROR)
                        ret &= VM_FAULT_ERROR;
@@ -2595,7 +2724,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, pte);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
 out:
@@ -2605,6 +2734,7 @@ out_nomap:
        pte_unmap_unlock(page_table, ptl);
 out_page:
        unlock_page(page);
+out_release:
        page_cache_release(page);
        return ret;
 }
@@ -2616,12 +2746,22 @@ out_page:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access)
+               unsigned int flags)
 {
        struct page *page;
        spinlock_t *ptl;
        pte_t entry;
 
+       if (!(flags & FAULT_FLAG_WRITE)) {
+               entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+                                               vma->vm_page_prot));
+               ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
+               if (!pte_none(*page_table))
+                       goto unlock;
+               goto setpte;
+       }
+
        /* Allocate our own private page. */
        pte_unmap(page_table);
 
@@ -2636,17 +2776,20 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       if (vma->vm_flags & VM_WRITE)
+               entry = pte_mkwrite(pte_mkdirty(entry));
 
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!pte_none(*page_table))
                goto release;
-       inc_mm_counter(mm, anon_rss);
+
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, address);
+setpte:
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
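The !FAULT_FLAG_WRITE branch added above reinstates the shared zero page for anonymous memory that has only ever been read. The effect is visible from userspace: reading untouched pages of a private anonymous mapping returns zeroes without charging anonymous RSS, and real pages only appear once the first write COWs them. A small demo (plain Linux C, assuming 4 KB pages; watch VmRSS in /proc/self/status while it runs):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 64UL << 20;        /* 64 MB */
        unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long sum = 0;

        if (p == MAP_FAILED)
                return 1;

        for (size_t i = 0; i < len; i += 4096)
                sum += p[i];            /* read faults hit the zero page */
        printf("sum over untouched pages: %lu\n", sum); /* always 0 */

        memset(p, 1, len);              /* write faults allocate real pages */
        munmap(p, len);
        return 0;
}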
@@ -2697,6 +2840,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
                return ret;
 
+       if (unlikely(PageHWPoison(vmf.page))) {
+               if (ret & VM_FAULT_LOCKED)
+                       unlock_page(vmf.page);
+               return VM_FAULT_HWPOISON;
+       }
+
        /*
         * For consistency in subsequent calls, make the faulted page always
         * locked.
@@ -2776,7 +2925,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * due to the bad i386 page protection. But it's valid
         * for other architectures too.
         *
-        * Note that if write_access is true, we either now have
+        * Note that if FAULT_FLAG_WRITE is set, we either now have
         * an exclusive copy of the page, or this is a shared mapping,
         * so we can make it writable and dirty to avoid having to
         * handle that later.
@@ -2788,10 +2937,10 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (anon) {
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                        page_add_new_anon_rmap(page, vma, address);
                } else {
-                       inc_mm_counter(mm, file_rss);
+                       inc_mm_counter_fast(mm, MM_FILEPAGES);
                        page_add_file_rmap(page);
                        if (flags & FAULT_FLAG_WRITE) {
                                dirty_page = page;
@@ -2801,7 +2950,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
        } else {
                if (charged)
                        mem_cgroup_uncharge_page(page);
@@ -2847,11 +2996,10 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-       unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
        pte_unmap(page_table);
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +3016,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
-       unsigned int flags = FAULT_FLAG_NONLINEAR |
-                               (write_access ? FAULT_FLAG_WRITE : 0);
        pgoff_t pgoff;
 
+       flags |= FAULT_FLAG_NONLINEAR;
+
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
@@ -2882,7 +3030,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 * Page table corrupted: show pte and kill process.
                 */
                print_bad_pte(vma, address, orig_pte, NULL);
-               return VM_FAULT_OOM;
+               return VM_FAULT_SIGBUS;
        }
 
        pgoff = pte_to_pgoff(orig_pte);
@@ -2904,7 +3052,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
-               pte_t *pte, pmd_t *pmd, int write_access)
+               pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
        pte_t entry;
        spinlock_t *ptl;
@@ -2915,31 +3063,31 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                        if (vma->vm_ops) {
                                if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
-                                               pte, pmd, write_access, entry);
+                                               pte, pmd, flags, entry);
                        }
                        return do_anonymous_page(mm, vma, address,
-                                                pte, pmd, write_access);
+                                                pte, pmd, flags);
                }
                if (pte_file(entry))
                        return do_nonlinear_fault(mm, vma, address,
-                                       pte, pmd, write_access, entry);
+                                       pte, pmd, flags, entry);
                return do_swap_page(mm, vma, address,
-                                       pte, pmd, write_access, entry);
+                                       pte, pmd, flags, entry);
        }
 
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (unlikely(!pte_same(*pte, entry)))
                goto unlock;
-       if (write_access) {
+       if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address,
                                        pte, pmd, ptl, entry);
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
-               update_mmu_cache(vma, address, entry);
+       if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
+               update_mmu_cache(vma, address, pte);
        } else {
                /*
                 * This is needed only for protection faults but the arch code
@@ -2947,7 +3095,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                 * This still avoids useless tlb flushes for .text page faults
                 * with threads.
                 */
-               if (write_access)
+               if (flags & FAULT_FLAG_WRITE)
                        flush_tlb_page(vma, address);
        }
 unlock:
@@ -2959,7 +3107,7 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, int write_access)
+               unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -2970,8 +3118,11 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        count_vm_event(PGFAULT);
 
+       /* do counter updates before entering really critical section. */
+       check_sync_rss_stat(current);
+
        if (unlikely(is_vm_hugetlb_page(vma)))
-               return hugetlb_fault(mm, vma, address, write_access);
+               return hugetlb_fault(mm, vma, address, flags);
 
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +3135,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte)
                return VM_FAULT_OOM;
 
-       return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+       return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
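With the write_access int replaced by a flags word throughout, each architecture's page-fault handler now encodes the access type before calling in; the call site becomes something like the fragment below (variable names illustrative), mirroring what __get_user_pages() above already does with FAULT_FLAG_WRITE.

        unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
        int fault;

        fault = handle_mm_fault(mm, vma, address, flags);
        if (fault & VM_FAULT_ERROR) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }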
 
 #ifndef __PAGETABLE_PUD_FOLDED
@@ -3140,55 +3291,58 @@ out:
        return -EINVAL;
 }
 
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn)
+{
+       int ret = -EINVAL;
+       spinlock_t *ptl;
+       pte_t *ptep;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+
+       ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
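A driver that already owns a VM_IO or VM_PFNMAP vma (for instance from its own mmap handler) can now use the exported helper instead of walking page tables by hand. A hedged fragment, assuming mmap_sem is held for read and user_addr is an assumed local holding a mapped address in that vma:

        unsigned long pfn;

        if (follow_pfn(vma, user_addr, &pfn) == 0)
                pr_info("virt %#lx -> pfn %#lx\n", user_addr, pfn);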
+
 #ifdef CONFIG_HAVE_IOREMAP_PROT
 int follow_phys(struct vm_area_struct *vma,
                unsigned long address, unsigned int flags,
                unsigned long *prot, resource_size_t *phys)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
+       int ret = -EINVAL;
        pte_t *ptep, pte;
        spinlock_t *ptl;
-       resource_size_t phys_addr = 0;
-       struct mm_struct *mm = vma->vm_mm;
-       int ret = -EINVAL;
 
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;
 
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto out;
-
-       pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto out;
-
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto out;
-
-       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
-       if (pmd_huge(*pmd))
-               goto out;
-
-       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       if (!ptep)
+       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                goto out;
-
        pte = *ptep;
-       if (!pte_present(pte))
-               goto unlock;
+
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
-       phys_addr = pte_pfn(pte);
-       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
-       *phys = phys_addr;
-       ret = 0;
+       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
 
+       ret = 0;
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out: