memcg: revert gfp mask fix
[linux-2.6.git] / mm / memory.c
index f82b359b27452dd1f7e682197479fa1391d19a46..e5bfbe6b594cde7662df86d0b5386fe59abfbb86 100644 (file)
 #include <linux/delayacct.h>
 #include <linux/init.h>
 #include <linux/writeback.h>
+#include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/swapops.h>
+#include <linux/elf.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -57,8 +62,7 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
-#include <linux/swapops.h>
-#include <linux/elf.h>
+#include "internal.h"
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
@@ -82,7 +86,18 @@ void * high_memory;
 EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 
-int randomize_va_space __read_mostly = 1;
+/*
+ * Randomize the address space (stacks, mmaps, brk, etc.).
+ *
+ * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
+ *   as ancient (libc5 based) binaries can segfault. )
+ */
+int randomize_va_space __read_mostly =
+#ifdef CONFIG_COMPAT_BRK
+                                       1;
+#else
+                                       2;
+#endif
 
 static int __init disable_randmaps(char *s)
 {
@@ -122,11 +137,9 @@ void pmd_clear_bad(pmd_t *pmd)
  */
 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
-       struct page *page = pmd_page(*pmd);
+       pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       pte_lock_deinit(page);
-       pte_free_tlb(tlb, page);
-       dec_zone_page_state(page, NR_PAGETABLE);
+       pte_free_tlb(tlb, token);
        tlb->mm->nr_ptes--;
 }
 
@@ -201,7 +214,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
 {
@@ -252,19 +265,16 @@ void free_pgd_range(struct mmu_gather **tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset((*tlb)->mm, addr);
+       pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
-
-       if (!(*tlb)->fullmm)
-               flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
 {
        while (vma) {
@@ -300,21 +310,34 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-       struct page *new = pte_alloc_one(mm, address);
+       pgtable_t new = pte_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;
 
-       pte_lock_init(new);
+       /*
+        * Ensure all pte setup (eg. pte page lock and page clearing) is
+        * visible before the pte is made visible to other CPUs by being
+        * put into page tables.
+        *
+        * The other side of the story is the pointer chasing in the page
+        * table walking code (when walking the page table without locking;
+        * ie. most of the time). Fortunately, these data accesses consist
+        * of a chain of data-dependent loads, meaning most CPUs (alpha
+        * being the notable exception) will already guarantee loads are
+        * seen in-order. See the alpha page table accessors for the
+        * smp_read_barrier_depends() barriers in page table walking code.
+        */
+       smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
        spin_lock(&mm->page_table_lock);
-       if (pmd_present(*pmd)) {        /* Another has populated it */
-               pte_lock_deinit(new);
-               pte_free(new);
-       } else {
+       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                mm->nr_ptes++;
-               inc_zone_page_state(new, NR_PAGETABLE);
                pmd_populate(mm, pmd, new);
+               new = NULL;
        }
        spin_unlock(&mm->page_table_lock);
+       if (new)
+               pte_free(mm, new);
        return 0;
 }
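
The allocate-outside-the-lock, publish-under-the-lock shape that __pte_alloc() takes here (and that __pte_alloc_kernel() mirrors in the next hunk) is a general pattern: build the object with no locks held, take the lock only to check whether someone else already populated the slot, and free the unpublished copy after dropping the lock. A minimal userspace sketch of the same pattern follows; slot, slot_lock and install_node() are made-up names, and a pthread mutex stands in for the smp_wmb()/page_table_lock ordering in the kernel code.

/*
 * Minimal userspace sketch of the publish-or-free pattern above:
 * allocate outside the lock, publish under the lock only if nobody
 * beat us to it, free the unpublished copy afterwards.  Not kernel
 * API; pthread_mutex_lock() provides the ordering that smp_wmb()
 * plus page_table_lock provide in __pte_alloc().
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node { int payload; } *slot;	/* shared, initially NULL */

static int install_node(void)
{
	struct node *new = calloc(1, sizeof(*new));	/* outside the lock */

	if (!new)
		return -1;				/* -ENOMEM analogue */

	pthread_mutex_lock(&slot_lock);
	if (!slot) {		/* has another thread populated it? */
		slot = new;
		new = NULL;	/* published: must not be freed */
	}
	pthread_mutex_unlock(&slot_lock);

	free(new);		/* NULL if we won the race */
	return 0;
}

int main(void)
{
	return install_node();
}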
 
@@ -324,12 +347,16 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&init_mm.page_table_lock);
-       if (pmd_present(*pmd))          /* Another has populated it */
-               pte_free_kernel(new);
-       else
+       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                pmd_populate_kernel(&init_mm, pmd, new);
+               new = NULL;
+       }
        spin_unlock(&init_mm.page_table_lock);
+       if (new)
+               pte_free_kernel(&init_mm, new);
        return 0;
 }
 
@@ -348,14 +375,65 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
-{
-       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
-                       "vm_flags = %lx, vaddr = %lx\n",
-               (long long)pte_val(pte),
-               (vma->vm_mm == current->mm ? current->comm : "???"),
-               vma->vm_flags, vaddr);
+static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+                         pte_t pte, struct page *page)
+{
+       pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+       struct address_space *mapping;
+       pgoff_t index;
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       return;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                               "BUG: Bad page map: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
+       }
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
+       index = linear_page_index(vma, addr);
+
+       printk(KERN_ALERT
+               "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
+               current->comm,
+               (long long)pte_val(pte), (long long)pmd_val(*pmd));
+       if (page) {
+               printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+       }
+       printk(KERN_ALERT
+               "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+               (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+       if (vma->vm_ops)
+               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
+                               (unsigned long)vma->vm_ops->fault);
+       if (vma->vm_file && vma->vm_file->f_op)
+               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
+                               (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
+       add_taint(TAINT_BAD_PAGE);
 }
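
The throttle print_bad_pte() now uses -- a burst of 60 reports, then silence until a minute after the burst began, with a count of whatever was suppressed -- is a self-contained pattern. Below is a rough userspace rendering of the same logic, with time() standing in for jiffies; throttled_report() and BURST are made-up names.

/*
 * Userspace rendering of print_bad_pte()'s throttle: allow a burst of
 * BURST reports, then stay quiet until a minute after the burst began,
 * remembering how many reports were dropped.  Single-threaded use only.
 */
#include <stdio.h>
#include <time.h>

#define BURST 60

static void throttled_report(const char *msg)
{
	static time_t resume;
	static unsigned long nr_shown, nr_unshown;
	time_t now = time(NULL);

	if (nr_shown == BURST) {
		if (now < resume) {	/* still in the quiet period */
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printf("%lu messages suppressed\n", nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;		/* start a fresh burst */
	}
	if (nr_shown++ == 0)
		resume = now + 60;	/* quiet period ends a minute from now */

	printf("%s\n", msg);
}

int main(void)
{
	for (int i = 0; i < 200; i++)
		throttled_report("bad page map");
	return 0;
}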
 
 static inline int is_cow_mapping(unsigned int flags)
@@ -364,55 +442,93 @@ static inline int is_cow_mapping(unsigned int flags)
 }
 
 /*
- * This function gets the "struct page" associated with a pte.
+ * vm_normal_page -- This function gets the "struct page" associated with a pte.
+ *
+ * "Special" mappings do not wish to be associated with a "struct page" (either
+ * it doesn't exist, or it exists but they don't want to touch it). In this
+ * case, NULL is returned here. "Normal" mappings do have a struct page.
  *
- * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
- * will have each page table entry just pointing to a raw page frame
- * number, and as far as the VM layer is concerned, those do not have
- * pages associated with them - even if the PFN might point to memory
- * that otherwise is perfectly fine and has a "struct page".
+ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
+ * pte bit, in which case this function is trivial. Secondly, an architecture
+ * may not have a spare pte bit, which requires a more complicated scheme,
+ * described below.
  *
- * The way we recognize those mappings is through the rules set up
- * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
- * and the vm_pgoff will point to the first PFN mapped: thus every
- * page that is a raw mapping will always honor the rule
+ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
+ * special mapping (even if there are underlying and valid "struct pages").
+ * COWed pages of a VM_PFNMAP are always normal.
+ *
+ * The way we recognize COWed pages within VM_PFNMAP mappings is through the
+ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
+ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
+ * mapping will always honor the rule
  *
  *     pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  *
- * and if that isn't true, the page has been COW'ed (in which case it
- * _does_ have a "struct page" associated with it even if it is in a
- * VM_PFNMAP range).
+ * And for normal mappings this is false.
+ *
+ * This restricts such mappings to be a linear translation from virtual address
+ * to pfn. To get around this restriction, we allow arbitrary mappings so long
+ * as the vma is not a COW mapping; in that case, we know that all ptes are
+ * special (because none can have been COWed).
+ *
+ *
+ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
+ *
+ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
+ * page" backing, however the difference is that _all_ pages with a struct
+ * page (that is, those where pfn_valid is true) are refcounted and considered
+ * normal pages by the VM. The disadvantage is that pages are refcounted
+ * (which can be slower and simply not an option for some PFNMAP users). The
+ * advantage is that we don't have to follow the strict linearity rule of
+ * PFNMAP mappings in order to support COWable mappings.
+ *
  */
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+# define HAVE_PTE_SPECIAL 1
+#else
+# define HAVE_PTE_SPECIAL 0
+#endif
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                               pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
 
-       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
-               unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
-               if (pfn == vma->vm_pgoff + off)
-                       return NULL;
-               if (!is_cow_mapping(vma->vm_flags))
-                       return NULL;
+       if (HAVE_PTE_SPECIAL) {
+               if (likely(!pte_special(pte)))
+                       goto check_pfn;
+               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+                       print_bad_pte(vma, addr, pte, NULL);
+               return NULL;
        }
 
-       /*
-        * Add some anal sanity checks for now. Eventually,
-        * we should just do "return pfn_to_page(pfn)", but
-        * in the meantime we check that we get a valid pfn,
-        * and that the resulting page looks ok.
-        */
-       if (unlikely(!pfn_valid(pfn))) {
-               print_bad_pte(vma, pte, addr);
+       /* !HAVE_PTE_SPECIAL case follows: */
+
+       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+               if (vma->vm_flags & VM_MIXEDMAP) {
+                       if (!pfn_valid(pfn))
+                               return NULL;
+                       goto out;
+               } else {
+                       unsigned long off;
+                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
+                       if (pfn == vma->vm_pgoff + off)
+                               return NULL;
+                       if (!is_cow_mapping(vma->vm_flags))
+                               return NULL;
+               }
+       }
+
+check_pfn:
+       if (unlikely(pfn > highest_memmap_pfn)) {
+               print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
 
        /*
-        * NOTE! We still have PageReserved() pages in the page 
-        * tables. 
-        *
-        * The PAGE_ZERO() pages and various VDSO mappings can
-        * cause them to exist.
+        * NOTE! We still have PageReserved() pages in the page tables.
+        * eg. VDSO mappings can cause them to exist.
         */
+out:
        return pfn_to_page(pfn);
 }
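
The linearity rule quoted in the comment above, pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), is what lets the !pte_special path tell a raw remap_pfn_range() pte from one that has since been COWed. A standalone sketch of just that arithmetic; fake_vma and the constants are stand-ins, not the kernel's structures.

/*
 * Standalone illustration of the VM_PFNMAP linearity rule: a pte whose
 * pfn still matches the original linear remap is the "special" raw
 * mapping (no struct page to touch); in a COW mapping, anything else
 * must be a COWed, normal page.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct fake_vma {
	unsigned long vm_start;	/* first user address of the mapping */
	unsigned long vm_pgoff;	/* pfn backing that first address */
};

static bool pte_is_raw_pfnmap(const struct fake_vma *vma,
			      unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	return pfn == vma->vm_pgoff + off;	/* still the original remap */
}

int main(void)
{
	struct fake_vma vma = { .vm_start = 0x10000000UL, .vm_pgoff = 0x80000UL };

	/* Untouched page of the remap: rule holds, so treated as special. */
	printf("%d\n", pte_is_raw_pfnmap(&vma, vma.vm_start + 0x3000, 0x80003UL));
	/* COWed page: pfn points at freshly allocated memory, rule fails. */
	printf("%d\n", pte_is_raw_pfnmap(&vma, vma.vm_start + 0x3000, 0x12345UL));
	return 0;
}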
 
@@ -514,8 +630,7 @@ again:
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
-                           need_lockbreak(src_ptl) ||
-                           need_lockbreak(dst_ptl))
+                           spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                if (pte_none(*src_pte)) {
@@ -588,6 +703,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
+       int ret;
 
        /*
         * Don't copy ptes where a page fault will fill them correctly.
@@ -603,17 +719,43 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       if (unlikely(is_pfn_mapping(vma))) {
+               /*
+                * We do not free on error cases below as remove_vma
+                * gets called on error from higher level routine
+                */
+               ret = track_pfn_vma_copy(vma);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * We need to invalidate the secondary MMU mappings only when
+        * there could be a permission downgrade on the ptes of the
+        * parent mm. And a permission downgrade will only happen if
+        * is_cow_mapping() returns true.
+        */
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+       ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
-               if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-                                               vma, addr, next))
-                       return -ENOMEM;
+               if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+                                           vma, addr, next))) {
+                       ret = -ENOMEM;
+                       break;
+               }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
-       return 0;
+
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_end(src_mm,
+                                                 vma->vm_start, end);
+       return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -675,11 +817,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
-                               if (pte_young(ptent))
-                                       SetPageReferenced(page);
+                               if (pte_young(ptent) &&
+                                   likely(!VM_SequentialReadHint(vma)))
+                                       mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
+                       if (unlikely(page_mapcount(page) < 0))
+                               print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
                        continue;
                }
@@ -689,8 +834,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 */
                if (unlikely(details))
                        continue;
-               if (!pte_file(ptent))
-                       free_swap_and_cache(pte_to_swp_entry(ptent));
+               if (pte_file(ptent)) {
+                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if (unlikely(
+                               !free_swap_and_cache(pte_to_swp_entry(ptent))))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -817,7 +966,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
        unsigned long start = start_addr;
        spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
        int fullmm = (*tlbp)->fullmm;
+       struct mm_struct *mm = vma->vm_mm;
 
+       mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
                unsigned long end;
 
@@ -831,6 +982,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                if (vma->vm_flags & VM_ACCOUNT)
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
+               if (unlikely(is_pfn_mapping(vma)))
+                       untrack_pfn_vma(vma, 0, 0);
+
                while (start != end) {
                        if (!tlb_start_valid) {
                                tlb_start = start;
@@ -838,9 +992,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        }
 
                        if (unlikely(is_vm_hugetlb_page(vma))) {
-                               unmap_hugepage_range(vma, start, end);
-                               zap_work -= (end - start) /
-                                               (HPAGE_SIZE / PAGE_SIZE);
+                               /*
+                                * It is undesirable to test vma->vm_file as it
+                                * should be non-null for valid hugetlb area.
+                                * However, vm_file will be NULL in the error
+                                * cleanup path of do_mmap_pgoff. When
+                                * hugetlbfs ->mmap method fails,
+                                * do_mmap_pgoff() nullifies vma->vm_file
+                                * before calling this function to clean up.
+                                * Since no pte has actually been setup, it is
+                                * safe to do nothing in this case.
+                                */
+                               if (vma->vm_file) {
+                                       unmap_hugepage_range(vma, start, end, NULL);
+                                       zap_work -= (end - start) /
+                                       pages_per_huge_page(hstate_vma(vma));
+                               }
+
                                start = end;
                        } else
                                start = unmap_page_range(*tlbp, vma,
@@ -854,7 +1022,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        tlb_finish_mmu(*tlbp, tlb_start, start);
 
                        if (need_resched() ||
-                               (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+                               (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
                                if (i_mmap_lock) {
                                        *tlbp = NULL;
                                        goto out;
@@ -868,6 +1036,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                }
        }
 out:
+       mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
 
@@ -895,6 +1064,29 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        return end;
 }
 
+/**
+ * zap_vma_ptes - remove ptes mapping the vma
+ * @vma: vm_area_struct holding ptes to be zapped
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ *
+ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
+ *
+ * The entire address range must be fully contained within the vma.
+ *
+ * Returns 0 if successful.
+ */
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size)
+{
+       if (address < vma->vm_start || address + size > vma->vm_end ||
+                       !(vma->vm_flags & VM_PFNMAP))
+               return -1;
+       zap_page_range(vma, address, size, NULL);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(zap_vma_ptes);
+
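
zap_vma_ptes() gives drivers a way to tear down ptes they previously installed in a VM_PFNMAP vma, for example before revoking access to device memory. A hedged sketch of a caller; struct my_dev and its fields are invented driver state, and only zap_vma_ptes() itself is the helper exported above.

/*
 * Hypothetical driver snippet: drop every pte previously installed in
 * a VM_PFNMAP vma before revoking the underlying device memory.
 */
#include <linux/mm.h>
#include <linux/device.h>

struct my_dev {
	struct device *device;
	struct vm_area_struct *mapped_vma;	/* set up in ->mmap, assumed valid */
};

static void my_dev_revoke_mapping(struct my_dev *dev)
{
	struct vm_area_struct *vma = dev->mapped_vma;

	if (!vma)
		return;

	/* The range must lie entirely inside the vma, or the call fails. */
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		dev_warn(dev->device, "failed to zap user mapping\n");
}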
 /*
  * Do a quick page-table lookup for a single page.
  */
@@ -921,31 +1113,37 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page_table;
 
        pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+       if (pud_none(*pud))
                goto no_page_table;
-       
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+       if (pud_huge(*pud)) {
+               BUG_ON(flags & FOLL_GET);
+               page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+               goto out;
+       }
+       if (unlikely(pud_bad(*pud)))
                goto no_page_table;
 
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd))
+               goto no_page_table;
        if (pmd_huge(*pmd)) {
                BUG_ON(flags & FOLL_GET);
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
        }
+       if (unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       if (!ptep)
-               goto out;
 
        pte = *ptep;
        if (!pte_present(pte))
-               goto unlock;
+               goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page))
-               goto unlock;
+               goto bad_page;
 
        if (flags & FOLL_GET)
                get_page(page);
@@ -960,13 +1158,22 @@ unlock:
 out:
        return page;
 
+bad_page:
+       pte_unmap_unlock(ptep, ptl);
+       return ERR_PTR(-EFAULT);
+
+no_page:
+       pte_unmap_unlock(ptep, ptl);
+       if (!pte_none(pte))
+               return page;
+       /* Fall through to ZERO_PAGE handling */
 no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
         * has touched so far, we don't want to allocate page tables.
         */
        if (flags & FOLL_ANON) {
-               page = ZERO_PAGE(address);
+               page = ZERO_PAGE(0);
                if (flags & FOLL_GET)
                        get_page(page);
                BUG_ON(flags & FOLL_WRITE);
@@ -974,13 +1181,39 @@ no_page_table:
        return page;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+       /*
+        * We don't want to optimize FOLL_ANON for make_pages_present()
+        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+        * we want to get the page from the page tables to make sure
+        * that we serialize and update with any other user of that
+        * mapping.
+        */
+       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+               return 0;
+       /*
+        * And if we have a fault routine, it's not an anonymous region.
+        */
+       return !vma->vm_ops || !vma->vm_ops->fault;
+}
+
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long start, int len, int flags,
                struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags;
+       unsigned int vm_flags = 0;
+       int write = !!(flags & GUP_FLAGS_WRITE);
+       int force = !!(flags & GUP_FLAGS_FORCE);
+       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
+       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
+       if (len <= 0)
+               return 0;
        /* 
         * Require read or write permissions.
         * If 'force' is set, we only require the "MAY" flags.
@@ -1001,7 +1234,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
-                       if (write) /* user gate pages are read-only */
+
+                       /* user gate pages are read-only */
+                       if (!ignore && write)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1033,34 +1268,36 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
-               if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                               || !(vm_flags & vma->vm_flags))
+               if (!vma ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   (!ignore && !(vm_flags & vma->vm_flags)))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &len, i);
+                                               &start, &len, i, write);
                        continue;
                }
 
                foll_flags = FOLL_TOUCH;
                if (pages)
                        foll_flags |= FOLL_GET;
-               if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || (!vma->vm_ops->nopage &&
-                                       !vma->vm_ops->fault)))
+               if (!write && use_zero_page(vma))
                        foll_flags |= FOLL_ANON;
 
                do {
                        struct page *page;
 
                        /*
-                        * If tsk is ooming, cut off its access to large memory
-                        * allocations. It has a pending SIGKILL, but it can't
-                        * be processed until returning to user space.
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory, unless
+                        * current is handling munlock--e.g., on exit. In
+                        * that case, we are not allocating memory.  Rather,
+                        * we're only unlocking already resident/mapped pages.
                         */
-                       if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return -ENOMEM;
+                       if (unlikely(!ignore_sigkill &&
+                                       fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
@@ -1087,13 +1324,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads.
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
                                 */
-                               if (ret & VM_FAULT_WRITE)
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                cond_resched();
                        }
+                       if (IS_ERR(page))
+                               return i ? i : PTR_ERR(page);
                        if (pages) {
                                pages[i] = page;
 
@@ -1109,98 +1354,27 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        } while (len);
        return i;
 }
-EXPORT_SYMBOL(get_user_pages);
-
-static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
-{
-       pte_t *pte;
-       spinlock_t *ptl;
-       int err = 0;
-
-       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-       if (!pte)
-               return -EAGAIN;
-       arch_enter_lazy_mmu_mode();
-       do {
-               struct page *page = ZERO_PAGE(addr);
-               pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
-
-               if (unlikely(!pte_none(*pte))) {
-                       err = -EEXIST;
-                       pte++;
-                       break;
-               }
-               page_cache_get(page);
-               page_add_file_rmap(page);
-               inc_mm_counter(mm, file_rss);
-               set_pte_at(mm, addr, pte, zero_pte);
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-       arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(pte - 1, ptl);
-       return err;
-}
 
-static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, int len, int write, int force,
+               struct page **pages, struct vm_area_struct **vmas)
 {
-       pmd_t *pmd;
-       unsigned long next;
-       int err;
+       int flags = 0;
 
-       pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd)
-               return -EAGAIN;
-       do {
-               next = pmd_addr_end(addr, end);
-               err = zeromap_pte_range(mm, pmd, addr, next, prot);
-               if (err)
-                       break;
-       } while (pmd++, addr = next, addr != end);
-       return err;
-}
+       if (write)
+               flags |= GUP_FLAGS_WRITE;
+       if (force)
+               flags |= GUP_FLAGS_FORCE;
 
-static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
-                       unsigned long addr, unsigned long end, pgprot_t prot)
-{
-       pud_t *pud;
-       unsigned long next;
-       int err;
-
-       pud = pud_alloc(mm, pgd, addr);
-       if (!pud)
-               return -EAGAIN;
-       do {
-               next = pud_addr_end(addr, end);
-               err = zeromap_pmd_range(mm, pud, addr, next, prot);
-               if (err)
-                       break;
-       } while (pud++, addr = next, addr != end);
-       return err;
+       return __get_user_pages(tsk, mm,
+                               start, len, flags,
+                               pages, vmas);
 }
 
-int zeromap_page_range(struct vm_area_struct *vma,
-                       unsigned long addr, unsigned long size, pgprot_t prot)
-{
-       pgd_t *pgd;
-       unsigned long next;
-       unsigned long end = addr + size;
-       struct mm_struct *mm = vma->vm_mm;
-       int err;
-
-       BUG_ON(addr >= end);
-       pgd = pgd_offset(mm, addr);
-       flush_cache_range(vma, addr, end);
-       do {
-               next = pgd_addr_end(addr, end);
-               err = zeromap_pud_range(mm, pgd, addr, next, prot);
-               if (err)
-                       break;
-       } while (pgd++, addr = next, addr != end);
-       return err;
-}
+EXPORT_SYMBOL(get_user_pages);
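
get_user_pages() remains the usual way for kernel code to pin user pages before touching them directly. A sketch of the conventional calling pattern for the interface as exported here (take mmap_sem for read, pin, use, release); pin_user_buffer() is a made-up helper and error handling is abbreviated.

/*
 * Conventional get_user_pages() calling pattern: take mmap_sem for
 * read, pin the pages, use them, then drop the references.  If the
 * pages were written to, callers should also set_page_dirty_lock()
 * them before releasing.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... touch the pinned pages here (kmap, DMA, memcpy, ...) ... */

	for (i = 0; i < got; i++)
		page_cache_release(pages[i]);
	return got;
}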
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                       spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
        pud_t * pud = pud_alloc(mm, pgd, addr);
@@ -1219,11 +1393,13 @@ pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlo
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page, pgprot_t prot)
 {
+       struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte;
-       spinlock_t *ptl;  
+       spinlock_t *ptl;
 
        retval = -EINVAL;
        if (PageAnon(page))
@@ -1244,6 +1420,8 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
        retval = 0;
+       pte_unmap_unlock(pte, ptl);
+       return retval;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
 out:
@@ -1272,40 +1450,26 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page)
 {
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
        vma->vm_flags |= VM_INSERTPAGE;
-       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+       return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/**
- * vm_insert_pfn - insert single pfn into user vma
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- *
- * Similar to vm_inert_page, this allows drivers to insert individual pages
- * they've allocated into a user vma. Same comments apply.
- *
- * This function should only be called from a vm_ops->fault handler, and
- * in that case the handler should return NULL.
- */
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn)
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t prot)
 {
        struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte, entry;
        spinlock_t *ptl;
 
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
-
        retval = -ENOMEM;
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
@@ -1315,19 +1479,88 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                goto out_unlock;
 
        /* Ok, finally just insert the thing.. */
-       entry = pfn_pte(pfn, vma->vm_page_prot);
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry);
+       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-
 out:
        return retval;
 }
+
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       int ret;
+       /*
+        * Technically, architectures with pte_special can avoid all these
+        * restrictions (same for remap_pfn_range).  However we would like
+        * consistency in testing and feature parity among all, so we should
+        * try to keep these invariants in place for everybody.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+               return -EINVAL;
+
+       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+       if (ret)
+               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+       return ret;
+}
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+
+       /*
+        * If we don't have pte special, then we have to use the pfn_valid()
+        * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+        * refcount the page if pfn_valid is true (hence insert_page rather
+        * than insert_pfn).
+        */
+       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+               struct page *page;
+
+               page = pfn_to_page(pfn);
+               return insert_page(vma, addr, page, vma->vm_page_prot);
+       }
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
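
As the kerneldoc above says, vm_insert_pfn() is meant to be called from a vm_ops->fault handler on a VM_PFNMAP vma (vm_insert_mixed() being the VM_MIXEDMAP counterpart). A hedged sketch of such a handler; struct mydev and mydev_pfn_for_offset() are invented, and the driver's ->mmap is assumed to have set VM_PFNMAP and vm_private_data.

/*
 * Hypothetical vm_ops->fault handler for a VM_PFNMAP device mapping.
 */
#include <linux/mm.h>
#include <linux/errno.h>

struct mydev;						/* opaque, driver-defined */
unsigned long mydev_pfn_for_offset(struct mydev *dev, pgoff_t pgoff);

static int mydev_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long pfn = mydev_pfn_for_offset(dev, vmf->pgoff);
	int ret;

	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (ret == 0 || ret == -EBUSY)	/* -EBUSY: a racing fault already installed it */
		return VM_FAULT_NOPAGE;	/* pte is present, no struct page to hand back */
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

In a real driver the handler would be wired up through the vma's vm_operations_struct by the ->mmap method.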
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1346,7 +1579,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_enter_lazy_mmu_mode();
        do {
                BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
+               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
@@ -1431,14 +1664,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
+       if (addr == vma->vm_start && end == vma->vm_end)
                vma->vm_pgoff = pfn;
-       }
+       else if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+       if (err)
+               return -EINVAL;
+
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
@@ -1450,6 +1686,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+
+       if (err)
+               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -1460,7 +1700,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
        pte_t *pte;
        int err;
-       struct page *pmd_page;
+       pgtable_t token;
        spinlock_t *uninitialized_var(ptl);
 
        pte = (mm == &init_mm) ?
@@ -1471,14 +1711,18 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        BUG_ON(pmd_huge(*pmd));
 
-       pmd_page = pmd_page(*pmd);
+       arch_enter_lazy_mmu_mode();
+
+       token = pmd_pgtable(*pmd);
 
        do {
-               err = fn(pte, pmd_page, addr, data);
+               err = fn(pte, token, addr, data);
                if (err)
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
+       arch_leave_lazy_mmu_mode();
+
        if (mm != &init_mm)
                pte_unmap_unlock(pte-1, ptl);
        return err;
@@ -1492,6 +1736,8 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
        unsigned long next;
        int err;
 
+       BUG_ON(pud_huge(*pud));
+
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
@@ -1533,10 +1779,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long end = addr + size;
+       unsigned long start = addr, end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1544,6 +1791,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
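
With the hunk above, the apply_to_page_range() callback receives a pgtable_t token instead of the pmd's struct page. A hedged sketch of a callback/caller pair against that pte_fn_t signature; count_present_cb() and count_present_ptes() are invented names, and note that apply_to_page_range() also allocates any missing page tables across the range it walks.

/*
 * Sketch of an apply_to_page_range() user: count the present ptes in
 * a range of an mm.
 */
#include <linux/mm.h>

static int count_present_cb(pte_t *pte, pgtable_t token,
			    unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* returning non-zero aborts the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_page_range(mm, addr, size, count_present_cb, &count);
	return count;
}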
@@ -1608,10 +1856,8 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                        memset(kaddr, 0, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(dst);
-               return;
-
-       }
-       copy_user_highpage(dst, src, va, vma);
+       } else
+               copy_user_highpage(dst, src, va, vma);
 }
 
 /*
@@ -1643,18 +1889,40 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
-       if (!old_page)
+       if (!old_page) {
+               /*
+                * VM_MIXEDMAP !pfn_valid() case
+                *
+                * We should not cow pages in a shared writeable mapping.
+                * Just mark the pages writable as we can't do any dirty
+                * accounting on raw pfn maps.
+                */
+               if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                    (VM_WRITE|VM_SHARED))
+                       goto reuse;
                goto gotten;
+       }
 
        /*
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (!TestSetPageLocked(old_page)) {
-                       reuse = can_share_swap_page(old_page);
-                       unlock_page(old_page);
+               if (!trylock_page(old_page)) {
+                       page_cache_get(old_page);
+                       pte_unmap_unlock(page_table, ptl);
+                       lock_page(old_page);
+                       page_table = pte_offset_map_lock(mm, pmd, address,
+                                                        &ptl);
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
+                               goto unlock;
+                       }
+                       page_cache_release(old_page);
                }
+               reuse = reuse_swap_page(old_page);
+               unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
                /*
@@ -1697,13 +1965,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        if (reuse) {
+reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
+               if (ptep_set_access_flags(vma, address, page_table, entry,1))
                        update_mmu_cache(vma, address, entry);
-                       lazy_mmu_prot_update(entry);
-               }
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
@@ -1717,16 +1984,24 @@ gotten:
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
-       if (old_page == ZERO_PAGE(address)) {
-               new_page = alloc_zeroed_user_highpage_movable(vma, address);
-               if (!new_page)
-                       goto oom;
-       } else {
-               new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-               if (!new_page)
-                       goto oom;
-               cow_user_page(new_page, old_page, address, vma);
+       VM_BUG_ON(old_page == ZERO_PAGE(0));
+       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+       if (!new_page)
+               goto oom;
+       /*
+        * Don't let another task, with possibly unlocked vma,
+        * keep the mlocked page.
+        */
+       if (vma->vm_flags & VM_LOCKED) {
+               lock_page(old_page);    /* for LRU manipulation */
+               clear_page_mlock(old_page);
+               unlock_page(old_page);
        }
+       cow_user_page(new_page, old_page, address, vma);
+       __SetPageUptodate(new_page);
+
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
+               goto oom_free_new;
 
        /*
         * Re-check the pte - we dropped the lock
@@ -1734,7 +2009,6 @@ gotten:
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
-                       page_remove_rmap(old_page, vma);
                        if (!PageAnon(old_page)) {
                                dec_mm_counter(mm, file_rss);
                                inc_mm_counter(mm, anon_rss);
@@ -1744,23 +2018,48 @@ gotten:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               lazy_mmu_prot_update(entry);
                /*
                 * Clear the pte entry and flush it first, before updating the
                 * pte with the new entry. This will avoid a race condition
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
+               page_add_new_anon_rmap(new_page, vma, address);
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
-               lru_cache_add_active(new_page);
-               page_add_new_anon_rmap(new_page, vma, address);
+               if (old_page) {
+                       /*
+                        * Only after switching the pte to the new page may
+                        * we remove the mapcount here. Otherwise another
+                        * process may come and find the rmap count decremented
+                        * before the pte is switched to the new page, and
+                        * "reuse" the old page writing into it while our pte
+                        * here still points into it and can be read by other
+                        * threads.
+                        *
+                        * The critical issue is to order this
+                        * page_remove_rmap with the ptep_clear_flush above.
+                        * Those stores are ordered by (if nothing else,)
+                        * the barrier present in the atomic_add_negative
+                        * in page_remove_rmap.
+                        *
+                        * Then the TLB flush in ptep_clear_flush ensures that
+                        * no process can access the old page before the
+                        * decremented mapcount is visible. And the old page
+                        * cannot be reused until after the decremented
+                        * mapcount is visible. So transitively, TLBs to
+                        * old page will be flushed before it can be reused.
+                        */
+                       page_remove_rmap(old_page);
+               }
 
                /* Free the old page.. */
                new_page = old_page;
                ret |= VM_FAULT_WRITE;
-       }
+       } else
+               mem_cgroup_uncharge_page(new_page);
+
        if (new_page)
                page_cache_release(new_page);
        if (old_page)
@@ -1768,6 +2067,9 @@ gotten:
 unlock:
        pte_unmap_unlock(page_table, ptl);
        if (dirty_page) {
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
                /*
                 * Yes, Virginia, this is actually required to prevent a race
                 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -1781,6 +2083,8 @@ unlock:
                put_page(dirty_page);
        }
        return ret;
+oom_free_new:
+       page_cache_release(new_page);
 oom:
        if (old_page)
                page_cache_release(old_page);
@@ -1863,8 +2167,7 @@ again:
 
        restart_addr = zap_page_range(vma, start_addr,
                                        end_addr - start_addr, details);
-       need_break = need_resched() ||
-                       need_lockbreak(details->i_mmap_lock);
+       need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
 
        if (restart_addr >= end_addr) {
                /* We have now completed this vma: mark it so */
@@ -2006,50 +2309,49 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 int vmtruncate(struct inode * inode, loff_t offset)
 {
-       struct address_space *mapping = inode->i_mapping;
-       unsigned long limit;
+       if (inode->i_size < offset) {
+               unsigned long limit;
 
-       if (inode->i_size < offset)
-               goto do_expand;
-       /*
-        * truncation of in-use swapfiles is disallowed - it would cause
-        * subsequent swapout to scribble on the now-freed blocks.
-        */
-       if (IS_SWAPFILE(inode))
-               goto out_busy;
-       i_size_write(inode, offset);
+               limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+               if (limit != RLIM_INFINITY && offset > limit)
+                       goto out_sig;
+               if (offset > inode->i_sb->s_maxbytes)
+                       goto out_big;
+               i_size_write(inode, offset);
+       } else {
+               struct address_space *mapping = inode->i_mapping;
 
-       /*
-        * unmap_mapping_range is called twice, first simply for efficiency
-        * so that truncate_inode_pages does fewer single-page unmaps. However
-        * after this first call, and before truncate_inode_pages finishes,
-        * it is possible for private pages to be COWed, which remain after
-        * truncate_inode_pages finishes, hence the second unmap_mapping_range
-        * call must be made for correctness.
-        */
-       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       truncate_inode_pages(mapping, offset);
-       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
-       goto out_truncate;
-
-do_expand:
-       limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && offset > limit)
-               goto out_sig;
-       if (offset > inode->i_sb->s_maxbytes)
-               goto out_big;
-       i_size_write(inode, offset);
-
-out_truncate:
-       if (inode->i_op && inode->i_op->truncate)
+               /*
+                * truncation of in-use swapfiles is disallowed - it would
+                * cause subsequent swapout to scribble on the now-freed
+                * blocks.
+                */
+               if (IS_SWAPFILE(inode))
+                       return -ETXTBSY;
+               i_size_write(inode, offset);
+
+               /*
+                * unmap_mapping_range is called twice, first simply for
+                * efficiency so that truncate_inode_pages does fewer
+                * single-page unmaps.  However after this first call, and
+                * before truncate_inode_pages finishes, it is possible for
+                * private pages to be COWed, which remain after
+                * truncate_inode_pages finishes, hence the second
+                * unmap_mapping_range call must be made for correctness.
+                */
+               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+               truncate_inode_pages(mapping, offset);
+               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+       }
+
+       if (inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
+
 out_sig:
        send_sig(SIGXFSZ, current, 0);
 out_big:
        return -EFBIG;
-out_busy:
-       return -ETXTBSY;
 }
 EXPORT_SYMBOL(vmtruncate);
 
@@ -2062,7 +2364,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
         * a way to truncate a range of blocks (punch a hole) -
         * we should return failure right now.
         */
-       if (!inode->i_op || !inode->i_op->truncate_range)
+       if (!inode->i_op->truncate_range)
                return -ENOSYS;
 
        mutex_lock(&inode->i_mutex);
@@ -2077,67 +2379,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
        return 0;
 }
 
-/**
- * swapin_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
- * @addr: address to start
- * @vma: user vma this addresses belong to
- *
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time.  We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
-#ifdef CONFIG_NUMA
-       struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
-       int i, num;
-       struct page *new_page;
-       unsigned long offset;
-
-       /*
-        * Get the number of handles we should do readahead io to.
-        */
-       num = valid_swaphandles(entry, &offset);
-       for (i = 0; i < num; offset++, i++) {
-               /* Ok, do the async read-ahead now */
-               new_page = read_swap_cache_async(swp_entry(swp_type(entry),
-                                                          offset), vma, addr);
-               if (!new_page)
-                       break;
-               page_cache_release(new_page);
-#ifdef CONFIG_NUMA
-               /*
-                * Find the next applicable VMA for the NUMA policy.
-                */
-               addr += PAGE_SIZE;
-               if (addr == 0)
-                       vma = NULL;
-               if (vma) {
-                       if (addr >= vma->vm_end) {
-                               vma = next_vma;
-                               next_vma = vma ? vma->vm_next : NULL;
-                       }
-                       if (vma && addr < vma->vm_start)
-                               vma = NULL;
-               } else {
-                       if (next_vma && addr >= next_vma->vm_start) {
-                               vma = next_vma;
-                               next_vma = vma->vm_next;
-                       }
-               }
-#endif
-       }
-       lru_add_drain();        /* Push any new pages onto the LRU now */
-}
-
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
@@ -2151,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
+       struct mem_cgroup *ptr = NULL;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2165,8 +2407,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page = lookup_swap_cache(entry);
        if (!page) {
                grab_swap_token(); /* Contend for token _before_ read-in */
-               swapin_readahead(entry, address, vma);
-               page = read_swap_cache_async(entry, vma, address);
+               page = swapin_readahead(entry,
+                                       GFP_HIGHUSER_MOVABLE, vma, address);
                if (!page) {
                        /*
                         * Back out if somebody else faulted in this pte
@@ -2184,9 +2426,16 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
-       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
        mark_page_accessed(page);
+
        lock_page(page);
+       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
+               ret = VM_FAULT_OOM;
+               unlock_page(page);
+               goto out;
+       }
 
        /*
         * Back out if somebody else already faulted in this pte.
@@ -2200,11 +2449,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_nomap;
        }
 
-       /* The page isn't present yet, go ahead with the fault. */
+       /*
+        * The page isn't present yet, go ahead with the fault.
+        *
+        * Be careful about the sequence of operations here.
+        * To get its accounting right, reuse_swap_page() must be called
+        * while the page is counted on swap but not yet in mapcount i.e.
+        * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+        * must be called after the swap_free(), or it will never succeed.
+        * And mem_cgroup_commit_charge_swapin(), which uses the swp_entry
+        * in page->private, must be called before reuse_swap_page(),
+        * which may delete_from_swap_cache().
+        */
 
+       mem_cgroup_commit_charge_swapin(page, ptr);
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && can_share_swap_page(page)) {
+       if (write_access && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                write_access = 0;
        }
@@ -2214,15 +2475,14 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page_add_anon_rmap(page, vma, address);
 
        swap_free(entry);
-       if (vm_swap_full())
-               remove_exclusive_swap_page(page);
+       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+               try_to_free_swap(page);
        unlock_page(page);
 
        if (write_access) {
-               /* XXX: We could OR the do_wp_page code with this one? */
-               if (do_wp_page(mm, vma, address,
-                               page_table, pmd, ptl, pte) & VM_FAULT_OOM)
-                       ret = VM_FAULT_OOM;
+               ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+               if (ret & VM_FAULT_ERROR)
+                       ret &= VM_FAULT_ERROR;
                goto out;
        }
 
@@ -2233,6 +2493,7 @@ unlock:
 out:
        return ret;
 out_nomap:
+       mem_cgroup_cancel_charge_swapin(ptr);
        pte_unmap_unlock(page_table, ptl);
        unlock_page(page);
        page_cache_release(page);
@@ -2252,50 +2513,40 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
-       if (write_access) {
-               /* Allocate our own private page. */
-               pte_unmap(page_table);
-
-               if (unlikely(anon_vma_prepare(vma)))
-                       goto oom;
-               page = alloc_zeroed_user_highpage_movable(vma, address);
-               if (!page)
-                       goto oom;
+       /* Allocate our own private page. */
+       pte_unmap(page_table);
 
-               entry = mk_pte(page, vma->vm_page_prot);
-               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       if (unlikely(anon_vma_prepare(vma)))
+               goto oom;
+       page = alloc_zeroed_user_highpage_movable(vma, address);
+       if (!page)
+               goto oom;
+       __SetPageUptodate(page);
 
-               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-               if (!pte_none(*page_table))
-                       goto release;
-               inc_mm_counter(mm, anon_rss);
-               lru_cache_add_active(page);
-               page_add_new_anon_rmap(page, vma, address);
-       } else {
-               /* Map the ZERO_PAGE - vm_page_prot is readonly */
-               page = ZERO_PAGE(address);
-               page_cache_get(page);
-               entry = mk_pte(page, vma->vm_page_prot);
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
+               goto oom_free_page;
 
-               ptl = pte_lockptr(mm, pmd);
-               spin_lock(ptl);
-               if (!pte_none(*page_table))
-                       goto release;
-               inc_mm_counter(mm, file_rss);
-               page_add_file_rmap(page);
-       }
+       entry = mk_pte(page, vma->vm_page_prot);
+       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!pte_none(*page_table))
+               goto release;
+       inc_mm_counter(mm, anon_rss);
+       page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(vma, address, entry);
-       lazy_mmu_prot_update(entry);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
 release:
+       mem_cgroup_uncharge_page(page);
        page_cache_release(page);
        goto unlock;
+oom_free_page:
+       page_cache_release(page);
 oom:
        return VM_FAULT_OOM;
 }
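The charge/uncharge dance in do_anonymous_page() above generalizes to all the fault paths touched here: charge the new page to the memory cgroup before retaking the pte lock, and undo the charge if a racing fault already populated the pte. A condensed illustration, not a drop-in function, using only calls that appear in this diff:

/* Illustrative skeleton of the charge-then-map-or-uncharge pattern. */
static int example_charge_and_map(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmd, struct page *page)
{
	spinlock_t *ptl;
	pte_t *pte, entry;

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
		return -ENOMEM;		/* charge first, outside the ptl */

	entry = maybe_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)), vma);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*pte)) {
		/* lost the race: undo the charge; caller drops the page */
		mem_cgroup_uncharge_page(page);
		pte_unmap_unlock(pte, ptl);
		return -EAGAIN;
	}
	inc_mm_counter(mm, anon_rss);
	page_add_new_anon_rmap(page, vma, address);
	set_pte_at(mm, address, pte, entry);
	update_mmu_cache(vma, address, entry);
	pte_unmap_unlock(pte, ptl);
	return 0;
}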
@@ -2322,6 +2573,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        pte_t entry;
        int anon = 0;
+       int charged = 0;
        struct page *dirty_page = NULL;
        struct vm_fault vmf;
        int ret;
@@ -2332,22 +2584,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.flags = flags;
        vmf.page = NULL;
 
-       BUG_ON(vma->vm_flags & VM_PFNMAP);
-
-       if (likely(vma->vm_ops->fault)) {
-               ret = vma->vm_ops->fault(vma, &vmf);
-               if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
-                       return ret;
-       } else {
-               /* Legacy ->nopage path */
-               ret = 0;
-               vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-               /* no page was available -- either SIGBUS or OOM */
-               if (unlikely(vmf.page == NOPAGE_SIGBUS))
-                       return VM_FAULT_SIGBUS;
-               else if (unlikely(vmf.page == NOPAGE_OOM))
-                       return VM_FAULT_OOM;
-       }
+       ret = vma->vm_ops->fault(vma, &vmf);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+               return ret;
 
        /*
         * For consistency in subsequent calls, make the faulted page always
@@ -2375,7 +2614,20 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+                               ret = VM_FAULT_OOM;
+                               page_cache_release(page);
+                               goto out;
+                       }
+                       charged = 1;
+                       /*
+                        * Don't let another task, with possibly unlocked vma,
+                        * keep the mlocked page.
+                        */
+                       if (vma->vm_flags & VM_LOCKED)
+                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
+                       __SetPageUptodate(page);
                } else {
                        /*
                         * If the page will be shareable, see if the backing
@@ -2426,11 +2678,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
                if (anon) {
-                        inc_mm_counter(mm, anon_rss);
-                        lru_cache_add_active(page);
-                        page_add_new_anon_rmap(page, vma, address);
+                       inc_mm_counter(mm, anon_rss);
+                       page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(page);
@@ -2439,11 +2689,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
+               set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
-               lazy_mmu_prot_update(entry);
        } else {
+               if (charged)
+                       mem_cgroup_uncharge_page(page);
                if (anon)
                        page_cache_release(page);
                else
@@ -2458,6 +2710,9 @@ out_unlocked:
        if (anon)
                page_cache_release(vmf.page);
        else if (dirty_page) {
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
                set_page_dirty_balance(dirty_page, page_mkwrite);
                put_page(dirty_page);
        }
@@ -2470,63 +2725,13 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                int write_access, pte_t orig_pte)
 {
        pgoff_t pgoff = (((address & PAGE_MASK)
-                       - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
+                       - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
        pte_unmap(page_table);
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-
-/*
- * do_no_pfn() tries to create a new page mapping for a page without
- * a struct page backing it
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- *
- * It is expected that the ->nopfn handler always returns the same pfn
- * for a given virtual mapping.
- *
- * Mark this `noinline' to prevent it from bloating the main pagefault code.
- */
-static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
-                    unsigned long address, pte_t *page_table, pmd_t *pmd,
-                    int write_access)
-{
-       spinlock_t *ptl;
-       pte_t entry;
-       unsigned long pfn;
-
-       pte_unmap(page_table);
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
-
-       pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-       if (unlikely(pfn == NOPFN_OOM))
-               return VM_FAULT_OOM;
-       else if (unlikely(pfn == NOPFN_SIGBUS))
-               return VM_FAULT_SIGBUS;
-       else if (unlikely(pfn == NOPFN_REFAULT))
-               return 0;
-
-       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-       /* Only go through if we didn't race with anybody else... */
-       if (pte_none(*page_table)) {
-               entry = pfn_pte(pfn, vma->vm_page_prot);
-               if (write_access)
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
-       }
-       pte_unmap_unlock(page_table, ptl);
-       return 0;
-}
-
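With ->nopfn gone, a driver that used to return a raw pfn is expected to install it from its ->fault handler instead. A hedged sketch of the conversion, where example_resolve_pfn() and example_pfn_valid() are hypothetical driver helpers:

/* Hypothetical replacement for a ->nopfn handler: the driver's ->fault
 * callback inserts the pfn itself and tells the core not to expect a
 * struct page back. */
static int example_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = example_resolve_pfn(vma, vmf->pgoff);
	int err;

	if (!example_pfn_valid(pfn))
		return VM_FAULT_SIGBUS;

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;		/* pte installed, no struct page */
}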
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
@@ -2547,12 +2752,11 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
-       if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-                       !(vma->vm_flags & VM_CAN_NONLINEAR))) {
+       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
-               print_bad_pte(vma, orig_pte, address);
+               print_bad_pte(vma, address, orig_pte, NULL);
                return VM_FAULT_OOM;
        }
 
@@ -2584,12 +2788,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
-                               if (vma->vm_ops->fault || vma->vm_ops->nopage)
+                               if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
                                                pte, pmd, write_access, entry);
-                               if (unlikely(vma->vm_ops->nopfn))
-                                       return do_no_pfn(mm, vma, address, pte,
-                                                        pmd, write_access);
                        }
                        return do_anonymous_page(mm, vma, address,
                                                 pte, pmd, write_access);
@@ -2614,7 +2815,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
                update_mmu_cache(vma, address, entry);
-               lazy_mmu_prot_update(entry);
        } else {
                /*
                 * This is needed only for protection faults but the arch code
@@ -2673,9 +2873,11 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
-               pud_free(new);
+               pud_free(mm, new);
        else
                pgd_populate(mm, pgd, new);
        spin_unlock(&mm->page_table_lock);
@@ -2694,15 +2896,17 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud))          /* Another has populated it */
-               pmd_free(new);
+               pmd_free(mm, new);
        else
                pud_populate(mm, pud, new);
 #else
        if (pgd_present(*pud))          /* Another has populated it */
-               pmd_free(new);
+               pmd_free(mm, new);
        else
                pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
@@ -2718,7 +2922,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
 
        vma = find_vma(current->mm, addr);
        if (!vma)
-               return -1;
+               return -ENOMEM;
        write = (vma->vm_flags & VM_WRITE) != 0;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
@@ -2727,49 +2931,9 @@ int make_pages_present(unsigned long addr, unsigned long end)
                        len, write, 0, NULL, NULL);
        if (ret < 0)
                return ret;
-       return ret == len ? 0 : -1;
+       return ret == len ? 0 : -EFAULT;
 }
 
-/* 
- * Map a vmalloc()-space virtual address to the physical page.
- */
-struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-       unsigned long addr = (unsigned long) vmalloc_addr;
-       struct page *page = NULL;
-       pgd_t *pgd = pgd_offset_k(addr);
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-  
-       if (!pgd_none(*pgd)) {
-               pud = pud_offset(pgd, addr);
-               if (!pud_none(*pud)) {
-                       pmd = pmd_offset(pud, addr);
-                       if (!pmd_none(*pmd)) {
-                               ptep = pte_offset_map(pmd, addr);
-                               pte = *ptep;
-                               if (pte_present(pte))
-                                       page = pte_page(pte);
-                               pte_unmap(ptep);
-                       }
-               }
-       }
-       return page;
-}
-
-EXPORT_SYMBOL(vmalloc_to_page);
-
-/*
- * Map a vmalloc()-space virtual address to the physical page frame number.
- */
-unsigned long vmalloc_to_pfn(void * vmalloc_addr)
-{
-       return page_to_pfn(vmalloc_to_page(vmalloc_addr));
-}
-
-EXPORT_SYMBOL(vmalloc_to_pfn);
-
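vmalloc_to_page() and vmalloc_to_pfn() are removed here because they moved next to the rest of the vmalloc implementation rather than being dropped; a classic consumer is a driver that maps a vmalloc()ed buffer into user space one page at a time. A minimal sketch, with example_buffer and example_buffer_pages standing in for hypothetical driver state:

static void *example_buffer;			/* hypothetical vmalloc()ed buffer */
static unsigned long example_buffer_pages;	/* its size in pages */

/* Hypothetical ->fault handler: translate the faulting page offset in the
 * vmalloc()ed buffer back to its struct page for the core fault code. */
static int example_vmalloc_fault(struct vm_area_struct *vma,
				 struct vm_fault *vmf)
{
	struct page *page;

	if (vmf->pgoff >= example_buffer_pages)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(example_buffer + (vmf->pgoff << PAGE_SHIFT));
	get_page(page);
	vmf->page = page;
	return 0;
}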
 #if !defined(__HAVE_ARCH_GATE_AREA)
 
 #if defined(AT_SYSINFO_EHDR)
@@ -2814,6 +2978,83 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       resource_size_t phys_addr = 0;
+       struct mm_struct *mm = vma->vm_mm;
+       int ret = -EINVAL;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto out;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto out;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto out;
+
+       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
+       if (pmd_huge(*pmd))
+               goto out;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!ptep)
+               goto out;
+
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto unlock;
+       if ((flags & FOLL_WRITE) && !pte_write(pte))
+               goto unlock;
+       phys_addr = pte_pfn(pte);
+       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
+
+       *prot = pgprot_val(pte_pgprot(pte));
+       *phys = phys_addr;
+       ret = 0;
+
+unlock:
+       pte_unmap_unlock(ptep, ptl);
+out:
+       return ret;
+}
+
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, int len, int write)
+{
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+       void __iomem *maddr;
+       int offset = addr & (PAGE_SIZE-1);
+
+       if (follow_phys(vma, addr, write, &prot, &phys_addr))
+               return -EINVAL;
+
+       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       if (write)
+               memcpy_toio(maddr + offset, buf, len);
+       else
+               memcpy_fromio(buf, maddr + offset, len);
+       iounmap(maddr);
+
+       return len;
+}
+#endif
+
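The new generic_access_phys() is intended to be wired up as the ->access method of a VM_IO/VM_PFNMAP mapping, which the reworked access_process_vm() below falls back to when get_user_pages() cannot pin a page. A sketch of the driver side, with the example_* names hypothetical:

#ifdef CONFIG_HAVE_IOREMAP_PROT
static struct vm_operations_struct example_phys_vm_ops = {
	.access = generic_access_phys,	/* lets ptrace peek/poke the mapping */
};

static int example_phys_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &example_phys_vm_ops;
	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif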
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space,
@@ -2823,7 +3064,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       struct page *page;
        void *old_buf = buf;
 
        mm = get_task_mm(tsk);
@@ -2831,32 +3071,48 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
                return 0;
 
        down_read(&mm->mmap_sem);
-       /* ignore errors, just check how much was sucessfully transfered */
+       /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;
+               struct page *page = NULL;
 
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
-               if (ret <= 0)
-                       break;
-
-               bytes = len;
-               offset = addr & (PAGE_SIZE-1);
-               if (bytes > PAGE_SIZE-offset)
-                       bytes = PAGE_SIZE-offset;
-
-               maddr = kmap(page);
-               if (write) {
-                       copy_to_user_page(vma, page, addr,
-                                         maddr + offset, buf, bytes);
-                       set_page_dirty_lock(page);
+               if (ret <= 0) {
+                       /*
+                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
+                        * we can access using slightly different code.
+                        */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+                       vma = find_vma(mm, addr);
+                       if (!vma)
+                               break;
+                       if (vma->vm_ops && vma->vm_ops->access)
+                               ret = vma->vm_ops->access(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0)
+#endif
+                               break;
+                       bytes = ret;
                } else {
-                       copy_from_user_page(vma, page, addr,
-                                           buf, maddr + offset, bytes);
+                       bytes = len;
+                       offset = addr & (PAGE_SIZE-1);
+                       if (bytes > PAGE_SIZE-offset)
+                               bytes = PAGE_SIZE-offset;
+
+                       maddr = kmap(page);
+                       if (write) {
+                               copy_to_user_page(vma, page, addr,
+                                                 maddr + offset, buf, bytes);
+                               set_page_dirty_lock(page);
+                       } else {
+                               copy_from_user_page(vma, page, addr,
+                                                   buf, maddr + offset, bytes);
+                       }
+                       kunmap(page);
+                       page_cache_release(page);
                }
-               kunmap(page);
-               page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
@@ -2866,4 +3122,56 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 
        return buf - old_buf;
 }
-EXPORT_SYMBOL_GPL(access_process_vm);
+
+/*
+ * Print the name of a VMA.
+ */
+void print_vma_addr(char *prefix, unsigned long ip)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+
+       /*
+        * Do not print if we are in atomic
+        * contexts (in exception stacks, etc.):
+        */
+       if (preempt_count())
+               return;
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, ip);
+       if (vma && vma->vm_file) {
+               struct file *f = vma->vm_file;
+               char *buf = (char *)__get_free_page(GFP_KERNEL);
+               if (buf) {
+                       char *p, *s;
+
+                       p = d_path(&f->f_path, buf, PAGE_SIZE);
+                       if (IS_ERR(p))
+                               p = "?";
+                       s = strrchr(p, '/');
+                       if (s)
+                               p = s+1;
+                       printk("%s%s[%lx+%lx]", prefix, p,
+                                       vma->vm_start,
+                                       vma->vm_end - vma->vm_start);
+                       free_page((unsigned long)buf);
+               }
+       }
+       up_read(&current->mm->mmap_sem);
+}
+
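print_vma_addr() is aimed at fault and signal reporting. An x86-flavoured sketch of a caller (regs->ip and regs->sp are architecture specific, and the helper name is hypothetical):

/* Hypothetical segfault report: append the mapping (binary name plus
 * offset) behind the faulting instruction pointer. */
static void example_show_signal_msg(struct pt_regs *regs,
				    unsigned long address)
{
	printk(KERN_INFO "%s[%d]: segfault at %lx ip %p sp %p",
	       current->comm, task_pid_nr(current), address,
	       (void *)regs->ip, (void *)regs->sp);
	print_vma_addr(KERN_CONT " in ", regs->ip);
	printk(KERN_CONT "\n");
}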
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+       might_sleep();
+       /*
+        * it would be nicer only to annotate paths which are not under
+        * pagefault_disable, however that requires a larger audit and
+        * providing helpers like get_user_atomic.
+        */
+       if (!in_atomic() && current->mm)
+               might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
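might_fault() is meant to sit at the top of the user-access primitives, so lockdep records the potential mmap_sem acquisition even on runs where no fault actually happens. A hedged sketch of such a wrapper; example_copy_to_user() is hypothetical, and the real annotations belong in the arch uaccess implementations:

/* Hypothetical user-copy wrapper: annotate the potential fault (and
 * hence the potential mmap_sem read-lock) before touching user memory. */
static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;	/* bytes left uncopied, as with copy_to_user() */
}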