diff --git a/mm/memory.c b/mm/memory.c
index 9b8a01d..fa2f04e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -47,7 +47,7 @@
 #include <linux/pagemap.h>
 #include <linux/ksm.h>
 #include <linux/rmap.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/delayacct.h>
 #include <linux/init.h>
 #include <linux/writeback.h>
@@ -293,7 +293,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
        struct mmu_gather_batch *batch;
 
-       tlb->need_flush = 1;
+       VM_BUG_ON(!tlb->need_flush);
 
        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
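
Note: this hunk turns __tlb_remove_page() from the place that *sets*
tlb->need_flush into one that *asserts* it, so whoever cleared the PTE must
already have raised the flag. For reference, the flag is normally raised by
the PTE-clearing side; a sketch modeled on include/asm-generic/tlb.h of this
era (illustrative, not part of this patch):

	#define tlb_remove_tlb_entry(tlb, ptep, address)		\
		do {							\
			tlb->need_flush = 1;				\
			__tlb_remove_tlb_entry(tlb, ptep, address);	\
		} while (0)
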
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                        }
                        if (likely(!non_swap_entry(entry)))
                                rss[MM_SWAPENTS]++;
-                       else if (is_write_migration_entry(entry) &&
-                                       is_cow_mapping(vm_flags)) {
-                               /*
-                                * COW mappings require pages in both parent
-                                * and child to be set to read.
-                                */
-                               make_migration_entry_read(&entry);
-                               pte = swp_entry_to_pte(entry);
-                               set_pte_at(src_mm, addr, src_pte, pte);
+                       else if (is_migration_entry(entry)) {
+                               page = migration_entry_to_page(entry);
+
+                               if (PageAnon(page))
+                                       rss[MM_ANONPAGES]++;
+                               else
+                                       rss[MM_FILEPAGES]++;
+
+                               if (is_write_migration_entry(entry) &&
+                                   is_cow_mapping(vm_flags)) {
+                                       /*
+                                        * COW mappings require pages in both
+                                        * parent and child to be set to read.
+                                        */
+                                       make_migration_entry_read(&entry);
+                                       pte = swp_entry_to_pte(entry);
+                                       set_pte_at(src_mm, addr, src_pte, pte);
+                               }
                        }
                }
                goto out_set_pte;
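
Note: a migration swap entry still pins a real page, so copy_one_pte() now
counts it in MM_ANONPAGES/MM_FILEPAGES instead of skipping it; the
zap_pte_range() hunk below performs the matching decrement. The helpers it
relies on decode the page from the entry; a sketch of their shape in
include/linux/swapops.h around this time (from memory, treat as illustrative):

	static inline int is_migration_entry(swp_entry_t entry)
	{
		return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
				swp_type(entry) == SWP_MIGRATION_WRITE);
	}

	static inline struct page *migration_entry_to_page(swp_entry_t entry)
	{
		struct page *p = pfn_to_page(swp_offset(entry));
		/* migration entries are only used while the page is locked */
		BUG_ON(!PageLocked(p));
		return p;
	}
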
@@ -1191,6 +1200,16 @@ again:
 
                        if (!non_swap_entry(entry))
                                rss[MM_SWAPENTS]--;
+                       else if (is_migration_entry(entry)) {
+                               struct page *page;
+
+                               page = migration_entry_to_page(entry);
+
+                               if (PageAnon(page))
+                                       rss[MM_ANONPAGES]--;
+                               else
+                                       rss[MM_FILEPAGES]--;
+                       }
                        if (unlikely(!free_swap_and_cache(entry)))
                                print_bad_pte(vma, addr, ptent, NULL);
                }
@@ -1231,7 +1250,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        if (next-addr != HPAGE_PMD_SIZE) {
                                VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                                split_huge_page_pmd(vma->vm_mm, pmd);
-                       } else if (zap_huge_pmd(tlb, vma, pmd))
+                       } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                continue;
                        /* fall through */
                }
@@ -1290,13 +1309,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
        return addr;
 }
 
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE        (8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE        (1024 * PAGE_SIZE)
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1322,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  *
  * Unmap all pages in the vma list.
  *
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
- * return the ending mmu_gather to the caller.
- *
  * Only addresses between `start' and `end' will be unmapped.
  *
  * The VMA list must be sorted in ascending virtual address order.
@@ -1514,7 +1522,7 @@ split_fallthrough:
        }
 
        if (flags & FOLL_GET)
-               get_page(page);
+               get_page_foll(page);
        if (flags & FOLL_TOUCH) {
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
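
Note: with THP tail-page refcounting, follow_page() may hand back a tail page
of a huge page, and a bare get_page() no longer does the right bookkeeping on
the gup path. get_page_foll() lives in mm/internal.h; roughly (a sketch from
memory of the tail-page refcounting series, assuming __get_page_tail_foll()
as its tail-page helper):

	static inline void get_page_foll(struct page *page)
	{
		if (unlikely(PageTail(page)))
			/*
			 * Safe: __split_huge_page_refcount() cannot race
			 * with us here because we hold the PT lock.
			 */
			__get_page_tail_foll(page, true);
		else {
			/* normal/head pages must already have _count elevated */
			VM_BUG_ON(atomic_read(&page->_count) <= 0);
			atomic_inc(&page->_count);
		}
	}
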
@@ -1816,7 +1824,63 @@ next_page:
 }
 EXPORT_SYMBOL(__get_user_pages);
 
-/**
+/**
+ * fixup_user_fault() - manually resolve a user page fault
+ * @tsk:       the task_struct to use for page fault accounting, or
+ *             NULL if faults are not to be recorded.
+ * @mm:                mm_struct of target mm
+ * @address:   user address
+ * @fault_flags: flags to pass down to handle_mm_fault()
+ *
+ * This is meant to be called in the specific scenario where for locking
+ * reasons we try to access user memory in atomic context (within a
+ * pagefault_disable() section), the access fails with -EFAULT, and we want
+ * to resolve the user fault before trying again.
+ *
+ * Typically this is meant to be used by the futex code.
+ *
+ * The main difference with get_user_pages() is that this function will
+ * unconditionally call handle_mm_fault(), which will in turn perform all the
+ * necessary SW fixup of the dirty and young bits in the PTE, while
+ * get_user_pages() only guarantees to update these in the struct page.
+ *
+ * This is important for some architectures where those bits also gate the
+ * access permission to the page because they are maintained in software.  On
+ * such architectures, gup() will not be enough to make a subsequent access
+ * succeed.
+ *
+ * This should be called with the mmap_sem held for read.
+ */
+int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long address, unsigned int fault_flags)
+{
+       struct vm_area_struct *vma;
+       int ret;
+
+       vma = find_extend_vma(mm, address);
+       if (!vma || address < vma->vm_start)
+               return -EFAULT;
+
+       ret = handle_mm_fault(mm, vma, address, fault_flags);
+       if (ret & VM_FAULT_ERROR) {
+               if (ret & VM_FAULT_OOM)
+                       return -ENOMEM;
+               if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+                       return -EHWPOISON;
+               if (ret & VM_FAULT_SIGBUS)
+                       return -EFAULT;
+               BUG();
+       }
+       if (tsk) {
+               if (ret & VM_FAULT_MAJOR)
+                       tsk->maj_flt++;
+               else
+                       tsk->min_flt++;
+       }
+       return 0;
+}
+
+/**
  * get_user_pages() - pin user pages in memory
  * @tsk:       the task_struct to use for page fault accounting, or
  *             NULL if faults are not to be recorded.
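
Note: the intended caller pattern is: take mmap_sem for read, call
fixup_user_fault(), retry the pagefault_disable()'d access. The futex half of
this same series reworks fault_in_user_writeable() along these lines (sketch
of the kernel/futex.c side, not part of this file's diff):

	static int fault_in_user_writeable(u32 __user *uaddr)
	{
		struct mm_struct *mm = current->mm;
		int ret;

		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE);
		up_read(&mm->mmap_sem);

		return ret < 0 ? ret : 0;
	}
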
@@ -3104,14 +3168,34 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t *page_table;
        spinlock_t *ptl;
        struct page *page;
+       struct page *cow_page;
        pte_t entry;
        int anon = 0;
-       int charged = 0;
        struct page *dirty_page = NULL;
        struct vm_fault vmf;
        int ret;
        int page_mkwrite = 0;
 
+       /*
+        * If we do COW later, allocate the page before taking lock_page()
+        * on the file cache page. This will reduce lock holding time.
+        */
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+
+               if (unlikely(anon_vma_prepare(vma)))
+                       return VM_FAULT_OOM;
+
+               cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+               if (!cow_page)
+                       return VM_FAULT_OOM;
+
+               if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
+                       page_cache_release(cow_page);
+                       return VM_FAULT_OOM;
+               }
+       } else
+               cow_page = NULL;
+
        vmf.virtual_address = (void __user *)(address & PAGE_MASK);
        vmf.pgoff = pgoff;
        vmf.flags = flags;
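
Note: the point of the reordering is lock hold time: previously the COW page
was allocated and memcg-charged after ->fault() returned, i.e. potentially
while the file cache page was already locked. The write-to-private path now
runs, schematically (condensed from the code above, not a literal excerpt):

	anon_vma_prepare(vma);			/* may sleep, no page lock held */
	cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL);
	ret = vma->vm_ops->fault(vma, &vmf);	/* may return a locked page */
	copy_user_highpage(cow_page, vmf.page, address, vma);
	/* any error from ->fault() unwinds via the uncharge_out label */
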
@@ -3120,12 +3204,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        ret = vma->vm_ops->fault(vma, &vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
                            VM_FAULT_RETRY)))
-               return ret;
+               goto uncharge_out;
 
        if (unlikely(PageHWPoison(vmf.page))) {
                if (ret & VM_FAULT_LOCKED)
                        unlock_page(vmf.page);
-               return VM_FAULT_HWPOISON;
+               ret = VM_FAULT_HWPOISON;
+               goto uncharge_out;
        }
 
        /*
@@ -3143,23 +3228,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        page = vmf.page;
        if (flags & FAULT_FLAG_WRITE) {
                if (!(vma->vm_flags & VM_SHARED)) {
+                       page = cow_page;
                        anon = 1;
-                       if (unlikely(anon_vma_prepare(vma))) {
-                               ret = VM_FAULT_OOM;
-                               goto out;
-                       }
-                       page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                               vma, address);
-                       if (!page) {
-                               ret = VM_FAULT_OOM;
-                               goto out;
-                       }
-                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
-                               ret = VM_FAULT_OOM;
-                               page_cache_release(page);
-                               goto out;
-                       }
-                       charged = 1;
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
@@ -3228,8 +3298,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, page_table);
        } else {
-               if (charged)
-                       mem_cgroup_uncharge_page(page);
+               if (cow_page)
+                       mem_cgroup_uncharge_page(cow_page);
                if (anon)
                        page_cache_release(page);
                else
@@ -3238,7 +3308,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        pte_unmap_unlock(page_table, ptl);
 
-out:
        if (dirty_page) {
                struct address_space *mapping = page->mapping;
 
@@ -3268,6 +3337,13 @@ out:
 unwritable_page:
        page_cache_release(page);
        return ret;
+uncharge_out:
+       /* the fs's fault handler returned an error */
+       if (cow_page) {
+               mem_cgroup_uncharge_page(cow_page);
+               page_cache_release(cow_page);
+       }
+       return ret;
 }
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,