mm: make stack guard page logic use vm_prev pointer
diff --git a/mm/memory.c b/mm/memory.c
index 1c9dc46..2ed2267 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -56,6 +56,7 @@
 #include <linux/kallsyms.h>
 #include <linux/swapops.h>
 #include <linux/elf.h>
+#include <linux/gfp.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -121,6 +122,77 @@ static int __init init_zero_pfn(void)
 }
 core_initcall(init_zero_pfn);
 
+
+#if defined(SPLIT_RSS_COUNTING)
+
+static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
+{
+       int i;
+
+       for (i = 0; i < NR_MM_COUNTERS; i++) {
+               if (task->rss_stat.count[i]) {
+                       add_mm_counter(mm, i, task->rss_stat.count[i]);
+                       task->rss_stat.count[i] = 0;
+               }
+       }
+       task->rss_stat.events = 0;
+}
+
+static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
+{
+       struct task_struct *task = current;
+
+       if (likely(task->mm == mm))
+               task->rss_stat.count[member] += val;
+       else
+               add_mm_counter(mm, member, val);
+}
+#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
+#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
+
+/* sync the per-task counters back to the mm once per 64 page faults */
+#define TASK_RSS_EVENTS_THRESH (64)
+static void check_sync_rss_stat(struct task_struct *task)
+{
+       if (unlikely(task != current))
+               return;
+       if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
+               __sync_task_rss_stat(task, task->mm);
+}
+
+unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+       long val = 0;
+
+       /*
+        * Don't go through task->mm here; that would need get_task_mm().
+        * The caller must guarantee that mm is still valid.
+        */
+       val = atomic_long_read(&mm->rss_stat.count[member]);
+       /*
+        * The counter is updated asynchronously and may transiently go
+        * negative, which is never a meaningful value for callers.
+        */
+       if (val < 0)
+               return 0;
+       return (unsigned long)val;
+}
+
+void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
+{
+       __sync_task_rss_stat(task, mm);
+}
+#else
+
+#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
+#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
+
+static void check_sync_rss_stat(struct task_struct *task)
+{
+}
+
+#endif
+
 /*
  * If a p?d_bad entry is found while walking page tables, report
  * the error, before resetting entry to p?d_none.  Usually (but
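
A minimal userspace model (not kernel code) of the split RSS counting scheme the hunk above adds under SPLIT_RSS_COUNTING: each thread batches counter deltas privately and folds them into the shared per-mm atomics once enough events have accumulated, mirroring add_mm_counter_fast(), check_sync_rss_stat() and __sync_task_rss_stat(). All demo_* names are invented for this sketch, and the threshold check is folded into demo_add_fast() for brevity.

#include <stdatomic.h>

#define DEMO_NR_COUNTERS    3        /* stands in for NR_MM_COUNTERS */
#define DEMO_EVENTS_THRESH  64       /* mirrors TASK_RSS_EVENTS_THRESH */

static atomic_long mm_counters[DEMO_NR_COUNTERS];        /* shared, like mm->rss_stat */
static _Thread_local long task_cache[DEMO_NR_COUNTERS];  /* private, like task->rss_stat */
static _Thread_local int task_events;

static void demo_sync(void)                        /* cf. __sync_task_rss_stat() */
{
        for (int i = 0; i < DEMO_NR_COUNTERS; i++) {
                if (task_cache[i]) {
                        atomic_fetch_add(&mm_counters[i], task_cache[i]);
                        task_cache[i] = 0;
                }
        }
        task_events = 0;
}

static void demo_add_fast(int member, long val)    /* cf. add_mm_counter_fast() */
{
        task_cache[member] += val;
        if (++task_events > DEMO_EVENTS_THRESH)    /* cf. check_sync_rss_stat() */
                demo_sync();
}

In the patch itself the threshold check runs once per fault (check_sync_rss_stat(current) at the top of handle_mm_fault(), added at the end of this diff), and get_mm_counter() clamps a transiently negative sum to zero because the per-task deltas are folded in lazily.
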
@@ -235,7 +307,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long start;
 
        /*
         * The next few lines have given us lots of grief...
@@ -279,7 +350,6 @@ void free_pgd_range(struct mmu_gather *tlb,
        if (addr > end - 1)
                return;
 
-       start = addr;
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -300,7 +370,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
-               anon_vma_unlink(vma);
+               unlink_anon_vmas(vma);
                unlink_file_vma(vma);
 
                if (is_vm_hugetlb_page(vma)) {
@@ -314,7 +384,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = vma->vm_next;
-                               anon_vma_unlink(vma);
+                               unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
@@ -376,12 +446,20 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        return 0;
 }
 
-static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+static inline void init_rss_vec(int *rss)
 {
-       if (file_rss)
-               add_mm_counter(mm, file_rss, file_rss);
-       if (anon_rss)
-               add_mm_counter(mm, anon_rss, anon_rss);
+       memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+{
+       int i;
+
+       if (current->mm == mm)
+               sync_mm_rss(current, mm);
+       for (i = 0; i < NR_MM_COUNTERS; i++)
+               if (rss[i])
+                       add_mm_counter(mm, i, rss[i]);
 }
 
 /*
@@ -430,12 +508,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                current->comm,
                (long long)pte_val(pte), (long long)pmd_val(*pmd));
-       if (page) {
-               printk(KERN_ALERT
-               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
-               page, (void *)page->flags, page_count(page),
-               page_mapcount(page), page->mapping, page->index);
-       }
+       if (page)
+               dump_page(page);
        printk(KERN_ALERT
                "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
                (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
@@ -597,7 +671,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                                 &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
-                       if (is_write_migration_entry(entry) &&
+                       if (likely(!non_swap_entry(entry)))
+                               rss[MM_SWAPENTS]++;
+                       else if (is_write_migration_entry(entry) &&
                                        is_cow_mapping(vm_flags)) {
                                /*
                                 * COW mappings require pages in both parent
@@ -632,7 +708,10 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (page) {
                get_page(page);
                page_dup_rmap(page);
-               rss[PageAnon(page)]++;
+               if (PageAnon(page))
+                       rss[MM_ANONPAGES]++;
+               else
+                       rss[MM_FILEPAGES]++;
        }
 
 out_set_pte:
@@ -648,11 +727,12 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
-       int rss[2];
+       int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
 
 again:
-       rss[1] = rss[0] = 0;
+       init_rss_vec(rss);
+
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
@@ -688,7 +768,7 @@ again:
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
        pte_unmap_nested(orig_src_pte);
-       add_mm_rss(dst_mm, rss[0], rss[1]);
+       add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
 
@@ -816,8 +896,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        struct mm_struct *mm = tlb->mm;
        pte_t *pte;
        spinlock_t *ptl;
-       int file_rss = 0;
-       int anon_rss = 0;
+       int rss[NR_MM_COUNTERS];
+
+       init_rss_vec(rss);
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -863,14 +944,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                set_pte_at(mm, addr, pte,
                                           pgoff_to_pte(page->index));
                        if (PageAnon(page))
-                               anon_rss--;
+                               rss[MM_ANONPAGES]--;
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent) &&
                                    likely(!VM_SequentialReadHint(vma)))
                                        mark_page_accessed(page);
-                               file_rss--;
+                               rss[MM_FILEPAGES]--;
                        }
                        page_remove_rmap(page);
                        if (unlikely(page_mapcount(page) < 0))
@@ -887,13 +968,18 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                if (pte_file(ptent)) {
                        if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
                                print_bad_pte(vma, addr, ptent, NULL);
-               } else if
-                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-                       print_bad_pte(vma, addr, ptent, NULL);
+               } else {
+                       swp_entry_t entry = pte_to_swp_entry(ptent);
+
+                       if (!non_swap_entry(entry))
+                               rss[MM_SWAPENTS]--;
+                       if (unlikely(!free_swap_and_cache(entry)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
-       add_mm_rss(mm, file_rss, anon_rss);
+       add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
 
@@ -956,6 +1042,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                details = NULL;
 
        BUG_ON(addr >= end);
+       mem_cgroup_uncharge_start();
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
@@ -968,6 +1055,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                                zap_work, details);
        } while (pgd++, addr = next, (addr != end && *zap_work > 0));
        tlb_end_vma(tlb, vma);
+       mem_cgroup_uncharge_end();
 
        return addr;
 }
@@ -1137,8 +1225,17 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-/*
- * Do a quick page-table lookup for a single page.
+/**
+ * follow_page - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
  */
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
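
A hedged caller-side sketch of the three-way return contract the new kernel-doc above describes; my_peek_page() is a hypothetical helper, not part of the patch, and assumes mmap_sem is held for the mm that owns the vma.

static struct page *my_peek_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET);

        if (IS_ERR(page))       /* mapped, but not backed by a page descriptor */
                return NULL;
        if (!page)              /* nothing mapped at addr */
                return NULL;
        /* FOLL_GET pinned the page; the caller drops it with put_page() */
        return page;
}
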
@@ -1295,10 +1392,20 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                return i ? : -EFAULT;
                        }
                        if (pages) {
-                               struct page *page = vm_normal_page(gate_vma, start, *pte);
+                               struct page *page;
+
+                               page = vm_normal_page(gate_vma, start, *pte);
+                               if (!page) {
+                                       if (!(gup_flags & FOLL_DUMP) &&
+                                            is_zero_pfn(pte_pfn(*pte)))
+                                               page = pte_page(*pte);
+                                       else {
+                                               pte_unmap(pte);
+                                               return i ? : -EFAULT;
+                                       }
+                               }
                                pages[i] = page;
-                               if (page)
-                                       get_page(page);
+                               get_page(page);
                        }
                        pte_unmap(pte);
                        if (vmas)
@@ -1525,7 +1632,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 
        /* Ok, finally just insert the thing.. */
        get_page(page);
-       inc_mm_counter(mm, file_rss);
+       inc_mm_counter_fast(mm, MM_FILEPAGES);
        page_add_file_rmap(page);
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
@@ -1591,7 +1698,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        /* Ok, finally just insert the thing.. */
        entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+       update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
@@ -1899,11 +2006,10 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long start = addr, end = addr + size;
+       unsigned long end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
-       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1911,7 +2017,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
@@ -2042,6 +2148,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_release(old_page);
                }
                reuse = reuse_swap_page(old_page);
+               if (reuse)
+                       /*
+                        * The page is all ours.  Move it to our anon_vma so
+                        * the rmap code will not search our parent or siblings.
+                        * Protected against the rmap code by the page lock.
+                        */
+                       page_move_anon_rmap(old_page, vma, address);
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
@@ -2114,7 +2227,7 @@ reuse:
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (ptep_set_access_flags(vma, address, page_table, entry,1))
-                       update_mmu_cache(vma, address, entry);
+                       update_mmu_cache(vma, address, page_table);
                ret |= VM_FAULT_WRITE;
                goto unlock;
        }
@@ -2161,11 +2274,11 @@ gotten:
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
                        if (!PageAnon(old_page)) {
-                               dec_mm_counter(mm, file_rss);
-                               inc_mm_counter(mm, anon_rss);
+                               dec_mm_counter_fast(mm, MM_FILEPAGES);
+                               inc_mm_counter_fast(mm, MM_ANONPAGES);
                        }
                } else
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2183,7 +2296,7 @@ gotten:
                 * new page to be mapped directly into the secondary page table.
                 */
                set_pte_at_notify(mm, address, page_table, entry);
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
@@ -2514,6 +2627,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        swp_entry_t entry;
        pte_t pte;
        struct mem_cgroup *ptr = NULL;
+       int exclusive = 0;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2553,6 +2667,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
        } else if (PageHWPoison(page)) {
+               /*
+                * hwpoisoned dirty swapcache pages are kept for killing
+                * owner processes (which may be unknown at hwpoison time)
+                */
                ret = VM_FAULT_HWPOISON;
                delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
                goto out_release;
@@ -2561,6 +2679,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+       page = ksm_might_need_to_copy(page, vma, address);
+       if (!page) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                goto out_page;
@@ -2592,15 +2716,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * discarded at swap_free().
         */
 
-       inc_mm_counter(mm, anon_rss);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
+       dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
        if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                flags &= ~FAULT_FLAG_WRITE;
+               ret |= VM_FAULT_WRITE;
+               exclusive = 1;
        }
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
-       page_add_anon_rmap(page, vma, address);
+       do_page_add_anon_rmap(page, vma, address, exclusive);
        /* It's better to call commit-charge after rmap is established */
        mem_cgroup_commit_charge_swapin(page, ptr);
 
@@ -2617,7 +2744,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, pte);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
 out:
@@ -2633,6 +2760,33 @@ out_release:
 }
 
 /*
+ * This is like a special single-page "expand_downwards()",
+ * except we must first make sure that 'address-PAGE_SIZE'
+ * doesn't hit another vma.
+ *
+ * The "find_vma()" will do the right thing even if we wrap
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+               struct vm_area_struct *prev = vma->vm_prev;
+
+               /*
+                * Is there a mapping abutting this one below?
+                *
+                * That's only ok if it's the same stack mapping
+                * that has gotten split..
+                */
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+               expand_stack(vma, address - PAGE_SIZE);
+       }
+       return 0;
+}
+
+/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
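
A compilable userspace model of the decision check_stack_guard_page() makes above (the point of the vm_prev change in this commit's subject line): growing the stack into its guard page is refused only when a different, non-GROWSDOWN mapping ends exactly where the stack starts. The demo_* types are invented for this sketch; the real code first masks the address to a page boundary and then calls expand_stack().

#include <errno.h>

#define DEMO_VM_GROWSDOWN 0x1UL

struct demo_vma {
        unsigned long vm_start, vm_end, vm_flags;
        struct demo_vma *vm_prev;
};

static int demo_guard_check(const struct demo_vma *vma, unsigned long address)
{
        if (!(vma->vm_flags & DEMO_VM_GROWSDOWN) || address != vma->vm_start)
                return 0;               /* not the first page of a stack vma */
        if (vma->vm_prev && vma->vm_prev->vm_end == address)
                return (vma->vm_prev->vm_flags & DEMO_VM_GROWSDOWN) ? 0 : -ENOMEM;
        /* here the kernel expands the stack down by one more page instead */
        return 0;
}

do_anonymous_page() below turns a negative return into VM_FAULT_SIGBUS, so a fault on the guard page fails outright when an unrelated mapping sits directly underneath the stack.
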
@@ -2645,19 +2799,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        spinlock_t *ptl;
        pte_t entry;
 
+       pte_unmap(page_table);
+
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
+               return VM_FAULT_SIGBUS;
+
+       /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                                vma->vm_page_prot));
-               ptl = pte_lockptr(mm, pmd);
-               spin_lock(ptl);
+               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
                if (!pte_none(*page_table))
                        goto unlock;
                goto setpte;
        }
 
        /* Allocate our own private page. */
-       pte_unmap(page_table);
-
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
        page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2676,13 +2834,13 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
 
-       inc_mm_counter(mm, anon_rss);
+       inc_mm_counter_fast(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, address);
 setpte:
        set_pte_at(mm, address, page_table, entry);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, address, entry);
+       update_mmu_cache(vma, address, page_table);
 unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
@@ -2830,10 +2988,10 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (anon) {
-                       inc_mm_counter(mm, anon_rss);
+                       inc_mm_counter_fast(mm, MM_ANONPAGES);
                        page_add_new_anon_rmap(page, vma, address);
                } else {
-                       inc_mm_counter(mm, file_rss);
+                       inc_mm_counter_fast(mm, MM_FILEPAGES);
                        page_add_file_rmap(page);
                        if (flags & FAULT_FLAG_WRITE) {
                                dirty_page = page;
@@ -2843,7 +3001,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, page_table);
        } else {
                if (charged)
                        mem_cgroup_uncharge_page(page);
@@ -2980,7 +3138,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        }
        entry = pte_mkyoung(entry);
        if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, pte);
        } else {
                /*
                 * This is needed only for protection faults but the arch code
@@ -3011,6 +3169,9 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        count_vm_event(PGFAULT);
 
+       /* do counter updates before entering the really critical section. */
+       check_sync_rss_stat(current);
+
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);