oom: fix integer overflow of points in oom_badness
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 636be5d..73f17c0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
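
The rule in the comment above -- exclusive mmap_sem alone, or shared mmap_sem paired with the instantiation mutex -- in a minimal sketch (the helper names are hypothetical, not the kernel's actual fault-path entry points; the locks are the ones declared in this file):

	/* Exclusive path: holding mmap_sem for write is sufficient on its own. */
	static void touch_huge_ptes_exclusive(struct mm_struct *mm)
	{
		down_write(&mm->mmap_sem);
		/* ... walk or instantiate huge ptes ... */
		up_write(&mm->mmap_sem);
	}

	/* Shared path: mmap_sem for read must be paired with the global mutex. */
	static void touch_huge_ptes_shared(struct mm_struct *mm)
	{
		down_read(&mm->mmap_sem);
		mutex_lock(&hugetlb_instantiation_mutex);
		/* ... instantiate a single huge pte ... */
		mutex_unlock(&hugetlb_instantiation_mutex);
		up_read(&mm->mmap_sem);
	}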
@@ -146,7 +146,7 @@ static long region_chg(struct list_head *head, long f, long t)
                if (rg->from > t)
                        return chg;
 
-               /* We overlap with this area, if it extends futher than
+               /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
@@ -394,71 +394,6 @@ static int vma_has_reserves(struct vm_area_struct *vma)
        return 0;
 }
 
-static void clear_gigantic_page(struct page *page,
-                       unsigned long addr, unsigned long sz)
-{
-       int i;
-       struct page *p = page;
-
-       might_sleep();
-       for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
-               cond_resched();
-               clear_user_highpage(p, addr + i * PAGE_SIZE);
-       }
-}
-static void clear_huge_page(struct page *page,
-                       unsigned long addr, unsigned long sz)
-{
-       int i;
-
-       if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
-               clear_gigantic_page(page, addr, sz);
-               return;
-       }
-
-       might_sleep();
-       for (i = 0; i < sz/PAGE_SIZE; i++) {
-               cond_resched();
-               clear_user_highpage(page + i, addr + i * PAGE_SIZE);
-       }
-}
-
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-                          unsigned long addr, struct vm_area_struct *vma)
-{
-       int i;
-       struct hstate *h = hstate_vma(vma);
-       struct page *dst_base = dst;
-       struct page *src_base = src;
-
-       for (i = 0; i < pages_per_huge_page(h); ) {
-               cond_resched();
-               copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-               i++;
-               dst = mem_map_next(dst, dst_base, i);
-               src = mem_map_next(src, src_base, i);
-       }
-}
-
-static void copy_user_huge_page(struct page *dst, struct page *src,
-                          unsigned long addr, struct vm_area_struct *vma)
-{
-       int i;
-       struct hstate *h = hstate_vma(vma);
-
-       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-               copy_user_gigantic_page(dst, src, addr, vma);
-               return;
-       }
-
-       might_sleep();
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               cond_resched();
-               copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
-       }
-}
-
 static void copy_gigantic_page(struct page *dst, struct page *src)
 {
        int i;
@@ -540,7 +475,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 
        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
-               goto err;;
+               goto err;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
@@ -568,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
@@ -640,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -656,7 +593,6 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -907,7 +843,7 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
@@ -955,7 +891,7 @@ retry:
 
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
-        * needed to accomodate the reservation.  Add the appropriate number
+        * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
@@ -1098,10 +1034,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(chg);
+               return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
-                       return ERR_PTR(-ENOSPC);
+                       return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
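
Returning ERR_PTR(-VM_FAULT_OOM) / ERR_PTR(-VM_FAULT_SIGBUS) works because the fault path simply negates the encoded value back out. A hedged sketch of the round trip as the callers in this file use it (see the "ret = -PTR_ERR(page)" context line further down):

	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page)) {
		/* -(-VM_FAULT_OOM) == VM_FAULT_OOM, handed straight back to the fault core */
		ret = -PTR_ERR(page);
		goto out;
	}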
@@ -1170,12 +1106,28 @@ static void __init gather_bootmem_prealloc(void)
        struct huge_bootmem_page *m;
 
        list_for_each_entry(m, &huge_boot_pages, list) {
-               struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
+               struct page *page;
+
+#ifdef CONFIG_HIGHMEM
+               page = pfn_to_page(m->phys >> PAGE_SHIFT);
+               free_bootmem_late((unsigned long)m,
+                                 sizeof(struct huge_bootmem_page));
+#else
+               page = virt_to_page(m);
+#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and another
+                * side-effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
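
A hedged worked example of the scale involved (assuming x86_64 defaults: 4 KiB base pages, MAX_ORDER = 11): a 1 GiB gigantic page has order 18, so h->order > MAX_ORDER - 1 holds, and each boot-allocated 1 GiB page returns 1 << 18 = 262144 pages to totalram_pages -- exactly the gigabyte that the boot-time allocation had kept out of the count.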
 
@@ -1428,6 +1380,7 @@ static ssize_t nr_hugepages_show_common(struct kobject *kobj,
 
        return sprintf(buf, "%lu\n", nr_huge_pages);
 }
+
 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                        struct kobject *kobj, struct kobj_attribute *attr,
                        const char *buf, size_t len)
@@ -1440,9 +1393,14 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 
        err = strict_strtoul(buf, 10, &count);
        if (err)
-               return 0;
+               goto out;
 
        h = kobj_to_hstate(kobj, &nid);
+       if (h->order >= MAX_ORDER) {
+               err = -EINVAL;
+               goto out;
+       }
+
        if (nid == NUMA_NO_NODE) {
                /*
                 * global hstate attribute
@@ -1468,6 +1426,9 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                NODEMASK_FREE(nodes_allowed);
 
        return len;
+out:
+       NODEMASK_FREE(nodes_allowed);
+       return err;
 }
 
 static ssize_t nr_hugepages_show(struct kobject *kobj,
@@ -1510,6 +1471,7 @@ static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
        struct hstate *h = kobj_to_hstate(kobj, NULL);
        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
 }
+
 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
 {
@@ -1517,9 +1479,12 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj, NULL);
 
+       if (h->order >= MAX_ORDER)
+               return -EINVAL;
+
        err = strict_strtoul(buf, 10, &input);
        if (err)
-               return 0;
+               return err;
 
        spin_lock(&hugetlb_lock);
        h->nr_overcommit_huge_pages = input;
@@ -1922,13 +1887,18 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
+       int ret;
+
+       tmp = h->max_huge_pages;
 
-       if (!write)
-               tmp = h->max_huge_pages;
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
 
        if (write) {
                NODEMASK_ALLOC(nodemask_t, nodes_allowed,
@@ -1943,8 +1913,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
                if (nodes_allowed != &node_states[N_HIGH_MEMORY])
                        NODEMASK_FREE(nodes_allowed);
        }
-
-       return 0;
+out:
+       return ret;
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
@@ -1982,21 +1952,26 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
+       int ret;
 
-       if (!write)
-               tmp = h->nr_overcommit_huge_pages;
+       tmp = h->nr_overcommit_huge_pages;
+
+       if (write && h->order >= MAX_ORDER)
+               return -EINVAL;
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       if (ret)
+               goto out;
 
        if (write) {
                spin_lock(&hugetlb_lock);
                h->nr_overcommit_huge_pages = tmp;
                spin_unlock(&hugetlb_lock);
        }
-
-       return 0;
+out:
+       return ret;
 }
 
 #endif /* CONFIG_SYSCTL */
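
Both sysctl handlers above now share the same read-modify-write shape: seed a local with the live value so reads and failed writes never clobber it, let proc_doulongvec_minmax parse, and commit under hugetlb_lock only on a successful write. A minimal sketch of that pattern (the handler and value names are hypothetical; only proc_doulongvec_minmax and hugetlb_lock are the kernel's own):

	static int hugetlb_ulong_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos,
			unsigned long *value)
	{
		unsigned long tmp = *value;	/* reads always see the live value */
		int ret;

		table->data = &tmp;
		table->maxlen = sizeof(unsigned long);
		ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
		if (ret)
			return ret;	/* parse errors are reported, not swallowed */

		if (write) {
			spin_lock(&hugetlb_lock);
			*value = tmp;	/* commit under the lock */
			spin_unlock(&hugetlb_lock);
		}
		return 0;
	}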
@@ -2085,7 +2060,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
         * This new VMA should share its siblings reservation map if present.
         * The VMA will only ever have a valid reservation map pointer where
         * it is being copied for another still existing VMA.  As that VMA
-        * has a reference to the reservation map it cannot dissappear until
+        * has a reference to the reservation map it cannot disappear until
         * after this open call completes.  It is therefore safe to take a
         * new reference here without additional locking.
         */
@@ -2158,9 +2133,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2215,9 +2189,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp)) {
+       if (non_swap_entry(swp) && is_migration_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2228,9 +2202,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2247,7 +2221,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long sz = huge_page_size(h);
 
        /*
-        * A page gathering list, protected by per file i_mmap_lock. The
+        * A page gathering list, protected by per file i_mmap_mutex. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
@@ -2316,9 +2290,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        __unmap_hugepage_range(vma, start, end, ref_page);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2350,7 +2324,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2368,7 +2342,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                address, address + huge_page_size(h),
                                page);
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        return 1;
 }
@@ -2448,10 +2422,16 @@ retry_avoidcopy:
         * When the original hugepage is shared one, it does not have
         * anon_vma prepared.
         */
-       if (unlikely(anon_vma_prepare(vma)))
+       if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
+               /* Caller expects lock to be held */
+               spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
+       }
 
-       copy_user_huge_page(new_page, old_page, address, vma);
+       copy_user_huge_page(new_page, old_page, address, vma,
+                           pages_per_huge_page(h));
        __SetPageUptodate(new_page);
 
        /*
@@ -2528,7 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Currently, we are forced to kill the process in the event the
         * original mapper has unmapped pages from the child due to a failed
-        * COW. Warn that such a situation has occured as it may not be obvious
+        * COW. Warn that such a situation has occurred as it may not be obvious
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                printk(KERN_WARNING
@@ -2555,7 +2535,7 @@ retry:
                        ret = -PTR_ERR(page);
                        goto out;
                }
-               clear_huge_page(page, address, huge_page_size(h));
+               clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_MAYSHARE) {
@@ -2589,7 +2569,8 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON;
+                       ret = VM_FAULT_HWPOISON |
+                             VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
                page_dup_rmap(page);
@@ -2656,7 +2637,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON;
+                       return VM_FAULT_HWPOISON_LARGE |
+                              VM_FAULT_SET_HINDEX(h - hstates);
        }
 
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
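
VM_FAULT_SET_HINDEX packs the hstate index (h - hstates) into the high bits of the fault code so that memory-failure handling can later recover which huge page size was poisoned. A hedged sketch of the encode/decode pair as defined in include/linux/mm.h of this era (recalled from memory; treat the exact shift and mask as an assumption):

	#define VM_FAULT_SET_HINDEX(x)	((x) << 12)		/* store hstate index */
	#define VM_FAULT_GET_HINDEX(x)	(((x) >> 12) & 0xf)	/* recover it later */

	/* e.g. with a 1 GB hstate at index 1, the index round-trips through the flags: */
	/* VM_FAULT_GET_HINDEX(VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(1)) == 1 */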
@@ -2733,7 +2715,8 @@ out_page_table_lock:
                unlock_page(pagecache_page);
                put_page(pagecache_page);
        }
-       unlock_page(page);
+       if (page != pagecache_page)
+               unlock_page(page);
 
 out_mutex:
        mutex_unlock(&hugetlb_instantiation_mutex);
@@ -2845,7 +2828,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
@@ -2860,7 +2843,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
        flush_tlb_range(vma, start, end);
 }
@@ -2868,7 +2851,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
-                                       int acctflag)
+                                       vm_flags_t vm_flags)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
@@ -2878,7 +2861,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * attempt will be made for VM_NORESERVE to allocate a page
         * and filesystem quota without using reserves
         */
-       if (acctflag & VM_NORESERVE)
+       if (vm_flags & VM_NORESERVE)
                return 0;
 
        /*
@@ -2946,6 +2929,8 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
        hugetlb_acct_memory(h, -(chg - freed));
 }
 
+#ifdef CONFIG_MEMORY_FAILURE
+
 /* Should be called in hugetlb_lock */
 static int is_hugepage_on_freelist(struct page *hpage)
 {
@@ -2960,7 +2945,6 @@ static int is_hugepage_on_freelist(struct page *hpage)
        return 0;
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
 /*
  * This function is called from memory failure code.
  * Assume the caller holds page lock of the head page.
@@ -2974,6 +2958,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
        spin_lock(&hugetlb_lock);
        if (is_hugepage_on_freelist(hpage)) {
                list_del(&hpage->lru);
+               set_page_refcounted(hpage);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                ret = 0;