oom: fix integer overflow of points in oom_badness
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 838fe25..73f17c0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
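
The comment retabbed in this hunk documents the serialization rule for the shared reservation map: either hold mmap_sem for write, or hold it for read plus the hugetlb_instantiation_mutex. A minimal userspace model of that rule, with pthread primitives standing in for mmap_sem and the instantiation mutex (illustrative only, not kernel code):

#include <pthread.h>

/* the rwlock plays mmap_sem, the mutex plays hugetlb_instantiation_mutex */
static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t instantiation_mutex = PTHREAD_MUTEX_INITIALIZER;

static void update_as_writer(void)
{
        pthread_rwlock_wrlock(&mmap_sem);
        /* ... modify the region list ... */
        pthread_rwlock_unlock(&mmap_sem);
}

static void update_as_reader(void)
{
        pthread_rwlock_rdlock(&mmap_sem);
        pthread_mutex_lock(&instantiation_mutex);
        /* ... modify the region list ... */
        pthread_mutex_unlock(&instantiation_mutex);
        pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
        update_as_writer();
        update_as_reader();
        return 0;
}
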
@@ -146,7 +146,7 @@ static long region_chg(struct list_head *head, long f, long t)
                if (rg->from > t)
                        return chg;
 
-               /* We overlap with this area, if it extends futher than
+               /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
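
The comment fixed here describes the overlap rule in region_chg(): a request that overlaps an existing reservation only pays for the part not already covered. A simplified single-region model of that accounting (the kernel walks a whole list; these names are made up):

/* charge for a request [f, t) given one existing reservation [from, to) */
static long overlap(long f, long t, long from, long to)
{
        long lo = f > from ? f : from;
        long hi = t < to ? t : to;

        return hi > lo ? hi - lo : 0;
}

static long region_chg_model(long f, long t, long from, long to)
{
        return (t - f) - overlap(f, t, from, to);
}
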
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
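
The reflow above is purely cosmetic, but the statement is a natural candidate for a named mask. A hypothetical version (the PG_* values below are local stand-ins, not the kernel's page-flags.h definitions):

enum { PG_locked, PG_error, PG_referenced, PG_dirty,
       PG_active, PG_reserved, PG_private, PG_writeback };

#define HUGE_FLAGS_TO_CLEAR \
        (1UL << PG_locked  | 1UL << PG_error    | 1UL << PG_referenced | \
         1UL << PG_dirty   | 1UL << PG_active   | 1UL << PG_reserved   | \
         1UL << PG_private | 1UL << PG_writeback)

/* usage: page[i].flags &= ~HUGE_FLAGS_TO_CLEAR; */
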
@@ -575,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
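
The added set_page_count(p, 0) brings gigantic-page setup in line with regular compound pages: only the head page carries a reference, and every tail must start at zero. A toy model of that invariant, with a plain struct standing in for struct page:

#include <stddef.h>

struct toy_page {
        int count;
        struct toy_page *first_page;
};

static void prep_compound_model(struct toy_page *pages, size_t nr)
{
        size_t i;

        pages[0].count = 1;             /* the head holds the only reference */
        for (i = 1; i < nr; i++) {
                pages[i].count = 0;     /* tails must start at zero */
                pages[i].first_page = &pages[0];
        }
}
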
@@ -591,7 +593,6 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -842,7 +843,7 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Increase the hugetlb pool such that it can accomodate a reservation
+ * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
 static int gather_surplus_pages(struct hstate *h, int delta)
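
Beyond the spelling fix, the contract stated in this comment is simple arithmetic: the pool must grow by however much the reservation overshoots the pages already free. A sketch of that calculation, with illustrative parameter names:

static long surplus_needed(long resv_huge_pages, long delta,
                           long free_huge_pages)
{
        long needed = (resv_huge_pages + delta) - free_huge_pages;

        return needed > 0 ? needed : 0; /* nothing to do if free covers it */
}
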
@@ -890,7 +891,7 @@ retry:
 
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
-        * needed to accomodate the reservation.  Add the appropriate number
+        * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
@@ -1033,10 +1034,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(chg);
+               return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
-                       return ERR_PTR(-ENOSPC);
+                       return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
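
The switch from ERR_PTR(chg) and ERR_PTR(-ENOSPC) to encoded VM_FAULT_* values means the caller can decode a fault result straight out of the returned pointer. The machinery is the kernel's ERR_PTR()/PTR_ERR() trick from include/linux/err.h; a self-contained userspace demo of the encoding (the fault value here is a stand-in, not the real VM_FAULT_OOM):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the top page of the address space is never a valid pointer */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *page = ERR_PTR(-42);      /* -42 standing in for -VM_FAULT_OOM */

        if (IS_ERR(page))
                printf("fault code %ld\n", -PTR_ERR(page));
        return 0;
}
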
@@ -1105,12 +1106,28 @@ static void __init gather_bootmem_prealloc(void)
        struct huge_bootmem_page *m;
 
        list_for_each_entry(m, &huge_boot_pages, list) {
-               struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
+               struct page *page;
+
+#ifdef CONFIG_HIGHMEM
+               page = pfn_to_page(m->phys >> PAGE_SHIFT);
+               free_bootmem_late((unsigned long)m,
+                                 sizeof(struct huge_bootmem_page));
+#else
+               page = virt_to_page(m);
+#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and other
+                * side-effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
 
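Gigantic pages (order above MAX_ORDER - 1) are carved out of bootmem before totalram_pages is settled, so the new branch credits 1 << h->order base pages back per page. Back-of-envelope, assuming x86_64 defaults (4 KiB base pages, 1 GiB gigantic pages, hence order 18):

#include <stdio.h>

int main(void)
{
        unsigned int order = 18;        /* 1 GiB huge page / 4 KiB base page */

        printf("%lu base pages restored per gigantic page\n", 1UL << order);
        return 0;                       /* prints 262144 */
}
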
@@ -1872,8 +1889,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        unsigned long tmp;
        int ret;
 
-       if (!write)
-               tmp = h->max_huge_pages;
+       tmp = h->max_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
                return -EINVAL;
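
With the `if (!write)` guard gone, tmp is initialized before it is ever handed to the generic proc helper, so no path can observe stack garbage; the next hunk applies the same fix to the overcommit handler. A runnable model of the pattern (generic_helper() is a made-up stand-in for proc_doulongvec_minmax()):

#include <stdio.h>

static unsigned long max_huge_pages = 512;

/* stand-in for proc_doulongvec_minmax(): reads print, writes update */
static int generic_helper(int write, unsigned long *val, unsigned long input)
{
        if (write)
                *val = input;
        else
                printf("%lu\n", *val);
        return 0;
}

static int handler_model(int write, unsigned long input)
{
        unsigned long tmp = max_huge_pages;     /* always initialized now */
        int ret = generic_helper(write, &tmp, input);

        if (ret == 0 && write)
                max_huge_pages = tmp;
        return ret;
}

int main(void)
{
        handler_model(1, 1024);         /* echo 1024 > nr_hugepages */
        return handler_model(0, 0);     /* cat nr_hugepages -> 1024 */
}
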
@@ -1938,8 +1954,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        unsigned long tmp;
        int ret;
 
-       if (!write)
-               tmp = h->nr_overcommit_huge_pages;
+       tmp = h->nr_overcommit_huge_pages;
 
        if (write && h->order >= MAX_ORDER)
                return -EINVAL;
@@ -2045,7 +2060,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
         * This new VMA should share its siblings reservation map if present.
         * The VMA will only ever have a valid reservation map pointer where
         * it is being copied for another still existing VMA.  As that VMA
-        * has a reference to the reservation map it cannot dissappear until
+        * has a reference to the reservation map it cannot disappear until
         * after this open call completes.  It is therefore safe to take a
         * new reference here without additional locking.
         */
@@ -2118,9 +2133,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2175,9 +2189,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp)) {
+       if (non_swap_entry(swp) && is_migration_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2188,9 +2202,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
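With the stray braces gone, both predicates read as plain conjunctions, and each could shrink further to a single boolean return. A sketch of that shape, with stub predicates in place of the kernel's swap-entry helpers:

#include <stdbool.h>

/* stubs standing in for the kernel's swap-entry helpers */
static bool non_swap_entry(unsigned long swp) { return swp & 1; }
static bool is_migration_entry(unsigned long swp) { return swp & 2; }

static int is_hugetlb_entry_migration_model(unsigned long swp)
{
        return non_swap_entry(swp) && is_migration_entry(swp);
}
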
@@ -2207,7 +2221,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long sz = huge_page_size(h);
 
        /*
-        * A page gathering list, protected by per file i_mmap_lock. The
+        * A page gathering list, protected by per file i_mmap_mutex. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
@@ -2276,9 +2290,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        __unmap_hugepage_range(vma, start, end, ref_page);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
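
These conversions follow the tree-wide change of the address_space i_mmap lock from a spinlock to a mutex: every lock/unlock pair flips together, and the critical section may now sleep. A userspace analogue of the resulting pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mmap_mutex = PTHREAD_MUTEX_INITIALIZER;

static void do_unmap(void)
{
        /* may block (e.g. on I/O) without trouble under a mutex */
        puts("unmapping");
}

static void unmap_hugepage_range_model(void)
{
        pthread_mutex_lock(&i_mmap_mutex);
        do_unmap();
        pthread_mutex_unlock(&i_mmap_mutex);
}

int main(void)
{
        unmap_hugepage_range_model();
        return 0;
}
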
@@ -2310,7 +2324,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2328,7 +2342,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                address, address + huge_page_size(h),
                                page);
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        return 1;
 }
@@ -2409,6 +2423,8 @@ retry_avoidcopy:
         * anon_vma prepared.
         */
        if (unlikely(anon_vma_prepare(vma))) {
+               page_cache_release(new_page);
+               page_cache_release(old_page);
                /* Caller expects lock to be held */
                spin_lock(&mm->page_table_lock);
                return VM_FAULT_OOM;
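
The two added releases plug a page reference leak on the anon_vma_prepare() failure path. Note the ordering: drop what was taken earlier in the function, then restore the lock state the caller expects before returning the fault code. The shape of that path, with stubs rather than kernel API:

#include <pthread.h>

#define VM_FAULT_OOM 1                  /* stand-in value */

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for page_cache_release() */
static void put_ref(int *refcount)
{
        --*refcount;
}

static int cow_oom_path(int *new_ref, int *old_ref)
{
        put_ref(new_ref);                       /* release new_page */
        put_ref(old_ref);                       /* release old_page */
        pthread_mutex_lock(&page_table_lock);   /* caller expects lock held */
        return VM_FAULT_OOM;
}
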
@@ -2492,7 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Currently, we are forced to kill the process in the event the
         * original mapper has unmapped pages from the child due to a failed
-        * COW. Warn that such a situation has occured as it may not be obvious
+        * COW. Warn that such a situation has occurred as it may not be obvious
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
                printk(KERN_WARNING
@@ -2553,7 +2569,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON | 
+                       ret = VM_FAULT_HWPOISON |
                              VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
@@ -2621,7 +2637,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON_LARGE | 
+                       return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
        }
 
@@ -2812,7 +2828,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
@@ -2827,7 +2843,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
        flush_tlb_range(vma, start, end);
 }
@@ -2835,7 +2851,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
-                                       int acctflag)
+                                       vm_flags_t vm_flags)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
@@ -2845,7 +2861,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * attempt will be made for VM_NORESERVE to allocate a page
         * and filesystem quota without using reserves
         */
-       if (acctflag & VM_NORESERVE)
+       if (vm_flags & VM_NORESERVE)
                return 0;
 
        /*