mm: account for MAP_SHARED mappings using VM_MAYSHARE and not VM_SHARED in hugetlbfs
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e6afe52..e83ad2c 100644
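
The distinction driving this patch: MAP_SHARED always sets VM_MAYSHARE on the
resulting VMA, but mmap_region() clears VM_SHARED (together with VM_MAYWRITE)
when the backing file was not opened for writing. A read-only MAP_SHARED
mapping of a hugetlbfs file therefore has VM_MAYSHARE set but VM_SHARED clear,
and the VM_SHARED tests replaced below mis-classified it as a private mapping
for reservation accounting. A minimal userspace sketch (the hugetlbfs mount
point and file name are hypothetical):

    #include <fcntl.h>
    #include <sys/mman.h>

    int main(void)
    {
        int fd;
        void *p;

        fd = open("/mnt/huge/file", O_RDONLY);  /* read-only open */
        if (fd < 0)
            return 1;

        /* MAP_SHARED sets VM_MAYSHARE; VM_SHARED stays clear because the
         * file cannot be written. Reservation accounting must therefore
         * key off VM_MAYSHARE to treat this as a shared mapping. */
        p = mmap(NULL, 2UL << 20, PROT_READ, MAP_SHARED, fd, 0);
        return p == MAP_FAILED;
    }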
@@ -220,6 +220,35 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
 }
 
 /*
+ * Return the size of the pages allocated when backing a VMA. In the majority
+ * of cases this will be the same size as that used by the page table entries.
+ */
+unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+       struct hstate *hstate;
+
+       if (!is_vm_hugetlb_page(vma))
+               return PAGE_SIZE;
+
+       hstate = hstate_vma(vma);
+
+       return 1UL << (hstate->order + PAGE_SHIFT);
+}
+
+/*
+ * Return the page size being used by the MMU to back a VMA. In the majority
+ * of cases, the page size used by the kernel matches the MMU size. On
+ * architectures where it differs, an architecture-specific version of this
+ * function is required.
+ */
+#ifndef vma_mmu_pagesize
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+       return vma_kernel_pagesize(vma);
+}
+#endif
+
+/*
  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
  * bits of the reservation map pointer, which are always clear due to
  * alignment.
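
The size returned by vma_kernel_pagesize() above follows directly from the
hstate order: size = 1UL << (order + PAGE_SHIFT). Worked examples, assuming
x86-64's 4KB base pages (PAGE_SHIFT = 12):

    /* 2MB huge page:     order 9  -> 1UL << (9 + 12)  = 1UL << 21 = 2MB */
    /* 1GB gigantic page: order 18 -> 1UL << (18 + 12) = 1UL << 30 = 1GB */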
@@ -287,7 +316,7 @@ static void resv_map_release(struct kref *ref)
 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       if (!(vma->vm_flags & VM_SHARED))
+       if (!(vma->vm_flags & VM_MAYSHARE))
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        return NULL;
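
vma_resv_map() relies on pointer tagging: the resv_map allocation is at least
word-aligned, so the low bits of the pointer are always zero and can carry the
HPAGE_RESV_* flags defined earlier in this file (HPAGE_RESV_OWNER is 1UL << 0,
HPAGE_RESV_UNMAPPED is 1UL << 1, and HPAGE_RESV_MASK is their union). A sketch
of the scheme, with `map` a struct resv_map pointer:

    unsigned long v = (unsigned long)map | HPAGE_RESV_OWNER;       /* store */
    struct resv_map *m = (struct resv_map *)(v & ~HPAGE_RESV_MASK); /* load */
    int owner = !!(v & HPAGE_RESV_OWNER);                           /* flag */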
@@ -296,7 +325,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_SHARED);
+       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
@@ -305,7 +334,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       VM_BUG_ON(vma->vm_flags & VM_SHARED);
+       VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
 
        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -324,7 +353,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
        if (vma->vm_flags & VM_NORESERVE)
                return;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                /* Shared mappings always use reserves */
                h->resv_huge_pages--;
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -340,14 +369,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       if (!(vma->vm_flags & VM_SHARED))
+       if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
 }
 
 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
-       if (vma->vm_flags & VM_SHARED)
+       if (vma->vm_flags & VM_MAYSHARE)
                return 1;
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;
@@ -371,8 +400,10 @@ static void clear_huge_page(struct page *page,
 {
        int i;
 
-       if (unlikely(sz > MAX_ORDER_NR_PAGES))
-               return clear_gigantic_page(page, addr, sz);
+       if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+               clear_gigantic_page(page, addr, sz);
+               return;
+       }
 
        might_sleep();
        for (i = 0; i < sz/PAGE_SIZE; i++) {
@@ -404,8 +435,10 @@ static void copy_huge_page(struct page *dst, struct page *src,
        int i;
        struct hstate *h = hstate_vma(vma);
 
-       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
-               return copy_gigantic_page(dst, src, addr, vma);
+       if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+               copy_gigantic_page(dst, src, addr, vma);
+               return;
+       }
 
        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
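
Both hunks above exist because clear_gigantic_page() and copy_gigantic_page()
return void: ISO C forbids `return expr;` in a function returning void, so the
call and the return must be separate statements (GCC accepts the old form only
as an extension and warns under -pedantic). A minimal illustration:

    static void helper(void) { }

    static void bad(void)
    {
        return helper();    /* ISO C constraint violation */
    }

    static void good(void)
    {
        helper();           /* call, then a plain return */
        return;
    }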
@@ -491,6 +524,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
 
+       VM_BUG_ON(h->order >= MAX_ORDER);
+
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
@@ -883,13 +918,13 @@ static void return_unused_surplus_pages(struct hstate *h,
  * been instantiated the change should be committed via vma_commit_reservation.
  * No action is required on failure.
  */
-static int vma_needs_reservation(struct hstate *h,
+static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
 {
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                                        idx, idx + 1);
@@ -898,7 +933,7 @@ static int vma_needs_reservation(struct hstate *h,
                return 1;
 
        } else  {
-               int err;
+               long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);
 
@@ -914,7 +949,7 @@ static void vma_commit_reservation(struct hstate *h,
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
 
-       if (vma->vm_flags & VM_SHARED) {
+       if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
@@ -934,7 +969,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned int chg;
+       long chg;
 
        /*
         * Processes that did not create the mapping will have no reserves and
@@ -970,7 +1005,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        return page;
 }
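
One plausible reading of the int-to-long conversions above: region_chg() can
return a negative errno (e.g. -ENOMEM) as well as a page count, and storing
that result in the old `unsigned int chg` made the subsequent `if (chg < 0)`
error check dead code, since an unsigned value can never be negative.
Illustrative fragment (the wrap value assumes a 32-bit int):

    unsigned int chg = -ENOMEM;   /* -12 wraps to 4294967284 */
    if (chg < 0)                  /* always false for an unsigned type */
        return ERR_PTR(-ENOMEM);  /* unreachable */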
 
-__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
+int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
        int nr_nodes = nodes_weight(node_online_map);
@@ -989,8 +1024,7 @@ __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
                         * puts them into the mem_map).
                         */
                        m = addr;
-                       if (m)
-                               goto found;
+                       goto found;
                }
                hstate_next_node(h);
                nr_nodes--;
@@ -1005,6 +1039,14 @@ found:
        return 1;
 }
 
+static void prep_compound_huge_page(struct page *page, int order)
+{
+       if (unlikely(order > (MAX_ORDER - 1)))
+               prep_compound_gigantic_page(page, order);
+       else
+               prep_compound_page(page, order);
+}
+
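
prep_compound_huge_page() selects the gigantic path for pages too large for
the buddy allocator to have assembled; their tail struct pages may span
mem_map sections that are not virtually contiguous on some memory models
(e.g. classic SPARSEMEM), which is why the gigantic variants walk pages by
pfn rather than by pointer arithmetic. The same boundary motivates the
VM_BUG_ON added to update_and_free_page() earlier: gigantic pages cannot be
handed back to the buddy allocator. Worked numbers for x86-64 (4KB base
pages, MAX_ORDER = 11):

    /* order > MAX_ORDER - 1, i.e. order > 10, selects the gigantic path:
     *   2MB huge page:     order 9  -> prep_compound_page()
     *   1GB gigantic page: order 18 -> prep_compound_gigantic_page()
     */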
 /* Put bootmem huge pages into the standard lists after mem_map is up */
 static void __init gather_bootmem_prealloc(void)
 {
@@ -1015,7 +1057,7 @@ static void __init gather_bootmem_prealloc(void)
                struct hstate *h = m->hstate;
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
-               prep_compound_page(page, h->order);
+               prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
        }
 }
@@ -1786,6 +1828,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                struct page *page, unsigned long address)
 {
+       struct hstate *h = hstate_vma(vma);
        struct vm_area_struct *iter_vma;
        struct address_space *mapping;
        struct prio_tree_iter iter;
@@ -1795,7 +1838,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
         * from page cache lookup which is in HPAGE_SIZE units.
         */
-       address = address & huge_page_mask(hstate_vma(vma));
+       address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
                + (vma->vm_pgoff >> PAGE_SHIFT);
        mapping = (struct address_space *)page_private(page);
@@ -1814,7 +1857,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                        unmap_hugepage_range(iter_vma,
-                               address, address + HPAGE_SIZE,
+                               address, address + huge_page_size(h),
                                page);
        }
 
@@ -1850,7 +1893,7 @@ retry_avoidcopy:
         * at the time of fork() could consume its reserves on COW instead
         * of the full address range.
         */
-       if (!(vma->vm_flags & VM_SHARED) &&
+       if (!(vma->vm_flags & VM_MAYSHARE) &&
                        is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
                        old_page != pagecache_page)
                outside_reserve = 1;
@@ -1957,7 +2000,7 @@ retry:
                clear_huge_page(page, address, huge_page_size(h));
                __SetPageUptodate(page);
 
-               if (vma->vm_flags & VM_SHARED) {
+               if (vma->vm_flags & VM_MAYSHARE) {
                        int err;
                        struct inode *inode = mapping->host;
 
@@ -2061,7 +2104,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        goto out_mutex;
                }
 
-               if (!(vma->vm_flags & VM_SHARED))
+               if (!(vma->vm_flags & VM_MAYSHARE))
                        pagecache_page = hugetlbfs_pagecache_page(h,
                                                                vma, address);
        }
@@ -2226,12 +2269,18 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
-                                       struct vm_area_struct *vma)
+                                       struct vm_area_struct *vma,
+                                       int acctflag)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
 
-       if (vma && vma->vm_flags & VM_NORESERVE)
+       /*
+        * Only apply the hugepage reservation if asked. At fault time, an
+        * attempt will be made for a VM_NORESERVE mapping to allocate the
+        * page and take filesystem quota without using reserves.
+        */
+       if (acctflag & VM_NORESERVE)
                return 0;
 
        /*
@@ -2240,7 +2289,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * to reserve the full area even if read-only as mprotect() may be
         * called to make the mapping read-write. Assume !vma is a shm mapping
         */
-       if (!vma || vma->vm_flags & VM_SHARED)
+       if (!vma || vma->vm_flags & VM_MAYSHARE)
                chg = region_chg(&inode->i_mapping->private_list, from, to);
        else {
                struct resv_map *resv_map = resv_map_alloc();
@@ -2256,14 +2305,32 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (chg < 0)
                return chg;
 
+       /* There must be enough filesystem quota for the mapping */
        if (hugetlb_get_quota(inode->i_mapping, chg))
                return -ENOSPC;
+
+       /*
+        * Check that enough hugepages are available for the reservation.
+        * Hand back the quota if there are not.
+        */
        ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
        }
-       if (!vma || vma->vm_flags & VM_SHARED)
+
+       /*
+        * Account for the reservations made. Shared mappings record regions
+        * that have reservations as they are shared by multiple VMAs.
+        * When the last VMA disappears, the region map says how much
+        * the reservation was and the page cache tells how much of
+        * the reservation was consumed. Private mappings are per-VMA and
+        * only the consumed reservations are tracked. When the VMA
+        * disappears, the original reservation is the VMA size and the
+        * consumed reservations are stored in the map. Hence, nothing
+        * else has to be done for private mappings here
+        */
+       if (!vma || vma->vm_flags & VM_MAYSHARE)
                region_add(&inode->i_mapping->private_list, from, to);
        return 0;
 }
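
With the new acctflag argument, callers forward the mapping's own flags so
that a MAP_NORESERVE mapping skips the up-front reservation and instead tries
its luck at fault time. A hypothetical, simplified call site (the real ones
live in fs/hugetlbfs; the offset arithmetic here is illustrative):

    /* Reserve huge pages covering [from, to) in huge-page units,
     * letting hugetlb_reserve_pages() test acctflag & VM_NORESERVE. */
    long from = vma->vm_pgoff >> huge_page_order(h);
    long to = from + (vma_pages(vma) >> huge_page_order(h));

    if (hugetlb_reserve_pages(inode, from, to, vma, vma->vm_flags))
        return -ENOMEM;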