diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a56e6f3..3a5aeb3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 
 #include <linux/hugetlb.h>
+#include <linux/node.h>
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
@@ -234,6 +235,7 @@ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 
        return 1UL << (hstate->order + PAGE_SHIFT);
 }
+EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 
 /*
  * Return the page size being used by the MMU to back a VMA. In the majority
@@ -400,7 +402,7 @@ static void clear_huge_page(struct page *page,
 {
        int i;
 
-       if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+       if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
                clear_gigantic_page(page, addr, sz);
                return;
        }
@@ -455,24 +457,6 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        h->free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(struct hstate *h)
-{
-       int nid;
-       struct page *page = NULL;
-
-       for (nid = 0; nid < MAX_NUMNODES; ++nid) {
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       break;
-               }
-       }
-       return page;
-}
-
 static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
@@ -639,41 +623,66 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 }
 
 /*
- * Use a helper variable to find the next node and then
- * copy it back to hugetlb_next_nid afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do.  Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
+ * common helper functions for hstate_next_node_to_{alloc|free}.
+ * We may have allocated or freed a huge page based on a different
+ * nodes_allowed previously, so h->next_node_to_{alloc|free} might
+ * be outside of *nodes_allowed.  Ensure that we use an allowed
+ * node for alloc or free.
  */
-static int hstate_next_node(struct hstate *h)
+static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
 {
-       int next_nid;
-       next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-       if (next_nid == MAX_NUMNODES)
-               next_nid = first_node(node_online_map);
-       h->hugetlb_next_nid = next_nid;
-       return next_nid;
+       nid = next_node(nid, *nodes_allowed);
+       if (nid == MAX_NUMNODES)
+               nid = first_node(*nodes_allowed);
+       VM_BUG_ON(nid >= MAX_NUMNODES);
+
+       return nid;
 }
 
-static int alloc_fresh_huge_page(struct hstate *h)
+static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
+{
+       if (!node_isset(nid, *nodes_allowed))
+               nid = next_node_allowed(nid, nodes_allowed);
+       return nid;
+}
+
+/*
+ * returns the previously saved node ["this node"] from which to
+ * allocate a persistent huge page for the pool and advance the
+ * next node from which to allocate, handling wrap at end of node
+ * mask.
+ */
+static int hstate_next_node_to_alloc(struct hstate *h,
+                                       nodemask_t *nodes_allowed)
+{
+       int nid;
+
+       VM_BUG_ON(!nodes_allowed);
+
+       nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
+       h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
+
+       return nid;
+}
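The three helpers above implement a simple interleave: hand back the previously saved "next" node if it is still allowed, and advance the saved pointer with wrap-around so that successive callers spread their work across the mask. A minimal userspace sketch of that selection logic, using a plain bit mask in place of nodemask_t (all names are illustrative, not kernel API):

#include <stdio.h>

#define MAX_NODES 8	/* stand-in for MAX_NUMNODES */

/*
 * Next allowed node strictly after nid, wrapping at the end of the mask;
 * models next_node() followed by first_node() in next_node_allowed().
 * Assumes a non-empty mask, as the kernel asserts with VM_BUG_ON().
 */
static int next_allowed(int nid, unsigned int mask)
{
	do {
		nid = (nid + 1) % MAX_NODES;
	} while (!(mask & (1u << nid)));
	return nid;
}

/* Return "this node" and advance the saved pointer for the next caller. */
static int pick_node(int *next, unsigned int mask)
{
	int nid = *next;

	if (!(mask & (1u << nid)))	/* saved node fell outside the mask */
		nid = next_allowed(nid, mask);
	*next = next_allowed(nid, mask);
	return nid;
}

int main(void)
{
	unsigned int allowed = 0x0b;	/* nodes 0, 1 and 3 */
	int next = 0;
	int i;

	for (i = 0; i < 6; i++)
		printf("%d ", pick_node(&next, allowed));
	printf("\n");			/* prints: 0 1 3 0 1 3 */
	return 0;
}

alloc_fresh_huge_page() below, and later free_pool_huge_page() and adjust_pool_surplus(), all wrap this selection in the same do/while pattern: remember the starting node, try each allowed node in turn, and give up once the iterator wraps back to the start.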
+
+static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
        struct page *page;
        int start_nid;
        int next_nid;
        int ret = 0;
 
-       start_nid = h->hugetlb_next_nid;
+       start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+       next_nid = start_nid;
 
        do {
-               page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
-               if (page)
+               page = alloc_fresh_huge_page_node(h, next_nid);
+               if (page) {
                        ret = 1;
-               next_nid = hstate_next_node(h);
-       } while (!page && h->hugetlb_next_nid != start_nid);
+                       break;
+               }
+               next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+       } while (next_nid != start_nid);
 
        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -683,6 +692,67 @@ static int alloc_fresh_huge_page(struct hstate *h)
        return ret;
 }
 
+/*
+ * helper for free_pool_huge_page() - return the previously saved
+ * node ["this node"] from which to free a huge page.  Advance the
+ * next node id whether or not we find a free huge page to free so
+ * that the next attempt to free addresses the next node.
+ */
+static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
+{
+       int nid;
+
+       VM_BUG_ON(!nodes_allowed);
+
+       nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
+       h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
+
+       return nid;
+}
+
+/*
+ * Free huge page from pool from next node to free.
+ * Attempt to keep persistent huge pages more or less
+ * balanced over allowed nodes.
+ * Called with hugetlb_lock locked.
+ */
+static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
+                                                        bool acct_surplus)
+{
+       int start_nid;
+       int next_nid;
+       int ret = 0;
+
+       start_nid = hstate_next_node_to_free(h, nodes_allowed);
+       next_nid = start_nid;
+
+       do {
+               /*
+                * If we're returning unused surplus pages, only examine
+                * nodes with surplus pages.
+                */
+               if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
+                   !list_empty(&h->hugepage_freelists[next_nid])) {
+                       struct page *page =
+                               list_entry(h->hugepage_freelists[next_nid].next,
+                                         struct page, lru);
+                       list_del(&page->lru);
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[next_nid]--;
+                       if (acct_surplus) {
+                               h->surplus_huge_pages--;
+                               h->surplus_huge_pages_node[next_nid]--;
+                       }
+                       update_and_free_page(h, page);
+                       ret = 1;
+                       break;
+               }
+               next_nid = hstate_next_node_to_free(h, nodes_allowed);
+       } while (next_nid != start_nid);
+
+       return ret;
+}
+
 static struct page *alloc_buddy_huge_page(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
 {
@@ -854,22 +924,13 @@ free:
  * When releasing a hugetlb pool reservation, any surplus pages that were
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
+ * Called with hugetlb_lock held.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
-       static int nid = -1;
-       struct page *page;
        unsigned long nr_pages;
 
-       /*
-        * We want to release as many surplus pages as possible, spread
-        * evenly across all nodes. Iterate across all nodes until we
-        * can no longer free unreserved surplus pages. This occurs when
-        * the nodes with surplus pages have no free pages.
-        */
-       unsigned long remaining_iterations = nr_online_nodes;
-
        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;
 
@@ -879,26 +940,17 @@ static void return_unused_surplus_pages(struct hstate *h,
 
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
-       while (remaining_iterations-- && nr_pages) {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
-
-               if (!h->surplus_huge_pages_node[nid])
-                       continue;
-
-               if (!list_empty(&h->hugepage_freelists[nid])) {
-                       page = list_entry(h->hugepage_freelists[nid].next,
-                                         struct page, lru);
-                       list_del(&page->lru);
-                       update_and_free_page(h, page);
-                       h->free_huge_pages--;
-                       h->free_huge_pages_node[nid]--;
-                       h->surplus_huge_pages--;
-                       h->surplus_huge_pages_node[nid]--;
-                       nr_pages--;
-                       remaining_iterations = nr_online_nodes;
-               }
+       /*
+        * We want to release as many surplus pages as possible, spread
+        * evenly across all nodes with memory. Iterate across these nodes
+        * until we can no longer free unreserved surplus pages. This occurs
+        * when the nodes with surplus pages have no free pages.
+        * free_pool_huge_page() will balance the freed pages across the
+        * on-line nodes with memory and will handle the hstate accounting.
+        */
+       while (nr_pages--) {
+               if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
+                       break;
        }
 }
 
@@ -1001,13 +1053,14 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
        struct huge_bootmem_page *m;
-       int nr_nodes = nodes_weight(node_online_map);
+       int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
        while (nr_nodes) {
                void *addr;
 
                addr = __alloc_bootmem_node_nopanic(
-                               NODE_DATA(h->hugetlb_next_nid),
+                               NODE_DATA(hstate_next_node_to_alloc(h,
+                                               &node_states[N_HIGH_MEMORY])),
                                huge_page_size(h), huge_page_size(h), 0);
 
                if (addr) {
@@ -1019,7 +1072,6 @@ int __weak alloc_bootmem_huge_page(struct hstate *h)
                        m = addr;
                        goto found;
                }
-               hstate_next_node(h);
                nr_nodes--;
        }
        return 0;
@@ -1063,7 +1115,8 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                if (h->order >= MAX_ORDER) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
-               } else if (!alloc_fresh_huge_page(h))
+               } else if (!alloc_fresh_huge_page(h,
+                                        &node_states[N_HIGH_MEMORY]))
                        break;
        }
        h->max_huge_pages = i;
@@ -1105,14 +1158,15 @@ static void __init report_hugepages(void)
 }
 
 #ifdef CONFIG_HIGHMEM
-static void try_to_free_low(struct hstate *h, unsigned long count)
+static void try_to_free_low(struct hstate *h, unsigned long count,
+                                               nodemask_t *nodes_allowed)
 {
        int i;
 
        if (h->order >= MAX_ORDER)
                return;
 
-       for (i = 0; i < MAX_NUMNODES; ++i) {
+       for_each_node_mask(i, *nodes_allowed) {
                struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
                list_for_each_entry_safe(page, next, freel, lru) {
@@ -1128,7 +1182,8 @@ static void try_to_free_low(struct hstate *h, unsigned long count)
        }
 }
 #else
-static inline void try_to_free_low(struct hstate *h, unsigned long count)
+static inline void try_to_free_low(struct hstate *h, unsigned long count,
+                                               nodemask_t *nodes_allowed)
 {
 }
 #endif
@@ -1138,38 +1193,56 @@ static inline void try_to_free_low(struct hstate *h, unsigned long count)
  * balanced by operating on them in a round-robin fashion.
  * Returns 1 if an adjustment was made.
  */
-static int adjust_pool_surplus(struct hstate *h, int delta)
+static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
+                               int delta)
 {
-       static int prev_nid;
-       int nid = prev_nid;
+       int start_nid, next_nid;
        int ret = 0;
 
        VM_BUG_ON(delta != -1 && delta != 1);
-       do {
-               nid = next_node(nid, node_online_map);
-               if (nid == MAX_NUMNODES)
-                       nid = first_node(node_online_map);
 
-               /* To shrink on this node, there must be a surplus page */
-               if (delta < 0 && !h->surplus_huge_pages_node[nid])
-                       continue;
-               /* Surplus cannot exceed the total number of pages */
-               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
-                                               h->nr_huge_pages_node[nid])
-                       continue;
+       if (delta < 0)
+               start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
+       else
+               start_nid = hstate_next_node_to_free(h, nodes_allowed);
+       next_nid = start_nid;
+
+       do {
+               int nid = next_nid;
+               if (delta < 0)  {
+                       /*
+                        * To shrink on this node, there must be a surplus page
+                        */
+                       if (!h->surplus_huge_pages_node[nid]) {
+                               next_nid = hstate_next_node_to_alloc(h,
+                                                               nodes_allowed);
+                               continue;
+                       }
+               }
+               if (delta > 0) {
+                       /*
+                        * Surplus cannot exceed the total number of pages
+                        */
+                       if (h->surplus_huge_pages_node[nid] >=
+                                               h->nr_huge_pages_node[nid]) {
+                               next_nid = hstate_next_node_to_free(h,
+                                                               nodes_allowed);
+                               continue;
+                       }
+               }
 
                h->surplus_huge_pages += delta;
                h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
-       } while (nid != prev_nid);
+       } while (next_nid != start_nid);
 
-       prev_nid = nid;
        return ret;
 }
 
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
-static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+                                               nodemask_t *nodes_allowed)
 {
        unsigned long min_count, ret;
 
@@ -1189,7 +1262,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
         */
        spin_lock(&hugetlb_lock);
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
-               if (!adjust_pool_surplus(h, -1))
+               if (!adjust_pool_surplus(h, nodes_allowed, -1))
                        break;
        }
 
@@ -1200,11 +1273,14 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
-               ret = alloc_fresh_huge_page(h);
+               ret = alloc_fresh_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;
 
+               /* Bail for signals. Probably ctrl-c from user */
+               if (signal_pending(current))
+                       goto out;
        }
 
        /*
@@ -1224,15 +1300,13 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
         */
        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
        min_count = max(count, min_count);
-       try_to_free_low(h, min_count);
+       try_to_free_low(h, min_count, nodes_allowed);
        while (min_count < persistent_huge_pages(h)) {
-               struct page *page = dequeue_huge_page(h);
-               if (!page)
+               if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
-               update_and_free_page(h, page);
        }
        while (count < persistent_huge_pages(h)) {
-               if (!adjust_pool_surplus(h, 1))
+               if (!adjust_pool_surplus(h, nodes_allowed, 1))
                        break;
        }
 out:
@@ -1251,43 +1325,117 @@ out:
 static struct kobject *hugepages_kobj;
 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
 
-static struct hstate *kobj_to_hstate(struct kobject *kobj)
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
 {
        int i;
+
        for (i = 0; i < HUGE_MAX_HSTATE; i++)
-               if (hstate_kobjs[i] == kobj)
+               if (hstate_kobjs[i] == kobj) {
+                       if (nidp)
+                               *nidp = NUMA_NO_NODE;
                        return &hstates[i];
-       BUG();
-       return NULL;
+               }
+
+       return kobj_to_node_hstate(kobj, nidp);
 }
 
-static ssize_t nr_hugepages_show(struct kobject *kobj,
+static ssize_t nr_hugepages_show_common(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
-       struct hstate *h = kobj_to_hstate(kobj);
-       return sprintf(buf, "%lu\n", h->nr_huge_pages);
+       struct hstate *h;
+       unsigned long nr_huge_pages;
+       int nid;
+
+       h = kobj_to_hstate(kobj, &nid);
+       if (nid == NUMA_NO_NODE)
+               nr_huge_pages = h->nr_huge_pages;
+       else
+               nr_huge_pages = h->nr_huge_pages_node[nid];
+
+       return sprintf(buf, "%lu\n", nr_huge_pages);
 }
-static ssize_t nr_hugepages_store(struct kobject *kobj,
-               struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
+                       struct kobject *kobj, struct kobj_attribute *attr,
+                       const char *buf, size_t len)
 {
        int err;
-       unsigned long input;
-       struct hstate *h = kobj_to_hstate(kobj);
+       int nid;
+       unsigned long count;
+       struct hstate *h;
+       NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 
-       err = strict_strtoul(buf, 10, &input);
+       err = strict_strtoul(buf, 10, &count);
        if (err)
                return 0;
 
-       h->max_huge_pages = set_max_huge_pages(h, input);
+       h = kobj_to_hstate(kobj, &nid);
+       if (nid == NUMA_NO_NODE) {
+               /*
+                * global hstate attribute
+                */
+               if (!(obey_mempolicy &&
+                               init_nodemask_of_mempolicy(nodes_allowed))) {
+                       NODEMASK_FREE(nodes_allowed);
+                       nodes_allowed = &node_states[N_HIGH_MEMORY];
+               }
+       } else if (nodes_allowed) {
+               /*
+                * per node hstate attribute: adjust count to global,
+                * but restrict alloc/free to the specified node.
+                */
+               count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
+               init_nodemask_of_node(nodes_allowed, nid);
+       } else
+               nodes_allowed = &node_states[N_HIGH_MEMORY];
 
-       return count;
+       h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
+
+       if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+               NODEMASK_FREE(nodes_allowed);
+
+       return len;
+}
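For a per-node attribute the written value is a per-node target, but set_max_huge_pages() operates on the global pool, so the store converts it: the global target becomes the requested node count plus whatever currently sits on all the other nodes, and nodes_allowed is narrowed to just that node so only it is grown or shrunk. A small worked example (numbers are illustrative): if the pool holds 10 huge pages in total, 3 of them on node 1, and 5 is written to node 1's nr_hugepages, then count becomes 5 + (10 - 3) = 12, and the 2 pages set_max_huge_pages() tries to add can only come from node 1. A write to the global attribute leaves count untouched and uses all nodes with memory, optionally narrowed by the writing task's mempolicy when obey_mempolicy is set.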
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *buf)
+{
+       return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+              struct kobj_attribute *attr, const char *buf, size_t len)
+{
+       return nr_hugepages_store_common(false, kobj, attr, buf, len);
 }
 HSTATE_ATTR(nr_hugepages);
 
+#ifdef CONFIG_NUMA
+
+/*
+ * hstate attribute for optionally mempolicy-based constraint on persistent
+ * huge page alloc/free.
+ */
+static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
+                                      struct kobj_attribute *attr, char *buf)
+{
+       return nr_hugepages_show_common(kobj, attr, buf);
+}
+
+static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
+              struct kobj_attribute *attr, const char *buf, size_t len)
+{
+       return nr_hugepages_store_common(true, kobj, attr, buf, len);
+}
+HSTATE_ATTR(nr_hugepages_mempolicy);
+#endif
+
+
 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
-       struct hstate *h = kobj_to_hstate(kobj);
+       struct hstate *h = kobj_to_hstate(kobj, NULL);
        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
 }
 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
@@ -1295,7 +1443,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 {
        int err;
        unsigned long input;
-       struct hstate *h = kobj_to_hstate(kobj);
+       struct hstate *h = kobj_to_hstate(kobj, NULL);
 
        err = strict_strtoul(buf, 10, &input);
        if (err)
@@ -1312,15 +1460,24 @@ HSTATE_ATTR(nr_overcommit_hugepages);
 static ssize_t free_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
-       struct hstate *h = kobj_to_hstate(kobj);
-       return sprintf(buf, "%lu\n", h->free_huge_pages);
+       struct hstate *h;
+       unsigned long free_huge_pages;
+       int nid;
+
+       h = kobj_to_hstate(kobj, &nid);
+       if (nid == NUMA_NO_NODE)
+               free_huge_pages = h->free_huge_pages;
+       else
+               free_huge_pages = h->free_huge_pages_node[nid];
+
+       return sprintf(buf, "%lu\n", free_huge_pages);
 }
 HSTATE_ATTR_RO(free_hugepages);
 
 static ssize_t resv_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
-       struct hstate *h = kobj_to_hstate(kobj);
+       struct hstate *h = kobj_to_hstate(kobj, NULL);
        return sprintf(buf, "%lu\n", h->resv_huge_pages);
 }
 HSTATE_ATTR_RO(resv_hugepages);
@@ -1328,8 +1485,17 @@ HSTATE_ATTR_RO(resv_hugepages);
 static ssize_t surplus_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
 {
-       struct hstate *h = kobj_to_hstate(kobj);
-       return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+       struct hstate *h;
+       unsigned long surplus_huge_pages;
+       int nid;
+
+       h = kobj_to_hstate(kobj, &nid);
+       if (nid == NUMA_NO_NODE)
+               surplus_huge_pages = h->surplus_huge_pages;
+       else
+               surplus_huge_pages = h->surplus_huge_pages_node[nid];
+
+       return sprintf(buf, "%lu\n", surplus_huge_pages);
 }
 HSTATE_ATTR_RO(surplus_hugepages);
 
@@ -1339,6 +1505,9 @@ static struct attribute *hstate_attrs[] = {
        &free_hugepages_attr.attr,
        &resv_hugepages_attr.attr,
        &surplus_hugepages_attr.attr,
+#ifdef CONFIG_NUMA
+       &nr_hugepages_mempolicy_attr.attr,
+#endif
        NULL,
 };
 
@@ -1346,19 +1515,20 @@ static struct attribute_group hstate_attr_group = {
        .attrs = hstate_attrs,
 };
 
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+                                   struct kobject **hstate_kobjs,
+                                   struct attribute_group *hstate_attr_group)
 {
        int retval;
+       int hi = h - hstates;
 
-       hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
-                                                       hugepages_kobj);
-       if (!hstate_kobjs[h - hstates])
+       hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
+       if (!hstate_kobjs[hi])
                return -ENOMEM;
 
-       retval = sysfs_create_group(hstate_kobjs[h - hstates],
-                                                       &hstate_attr_group);
+       retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
        if (retval)
-               kobject_put(hstate_kobjs[h - hstates]);
+               kobject_put(hstate_kobjs[hi]);
 
        return retval;
 }
@@ -1373,17 +1543,184 @@ static void __init hugetlb_sysfs_init(void)
                return;
 
        for_each_hstate(h) {
-               err = hugetlb_sysfs_add_hstate(h);
+               err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+                                        hstate_kobjs, &hstate_attr_group);
                if (err)
                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
                                                                h->name);
        }
 }
 
+#ifdef CONFIG_NUMA
+
+/*
+ * node_hstate/s - associate per node hstate attributes, via their kobjects,
+ * with node sysdevs in node_devices[] using a parallel array.  The array
+ * index of a node sysdev or _hstate == node id.
+ * This is here to avoid any static dependency of the node sysdev driver, in
+ * the base kernel, on the hugetlb module.
+ */
+struct node_hstate {
+       struct kobject          *hugepages_kobj;
+       struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
+};
+struct node_hstate node_hstates[MAX_NUMNODES];
+
+/*
+ * A subset of global hstate attributes for node sysdevs
+ */
+static struct attribute *per_node_hstate_attrs[] = {
+       &nr_hugepages_attr.attr,
+       &free_hugepages_attr.attr,
+       &surplus_hugepages_attr.attr,
+       NULL,
+};
+
+static struct attribute_group per_node_hstate_attr_group = {
+       .attrs = per_node_hstate_attrs,
+};
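With this group registered, each node that has memory exposes a subset of the hstate attributes under its sysdev directory. On an x86 system with 2MB huge pages the resulting layout is expected to look roughly like this (paths shown for illustration):

/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages

alongside the existing global attributes under /sys/kernel/mm/hugepages/hugepages-2048kB/.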
+
+/*
+ * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
+ * Returns node id via non-NULL nidp.
+ */
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+       int nid;
+
+       for (nid = 0; nid < nr_node_ids; nid++) {
+               struct node_hstate *nhs = &node_hstates[nid];
+               int i;
+               for (i = 0; i < HUGE_MAX_HSTATE; i++)
+                       if (nhs->hstate_kobjs[i] == kobj) {
+                               if (nidp)
+                                       *nidp = nid;
+                               return &hstates[i];
+                       }
+       }
+
+       BUG();
+       return NULL;
+}
+
+/*
+ * Unregister hstate attributes from a single node sysdev.
+ * No-op if no hstate attributes attached.
+ */
+void hugetlb_unregister_node(struct node *node)
+{
+       struct hstate *h;
+       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+
+       if (!nhs->hugepages_kobj)
+               return;         /* no hstate attributes */
+
+       for_each_hstate(h)
+               if (nhs->hstate_kobjs[h - hstates]) {
+                       kobject_put(nhs->hstate_kobjs[h - hstates]);
+                       nhs->hstate_kobjs[h - hstates] = NULL;
+               }
+
+       kobject_put(nhs->hugepages_kobj);
+       nhs->hugepages_kobj = NULL;
+}
+
+/*
+ * hugetlb module exit:  unregister hstate attributes from node sysdevs
+ * that have them.
+ */
+static void hugetlb_unregister_all_nodes(void)
+{
+       int nid;
+
+       /*
+        * disable node sysdev registrations.
+        */
+       register_hugetlbfs_with_node(NULL, NULL);
+
+       /*
+        * remove hstate attributes from any nodes that have them.
+        */
+       for (nid = 0; nid < nr_node_ids; nid++)
+               hugetlb_unregister_node(&node_devices[nid]);
+}
+
+/*
+ * Register hstate attributes for a single node sysdev.
+ * No-op if attributes already registered.
+ */
+void hugetlb_register_node(struct node *node)
+{
+       struct hstate *h;
+       struct node_hstate *nhs = &node_hstates[node->sysdev.id];
+       int err;
+
+       if (nhs->hugepages_kobj)
+               return;         /* already allocated */
+
+       nhs->hugepages_kobj = kobject_create_and_add("hugepages",
+                                                       &node->sysdev.kobj);
+       if (!nhs->hugepages_kobj)
+               return;
+
+       for_each_hstate(h) {
+               err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
+                                               nhs->hstate_kobjs,
+                                               &per_node_hstate_attr_group);
+               if (err) {
+                       printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
+                                       " for node %d\n",
+                                               h->name, node->sysdev.id);
+                       hugetlb_unregister_node(node);
+                       break;
+               }
+       }
+}
+
+/*
+ * hugetlb init time:  register hstate attributes for all registered node
+ * sysdevs of nodes that have memory.  All on-line nodes should have
+ * registered their associated sysdev by this time.
+ */
+static void hugetlb_register_all_nodes(void)
+{
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               struct node *node = &node_devices[nid];
+               if (node->sysdev.id == nid)
+                       hugetlb_register_node(node);
+       }
+
+       /*
+        * Let the node sysdev driver know we're here so it can
+        * [un]register hstate attributes on node hotplug.
+        */
+       register_hugetlbfs_with_node(hugetlb_register_node,
+                                    hugetlb_unregister_node);
+}
+#else  /* !CONFIG_NUMA */
+
+static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
+{
+       BUG();
+       if (nidp)
+               *nidp = -1;
+       return NULL;
+}
+
+static void hugetlb_unregister_all_nodes(void) { }
+
+static void hugetlb_register_all_nodes(void) { }
+
+#endif
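The registration pair above talks to the node sysdev driver purely through function pointers, so the base kernel never gains a static dependency on hugetlb. A stripped-down userspace model of that handshake (names are invented for the sketch, not the kernel interfaces):

#include <stdio.h>

/*
 * Model of the register_hugetlbfs_with_node() handshake: the node driver
 * stores two callbacks supplied at hugetlb init time and invokes them on
 * node hotplug; passing NULL disables them again.
 */
typedef void (*node_cb)(int nid);

static node_cb reg_cb;		/* plays the role of hugetlb_register_node */
static node_cb unreg_cb;	/* plays the role of hugetlb_unregister_node */

static void register_hugepage_callbacks(node_cb reg, node_cb unreg)
{
	reg_cb = reg;
	unreg_cb = unreg;
}

static void node_hotplug_add(int nid)    { if (reg_cb) reg_cb(nid); }
static void node_hotplug_remove(int nid) { if (unreg_cb) unreg_cb(nid); }

static void add_attrs(int nid)    { printf("add hstate attrs to node %d\n", nid); }
static void remove_attrs(int nid) { printf("remove hstate attrs from node %d\n", nid); }

int main(void)
{
	register_hugepage_callbacks(add_attrs, remove_attrs);	/* module init */
	node_hotplug_add(1);
	node_hotplug_remove(1);
	register_hugepage_callbacks(NULL, NULL);		/* module exit */
	return 0;
}

hugetlb_unregister_all_nodes() follows the same order on module exit: it first passes NULL callbacks to register_hugetlbfs_with_node() so no new registrations race with the teardown, then strips the attributes from every node.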
+
 static void __exit hugetlb_exit(void)
 {
        struct hstate *h;
 
+       hugetlb_unregister_all_nodes();
+
        for_each_hstate(h) {
                kobject_put(hstate_kobjs[h - hstates]);
        }
@@ -1418,6 +1755,8 @@ static int __init hugetlb_init(void)
 
        hugetlb_sysfs_init();
 
+       hugetlb_register_all_nodes();
+
        return 0;
 }
 module_init(hugetlb_init);
@@ -1441,7 +1780,8 @@ void __init hugetlb_add_hstate(unsigned order)
        h->free_huge_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
-       h->hugetlb_next_nid = first_node(node_online_map);
+       h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
+       h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
                                        huge_page_size(h)/1024);
 
@@ -1504,9 +1844,9 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
 }
 
 #ifdef CONFIG_SYSCTL
-int hugetlb_sysctl_handler(struct ctl_table *table, int write,
-                          struct file *file, void __user *buffer,
-                          size_t *length, loff_t *ppos)
+static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
+                        struct ctl_table *table, int write,
+                        void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct hstate *h = &default_hstate;
        unsigned long tmp;
@@ -1516,19 +1856,47 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+
+       if (write) {
+               NODEMASK_ALLOC(nodemask_t, nodes_allowed,
+                                               GFP_KERNEL | __GFP_NORETRY);
+               if (!(obey_mempolicy &&
+                              init_nodemask_of_mempolicy(nodes_allowed))) {
+                       NODEMASK_FREE(nodes_allowed);
+                       nodes_allowed = &node_states[N_HIGH_MEMORY];
+               }
+               h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
 
-       if (write)
-               h->max_huge_pages = set_max_huge_pages(h, tmp);
+               if (nodes_allowed != &node_states[N_HIGH_MEMORY])
+                       NODEMASK_FREE(nodes_allowed);
+       }
 
        return 0;
 }
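Both nr_hugepages_store_common() above and this sysctl handler use the same allocate-or-borrow pattern for the node mask: try to build a mempolicy-constrained mask in freshly allocated storage, fall back to pointing at the global node_states[N_HIGH_MEMORY] otherwise, and free the mask afterwards only if it is not the global one. A compact userspace sketch of that ownership pattern (helper names are made up for illustration):

#include <stdlib.h>

struct mask { unsigned long bits; };

static struct mask global_mask = { ~0UL };	/* node_states[N_HIGH_MEMORY] stand-in */

static int fill_from_policy(struct mask *m)	/* init_nodemask_of_mempolicy() stand-in */
{
	(void)m;
	return 0;				/* pretend no mempolicy constraint */
}

static void adjust_pool(struct mask *allowed)	/* set_max_huge_pages() stand-in */
{
	(void)allowed;
}

int main(void)
{
	struct mask *allowed = malloc(sizeof(*allowed));

	if (!(allowed && fill_from_policy(allowed))) {
		free(allowed);			/* allocation unused (or NULL) */
		allowed = &global_mask;		/* borrow the global mask */
	}

	adjust_pool(allowed);

	if (allowed != &global_mask)		/* free only what we own */
		free(allowed);
	return 0;
}

With CONFIG_NUMA the mempolicy-aware variant is reachable both through the nr_hugepages_mempolicy sysfs attribute and through hugetlb_mempolicy_sysctl_handler(); the sysctl table entry that wires the latter up lives outside this file.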
 
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+                         void __user *buffer, size_t *length, loff_t *ppos)
+{
+
+       return hugetlb_sysctl_handler_common(false, table, write,
+                                                       buffer, length, ppos);
+}
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+                         void __user *buffer, size_t *length, loff_t *ppos)
+{
+       return hugetlb_sysctl_handler_common(true, table, write,
+                                                       buffer, length, ppos);
+}
+#endif /* CONFIG_NUMA */
+
 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
-                       struct file *file, void __user *buffer,
+                       void __user *buffer,
                        size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length, ppos);
+       proc_dointvec(table, write, buffer, length, ppos);
        if (hugepages_treat_as_movable)
                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
        else
@@ -1537,7 +1905,7 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
 }
 
 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
-                       struct file *file, void __user *buffer,
+                       void __user *buffer,
                        size_t *length, loff_t *ppos)
 {
        struct hstate *h = &default_hstate;
@@ -1548,7 +1916,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 
        table->data = &tmp;
        table->maxlen = sizeof(unsigned long);
-       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
 
        if (write) {
                spin_lock(&hugetlb_lock);
@@ -1689,7 +2057,7 @@ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return 0;
 }
 
-struct vm_operations_struct hugetlb_vm_ops = {
+const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
@@ -1719,7 +2087,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
-               update_mmu_cache(vma, address, entry);
+               update_mmu_cache(vma, address, ptep);
        }
 }
 
@@ -1871,6 +2239,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                + (vma->vm_pgoff >> PAGE_SHIFT);
        mapping = (struct address_space *)page_private(page);
 
+       /*
+        * Take the mapping lock for the duration of the table walk. As
+        * this mapping is shared by all the VMAs it covers, call
+        * __unmap_hugepage_range() directly, since the lock is already held.
+        */
+       spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -1884,10 +2258,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                 * from the time of fork. This would look like data corruption
                 */
                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-                       unmap_hugepage_range(iter_vma,
+                       __unmap_hugepage_range(iter_vma,
                                address, address + huge_page_size(h),
                                page);
        }
+       spin_unlock(&mapping->i_mmap_lock);
 
        return 1;
 }
@@ -1927,6 +2302,9 @@ retry_avoidcopy:
                outside_reserve = 1;
 
        page_cache_get(old_page);
+
+       /* Drop page_table_lock as buddy allocator may be called */
+       spin_unlock(&mm->page_table_lock);
        new_page = alloc_huge_page(vma, address, outside_reserve);
 
        if (IS_ERR(new_page)) {
@@ -1944,19 +2322,25 @@ retry_avoidcopy:
                        if (unmap_ref_private(mm, vma, old_page, address)) {
                                BUG_ON(page_count(old_page) != 1);
                                BUG_ON(huge_pte_none(pte));
+                               spin_lock(&mm->page_table_lock);
                                goto retry_avoidcopy;
                        }
                        WARN_ON_ONCE(1);
                }
 
+               /* Caller expects lock to be held */
+               spin_lock(&mm->page_table_lock);
                return -PTR_ERR(new_page);
        }
 
-       spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
-       spin_lock(&mm->page_table_lock);
 
+       /*
+        * Retake the page_table_lock to check for racing updates
+        * before the page tables are altered
+        */
+       spin_lock(&mm->page_table_lock);
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
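The hunk above drops page_table_lock around alloc_huge_page(), which may sleep in the buddy allocator, then retakes the lock and re-checks the pte before touching the page tables, since another thread may have changed the mapping in the window. A small userspace model of that drop-reacquire-revalidate pattern (names are invented for the sketch):

#include <pthread.h>
#include <stdlib.h>

struct table {
	pthread_mutex_t lock;
	int *entry;			/* plays the role of the huge pte */
};

/* Called, and returns, with t->lock held, as hugetlb_cow() does. */
static int break_cow(struct table *t, int *snapshot)
{
	int *newpage;

	pthread_mutex_unlock(&t->lock);	/* the allocation may sleep */
	newpage = malloc(sizeof(*newpage));
	pthread_mutex_lock(&t->lock);	/* caller expects the lock held */

	if (!newpage)
		return -1;
	if (t->entry == snapshot) {	/* no racing update: install the copy */
		*newpage = 42;
		t->entry = newpage;
	} else {
		free(newpage);		/* lost the race: discard the copy */
	}
	return 0;
}

int main(void)
{
	struct table t = { PTHREAD_MUTEX_INITIALIZER, NULL };

	pthread_mutex_lock(&t.lock);
	break_cow(&t, t.entry);
	pthread_mutex_unlock(&t.lock);
	free(t.entry);
	return 0;
}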
@@ -1984,8 +2368,28 @@ static struct page *hugetlbfs_pagecache_page(struct hstate *h,
        return find_lock_page(mapping, idx);
 }
 
+/*
+ * Return whether there is a pagecache page to back given address within VMA.
+ * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
+ */
+static bool hugetlbfs_pagecache_present(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long address)
+{
+       struct address_space *mapping;
+       pgoff_t idx;
+       struct page *page;
+
+       mapping = vma->vm_file->f_mapping;
+       idx = vma_hugecache_offset(h, vma, address);
+
+       page = find_get_page(mapping, idx);
+       if (page)
+               put_page(page);
+       return page != NULL;
+}
+
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, pte_t *ptep, int write_access)
+                       unsigned long address, pte_t *ptep, unsigned int flags)
 {
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
@@ -2053,7 +2457,7 @@ retry:
         * any allocations necessary to record that reservation occur outside
         * the spinlock.
         */
-       if (write_access && !(vma->vm_flags & VM_SHARED))
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
                if (vma_needs_reservation(h, vma, address) < 0) {
                        ret = VM_FAULT_OOM;
                        goto backout_unlocked;
@@ -2072,7 +2476,7 @@ retry:
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
 
-       if (write_access && !(vma->vm_flags & VM_SHARED)) {
+       if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
                /* Optimization, do the COW without a second fault */
                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
        }
@@ -2091,7 +2495,7 @@ backout_unlocked:
 }
 
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, int write_access)
+                       unsigned long address, unsigned int flags)
 {
        pte_t *ptep;
        pte_t entry;
@@ -2112,7 +2516,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        mutex_lock(&hugetlb_instantiation_mutex);
        entry = huge_ptep_get(ptep);
        if (huge_pte_none(entry)) {
-               ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
+               ret = hugetlb_no_page(mm, vma, address, ptep, flags);
                goto out_mutex;
        }
 
@@ -2126,7 +2530,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * page now as it is used to determine if a reservation has been
         * consumed.
         */
-       if (write_access && !pte_write(entry)) {
+       if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
                if (vma_needs_reservation(h, vma, address) < 0) {
                        ret = VM_FAULT_OOM;
                        goto out_mutex;
@@ -2143,7 +2547,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_page_table_lock;
 
 
-       if (write_access) {
+       if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry)) {
                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
                                                        pagecache_page);
@@ -2152,8 +2556,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, write_access))
-               update_mmu_cache(vma, address, entry);
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+                                               flags & FAULT_FLAG_WRITE))
+               update_mmu_cache(vma, address, ptep);
 
 out_page_table_lock:
        spin_unlock(&mm->page_table_lock);
@@ -2178,54 +2583,55 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
        return NULL;
 }
 
-static int huge_zeropage_ok(pte_t *ptep, int write, int shared)
-{
-       if (!ptep || write || shared)
-               return 0;
-       else
-               return huge_pte_none(huge_ptep_get(ptep));
-}
-
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i,
-                       int write)
+                       unsigned int flags)
 {
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;
        struct hstate *h = hstate_vma(vma);
-       int zeropage_ok = 0;
-       int shared = vma->vm_flags & VM_SHARED;
 
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
+               int absent;
                struct page *page;
 
                /*
                 * Some archs (sparc64, sh*) have multiple pte_ts to
-                * each hugepage.  We have to make * sure we get the
+                * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
-               if (huge_zeropage_ok(pte, write, shared))
-                       zeropage_ok = 1;
+               absent = !pte || huge_pte_none(huge_ptep_get(pte));
+
+               /*
+                * When coredumping, it suits get_dump_page if we just return
+                * an error where there's an empty slot with no huge pagecache
+                * to back it.  This way, we avoid allocating a hugepage, and
+                * the sparse dumpfile avoids allocating disk blocks, but its
+                * huge holes still show up with zeroes where they need to be.
+                */
+               if (absent && (flags & FOLL_DUMP) &&
+                   !hugetlbfs_pagecache_present(h, vma, vaddr)) {
+                       remainder = 0;
+                       break;
+               }
 
-               if (!pte ||
-                   (huge_pte_none(huge_ptep_get(pte)) && !zeropage_ok) ||
-                   (write && !pte_write(huge_ptep_get(pte)))) {
+               if (absent ||
+                   ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
-                       ret = hugetlb_fault(mm, vma, vaddr, write);
+                       ret = hugetlb_fault(mm, vma, vaddr,
+                               (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
                        spin_lock(&mm->page_table_lock);
                        if (!(ret & VM_FAULT_ERROR))
                                continue;
 
                        remainder = 0;
-                       if (!i)
-                               i = -EFAULT;
                        break;
                }
 
@@ -2233,10 +2639,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
-                       if (zeropage_ok)
-                               pages[i] = ZERO_PAGE(0);
-                       else
-                               pages[i] = mem_map_offset(page, pfn_offset);
+                       pages[i] = mem_map_offset(page, pfn_offset);
                        get_page(pages[i]);
                }
 
@@ -2260,7 +2663,7 @@ same_page:
        *length = remainder;
        *position = vaddr;
 
-       return i;
+       return i ? i : -EFAULT;
 }
 
 void hugetlb_change_protection(struct vm_area_struct *vma,
@@ -2369,7 +2772,7 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
 
        spin_lock(&inode->i_lock);
-       inode->i_blocks -= blocks_per_huge_page(h);
+       inode->i_blocks -= (blocks_per_huge_page(h) * freed);
        spin_unlock(&inode->i_lock);
 
        hugetlb_put_quota(inode->i_mapping, (chg - freed));