KVM: Inject asynchronous page fault into a PV guest if page is swapped out.
diff --git a/mm/sparse.c b/mm/sparse.c
index ac26eb0..95ac219 100644
@@ -2,13 +2,17 @@
  * sparse memory mappings.
  */
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 
 /*
  * Permanent SPARSEMEM data:
@@ -41,18 +45,30 @@ int page_to_nid(struct page *page)
        return section_to_node_table[page_to_section(page)];
 }
 EXPORT_SYMBOL(page_to_nid);
+
+static void set_section_nid(unsigned long section_nr, int nid)
+{
+       section_to_node_table[section_nr] = nid;
+}
+#else /* !NODE_NOT_IN_PAGE_FLAGS */
+static inline void set_section_nid(unsigned long section_nr, int nid)
+{
+}
 #endif
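
When the node number does not fit in page->flags (NODE_NOT_IN_PAGE_FLAGS), the node id lives in this per-section table instead. A minimal user-space sketch of that lookup follows; the section size is an assumption (128MB sections) and the table bound is made up for the demo:

#include <stdio.h>

#define SECTION_SIZE_BITS	27	/* assumed: 128MB sections */
#define PAGE_SHIFT		12
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define NR_MEM_SECTIONS		1024	/* made-up bound for the demo */

static unsigned char section_to_node_table[NR_MEM_SECTIONS];

/* stand-in for page_to_nid() when the node is not in page->flags */
static int pfn_to_nid_demo(unsigned long pfn)
{
	return section_to_node_table[pfn >> PFN_SECTION_SHIFT];
}

int main(void)
{
	section_to_node_table[3] = 1;	/* pretend section 3 sits on node 1 */
	printf("nid=%d\n", pfn_to_nid_demo(3UL << PFN_SECTION_SHIFT));
	return 0;
}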
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section *sparse_index_alloc(int nid)
+static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
 {
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);
 
-       if (slab_is_available())
-               section = kmalloc_node(array_size, GFP_KERNEL, nid);
-       else
+       if (slab_is_available()) {
+               if (node_state(nid, N_HIGH_MEMORY))
+                       section = kmalloc_node(array_size, GFP_KERNEL, nid);
+               else
+                       section = kmalloc(array_size, GFP_KERNEL);
+       } else
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);
 
        if (section)
@@ -61,21 +77,19 @@ static struct mem_section *sparse_index_alloc(int nid)
        return section;
 }
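
With SPARSEMEM_EXTREME the mem_section array is two-level: a static array of root pointers, each root allocated on demand by sparse_index_alloc() above. A self-contained sketch of how a section number resolves; the root and per-root counts are illustrative, not the kernel's:

#include <stdlib.h>

struct mem_section {
	unsigned long section_mem_map;
	unsigned long *pageblock_flags;
};

#define SECTIONS_PER_ROOT	128	/* ~PAGE_SIZE / sizeof(struct mem_section) */
#define NR_SECTION_ROOTS	64	/* made up for the demo */
#define SECTION_NR_TO_ROOT(nr)	((nr) / SECTIONS_PER_ROOT)

static struct mem_section *mem_section_roots[NR_SECTION_ROOTS];

static struct mem_section *nr_to_section_demo(unsigned long nr)
{
	struct mem_section *root = mem_section_roots[SECTION_NR_TO_ROOT(nr)];

	/* a NULL root means no section in that range is present */
	return root ? &root[nr % SECTIONS_PER_ROOT] : NULL;
}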
 
-static int sparse_index_init(unsigned long section_nr, int nid)
+static int __meminit sparse_index_init(unsigned long section_nr, int nid)
 {
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;
 
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-       section_to_node_table[section_nr] = nid;
-#endif
-
        if (mem_section[root])
                return -EEXIST;
 
        section = sparse_index_alloc(nid);
+       if (!section)
+               return -ENOMEM;
        /*
         * This lock keeps two different sections from
         * reallocating for the same index
@@ -101,7 +115,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
 
 /*
  * Although written for the SPARSEMEM_EXTREME case, this happens
- * to also work for the flat array case becase
+ * to also work for the flat array case because
  * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
  */
 int __section_nr(struct mem_section* ms)
@@ -137,17 +151,45 @@ static inline int sparse_early_nid(struct mem_section *section)
        return (section->section_mem_map >> SECTION_NID_SHIFT);
 }
 
+/* Validate the physical addressing limitations of the memory model */
+void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
+                                               unsigned long *end_pfn)
+{
+       unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
+
+       /*
+        * Sanity checks - do not allow an architecture to pass
+        * in larger pfns than the maximum scope of sparsemem:
+        */
+       if (*start_pfn > max_sparsemem_pfn) {
+               mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+                       "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+                       *start_pfn, *end_pfn, max_sparsemem_pfn);
+               WARN_ON_ONCE(1);
+               *start_pfn = max_sparsemem_pfn;
+               *end_pfn = max_sparsemem_pfn;
+       } else if (*end_pfn > max_sparsemem_pfn) {
+               mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
+                       "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+                       *start_pfn, *end_pfn, max_sparsemem_pfn);
+               WARN_ON_ONCE(1);
+               *end_pfn = max_sparsemem_pfn;
+       }
+}
+
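
To make the clamp concrete, a worked example with typical x86_64 values (assumed here, not taken from this patch):

/*
 * MAX_PHYSMEM_BITS = 46, PAGE_SHIFT = 12 (typical x86_64 values)
 * max_sparsemem_pfn = 1UL << (46 - 12) = 2^34 pfns
 * 2^34 pages * 4KB/page = 64TB of addressable physical memory;
 * any pfn at or beyond that is clamped by the function above.
 */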
 /* Record a memory area against a node. */
-void memory_present(int nid, unsigned long start, unsigned long end)
+void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
        unsigned long pfn;
 
        start &= PAGE_SECTION_MASK;
+       mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;
 
                sparse_index_init(section, nid);
+               set_section_nid(section, nid);
 
                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
@@ -166,11 +208,12 @@ unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
        unsigned long pfn;
        unsigned long nr_pages = 0;
 
+       mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;
 
-               if (pfn_valid(pfn))
+               if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }
 
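
node_memmap_size_bytes() now tests pfn_present() rather than pfn_valid(): at this point a section can be marked present before its mem_map exists. A simplified sketch of the two section states; the flag names match the kernel's, but treating them as plain functions over the raw word is the simplification:

#define SECTION_MARKED_PRESENT	(1UL << 0)
#define SECTION_HAS_MEM_MAP	(1UL << 1)

/* "present": memory_present() has recorded the section */
static int present_section_demo(unsigned long section_mem_map)
{
	return section_mem_map & SECTION_MARKED_PRESENT;
}

/* "valid": sparse_init_one_section() has attached a mem_map */
static int valid_section_demo(unsigned long section_mem_map)
{
	return section_mem_map & SECTION_HAS_MEM_MAP;
}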
@@ -188,47 +231,406 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 }
 
 /*
- * We need this if we ever free the mem_maps.  While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
  */
-static __attribute((unused))
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
+       /* mask off the extra low bits of information */
+       coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
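
The encode/decode pair stores mem_map biased by the section's first pfn, so the low bits of section_mem_map stay free for flag bits; decoding masks the flags off and adds the bias back. A round-trip sketch under stated assumptions — the mask value, section geometry, and struct page stub are illustrative:

struct page { unsigned long flags; };	/* stub for the sketch */

#define PFN_SECTION_SHIFT	15	/* assumed: 32768 pages per section */
#define SECTION_MAP_MASK	(~((1UL << 2) - 1))	/* low 2 bits for flags */

static unsigned long section_nr_to_first_pfn(unsigned long pnum)
{
	return pnum << PFN_SECTION_SHIFT;
}

static unsigned long encode_demo(struct page *mem_map, unsigned long pnum)
{
	/* bias by the first pfn; low bits are then available for flags */
	return (unsigned long)(mem_map - section_nr_to_first_pfn(pnum));
}

static struct page *decode_demo(unsigned long coded, unsigned long pnum)
{
	coded &= SECTION_MAP_MASK;	/* strip flag bits */
	return (struct page *)coded + section_nr_to_first_pfn(pnum);
}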
 
-static int sparse_init_one_section(struct mem_section *ms,
-               unsigned long pnum, struct page *mem_map)
+static int __meminit sparse_init_one_section(struct mem_section *ms,
+               unsigned long pnum, struct page *mem_map,
+               unsigned long *pageblock_bitmap)
 {
-       if (!valid_section(ms))
+       if (!present_section(ms))
                return -EINVAL;
 
        ms->section_mem_map &= ~SECTION_MAP_MASK;
-       ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);
+       ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
+                                                       SECTION_HAS_MEM_MAP;
+       ms->pageblock_flags = pageblock_bitmap;
 
        return 1;
 }
 
-static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
+unsigned long usemap_size(void)
+{
+       unsigned long size_bytes;
+       size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
+       size_bytes = roundup(size_bytes, sizeof(unsigned long));
+       return size_bytes;
+}
+
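
usemap_size() rounds the per-section pageblock bitmap up to whole bytes, then whole words. With SECTION_BLOCKFLAGS_BITS = 192 (a value assumed for illustration), this yields the "24 bytes" the sparse_init() comment below refers to:

/*
 * roundup(192, 8) / 8       = 24 bytes
 * roundup(24, sizeof(long)) = 24 on a 64-bit kernel
 */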
+#ifdef CONFIG_MEMORY_HOTPLUG
+static unsigned long *__kmalloc_section_usemap(void)
+{
+       return kmalloc(usemap_size(), GFP_KERNEL);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static unsigned long * __init
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                        unsigned long count)
+{
+       unsigned long section_nr;
+
+       /*
+        * A page may contain usemaps for other sections, preventing the
+        * page from being freed and making a section unremovable while
+        * other sections referencing the usemap remain active. Similarly,
+        * a pgdat can prevent a section from being removed. If section A
+        * contains a pgdat and section B contains the usemap, both
+        * sections become inter-dependent. This allocates usemaps
+        * from the same section as the pgdat where possible to avoid
+        * this problem.
+        */
+       section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       return alloc_bootmem_section(usemap_size() * count, section_nr);
+}
+
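
The helper above steers the usemap into the same section as the pgdat; the section of any in-kernel object falls out of its physical address. A sketch of that computation — the helper name is hypothetical, while __pa() and pfn_to_section_nr() are the kernel helpers used above:

/* hypothetical helper: which section does this object live in? */
static unsigned long object_section_nr(void *obj)
{
	return pfn_to_section_nr(__pa(obj) >> PAGE_SHIFT);
}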
+static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
+{
+       unsigned long usemap_snr, pgdat_snr;
+       static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
+       static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
+       struct pglist_data *pgdat = NODE_DATA(nid);
+       int usemap_nid;
+
+       usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
+       pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
+       if (usemap_snr == pgdat_snr)
+               return;
+
+       if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
+               /* skip redundant message */
+               return;
+
+       old_usemap_snr = usemap_snr;
+       old_pgdat_snr = pgdat_snr;
+
+       usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
+       if (usemap_nid != nid) {
+               printk(KERN_INFO
+                      "node %d must be removed before section %ld can be removed\n",
+                      nid, usemap_snr);
+               return;
+       }
+       /*
+        * There is a circular dependency. Some platforms tolerate an
+        * unremovable section because they simply gather other removable
+        * sections for dynamic partitioning. Just report the unremovable
+        * section's number here.
+        */
+       printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
+              pgdat_snr, nid);
+       printk(KERN_CONT
+              " have a circular dependency on usemap and pgdat allocations\n");
+}
+#else
+static unsigned long * __init
+sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
+                                        unsigned long count)
+{
+       return NULL;
+}
+
+static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
+{
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
+                                unsigned long pnum_begin,
+                                unsigned long pnum_end,
+                                unsigned long usemap_count, int nodeid)
+{
+       void *usemap;
+       unsigned long pnum;
+       int size = usemap_size();
+
+       usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
+                                                                usemap_count);
+       if (usemap) {
+               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                       if (!present_section_nr(pnum))
+                               continue;
+                       usemap_map[pnum] = usemap;
+                       usemap += size;
+               }
+               return;
+       }
+
+       usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+       if (usemap) {
+               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                       if (!present_section_nr(pnum))
+                               continue;
+                       usemap_map[pnum] = usemap;
+                       usemap += size;
+                       check_usemap_section_nr(nodeid, usemap_map[pnum]);
+               }
+               return;
+       }
+
+       printk(KERN_WARNING "%s: allocation failed\n", __func__);
+}
+
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
        struct page *map;
-       struct mem_section *ms = __nr_to_section(pnum);
-       int nid = sparse_early_nid(ms);
+       unsigned long size;
 
        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;
 
-       map = alloc_bootmem_node(NODE_DATA(nid),
-                       sizeof(struct page) * PAGES_PER_SECTION);
+       size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
+       map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
+                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       return map;
+}
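
For scale, under typical x86_64 assumptions (not stated in this patch) of PAGES_PER_SECTION = 32768 and sizeof(struct page) = 64, each section's mem_map works out as follows:

/*
 * 32768 pages/section * 64 bytes/struct page = 2MB per mem_map,
 * i.e. exactly one 2M huge page -- the unit the batching in
 * sparse_init() below is arranged around.
 */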
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+                                         unsigned long pnum_begin,
+                                         unsigned long pnum_end,
+                                         unsigned long map_count, int nodeid)
+{
+       void *map;
+       unsigned long pnum;
+       unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+
+       map = alloc_remap(nodeid, size * map_count);
+       if (map) {
+               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                       if (!present_section_nr(pnum))
+                               continue;
+                       map_map[pnum] = map;
+                       map += size;
+               }
+               return;
+       }
+
+       size = PAGE_ALIGN(size);
+       map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
+                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       if (map) {
+               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+                       if (!present_section_nr(pnum))
+                               continue;
+                       map_map[pnum] = map;
+                       map += size;
+               }
+               return;
+       }
+
+       /* fallback */
+       for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+               struct mem_section *ms;
+
+               if (!present_section_nr(pnum))
+                       continue;
+               map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+               if (map_map[pnum])
+                       continue;
+               ms = __nr_to_section(pnum);
+               printk(KERN_ERR "%s: sparsemem memory map backing failed, "
+                       "some memory will not be available\n", __func__);
+               ms->section_mem_map = 0;
+       }
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
+                                unsigned long pnum_begin,
+                                unsigned long pnum_end,
+                                unsigned long map_count, int nodeid)
+{
+       sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
+                                        map_count, nodeid);
+}
+#else
+static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
+{
+       struct page *map;
+       struct mem_section *ms = __nr_to_section(pnum);
+       int nid = sparse_early_nid(ms);
+
+       map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;
 
-       printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
+       printk(KERN_ERR "%s: sparsemem memory map backing failed, "
+                       "some memory will not be available\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
 }
+#endif
+
+void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+{
+}
+
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+       unsigned long pnum;
+       struct page *map;
+       unsigned long *usemap;
+       unsigned long **usemap_map;
+       int size;
+       int nodeid_begin = 0;
+       unsigned long pnum_begin = 0;
+       unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+       unsigned long map_count;
+       int size2;
+       struct page **map_map;
+#endif
 
+       /*
+        * The map uses a big page (2M on 64-bit x86) while the usemap
+        * needs far less than one page (about 24 bytes). Allocating a
+        * 2M-aligned map and then a 24-byte usemap in turn pushes the
+        * next map to the following 2M boundary, so on a big system the
+        * memory ends up with a lot of holes. Instead, try to allocate
+        * the 2M map pages contiguously.
+        *
+        * powerpc needs to call sparse_init_one_section right after each
+        * sparse_early_mem_map_alloc, so allocate usemap_map first.
+        */
+       size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+       usemap_map = alloc_bootmem(size);
+       if (!usemap_map)
+               panic("can not allocate usemap_map\n");
+
+       for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+               struct mem_section *ms;
+
+               if (!present_section_nr(pnum))
+                       continue;
+               ms = __nr_to_section(pnum);
+               nodeid_begin = sparse_early_nid(ms);
+               pnum_begin = pnum;
+               break;
+       }
+       usemap_count = 1;
+       for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+               struct mem_section *ms;
+               int nodeid;
+
+               if (!present_section_nr(pnum))
+                       continue;
+               ms = __nr_to_section(pnum);
+               nodeid = sparse_early_nid(ms);
+               if (nodeid == nodeid_begin) {
+                       usemap_count++;
+                       continue;
+               }
+               /* ok, we need to take care of pnum_begin to pnum - 1 */
+               sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
+                                                usemap_count, nodeid_begin);
+               /* new start, update counters */
+               nodeid_begin = nodeid;
+               pnum_begin = pnum;
+               usemap_count = 1;
+       }
+       /* ok, last chunk */
+       sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
+                                        usemap_count, nodeid_begin);
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+       size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
+       map_map = alloc_bootmem(size2);
+       if (!map_map)
+               panic("can not allocate map_map\n");
+
+       for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+               struct mem_section *ms;
+
+               if (!present_section_nr(pnum))
+                       continue;
+               ms = __nr_to_section(pnum);
+               nodeid_begin = sparse_early_nid(ms);
+               pnum_begin = pnum;
+               break;
+       }
+       map_count = 1;
+       for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+               struct mem_section *ms;
+               int nodeid;
+
+               if (!present_section_nr(pnum))
+                       continue;
+               ms = __nr_to_section(pnum);
+               nodeid = sparse_early_nid(ms);
+               if (nodeid == nodeid_begin) {
+                       map_count++;
+                       continue;
+               }
+               /* ok, we need to take care of pnum_begin to pnum - 1 */
+               sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
+                                                map_count, nodeid_begin);
+               /* new start, update counters */
+               nodeid_begin = nodeid;
+               pnum_begin = pnum;
+               map_count = 1;
+       }
+       /* ok, last chunk */
+       sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
+                                        map_count, nodeid_begin);
+#endif
+
+       for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+               if (!present_section_nr(pnum))
+                       continue;
+
+               usemap = usemap_map[pnum];
+               if (!usemap)
+                       continue;
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+               map = map_map[pnum];
+#else
+               map = sparse_early_mem_map_alloc(pnum);
+#endif
+               if (!map)
+                       continue;
+
+               sparse_init_one_section(__nr_to_section(pnum), pnum, map,
+                                                               usemap);
+       }
+
+       vmemmap_populate_print_last();
+
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+       free_bootmem(__pa(map_map), size2);
+#endif
+       free_bootmem(__pa(usemap_map), size);
+}
+
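
A back-of-the-envelope view of why the batching above matters, using the same illustrative 2MB map / 24-byte usemap sizes:

/*
 * Interleaved per section: a 2M-aligned map, then a 24-byte usemap,
 * pushes the next map to the next 2M boundary, wasting nearly 2MB
 * per section -- ~2GB of holes across 1024 sections. Batched, the
 * maps pack into 1024 * 2MB and all usemaps fit in a short run of
 * ordinary pages.
 */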
+#ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+                                                unsigned long nr_pages)
+{
+       /* This will make the necessary allocations eventually. */
+       return sparse_mem_map_populate(pnum, nid);
+}
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+       return; /* XXX: Not implemented yet */
+}
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
+#else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
        struct page *page, *ret;
@@ -251,40 +653,80 @@ got_map_ptr:
        return ret;
 }
 
-static int vaddr_in_vmalloc_area(void *addr)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+                                                 unsigned long nr_pages)
 {
-       if (addr >= (void *)VMALLOC_START &&
-           addr < (void *)VMALLOC_END)
-               return 1;
-       return 0;
+       return __kmalloc_section_memmap(nr_pages);
 }
 
 static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
-       if (vaddr_in_vmalloc_area(memmap))
+       if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
 }
 
-/*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void sparse_init(void)
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
-       unsigned long pnum;
-       struct page *map;
+       unsigned long maps_section_nr, removing_section_nr, i;
+       int magic;
+
+       for (i = 0; i < nr_pages; i++, page++) {
+               magic = atomic_read(&page->_mapcount);
+
+               BUG_ON(magic == NODE_INFO);
+
+               maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+               removing_section_nr = page->private;
+
+               /*
+                * When this function is called, the section being removed
+                * is in a logically offlined state: all of its pages are
+                * isolated from the page allocator. If the removed
+                * section's memmap lives in that same section, it must not
+                * be freed, or the page allocator could hand it out even
+                * though it is about to be removed physically.
+                */
+               if (maps_section_nr != removing_section_nr)
+                       put_page_bootmem(page);
+       }
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-       for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-               if (!valid_section_nr(pnum))
-                       continue;
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+       struct page *usemap_page;
+       unsigned long nr_pages;
 
-               map = sparse_early_mem_map_alloc(pnum);
-               if (!map)
-                       continue;
-               sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+       if (!usemap)
+               return;
+
+       usemap_page = virt_to_page(usemap);
+       /*
+        * Check to see if allocation came from hot-plug-add
+        */
+       if (PageSlab(usemap_page)) {
+               kfree(usemap);
+               if (memmap)
+                       __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+               return;
+       }
+
+       /*
+        * The usemap came from bootmem. It is packed with other usemaps
+        * in the section that holds the pgdat, so just leave it as is.
+        */
+
+       if (memmap) {
+               struct page *memmap_page;
+               memmap_page = virt_to_page(memmap);
+
+               nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+                       >> PAGE_SHIFT;
+
+               free_map_bootmem(memmap_page, nr_pages);
        }
 }
 
@@ -293,13 +735,14 @@ void sparse_init(void)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
 {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
+       unsigned long *usemap;
        unsigned long flags;
        int ret;
 
@@ -307,8 +750,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
         * no locking for this, because it does its own
         * locking; plus, it does a kmalloc
         */
-       sparse_index_init(section_nr, pgdat->node_id);
-       memmap = __kmalloc_section_memmap(nr_pages);
+       ret = sparse_index_init(section_nr, pgdat->node_id);
+       if (ret < 0 && ret != -EEXIST)
+               return ret;
+       memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+       if (!memmap)
+               return -ENOMEM;
+       usemap = __kmalloc_section_usemap();
+       if (!usemap) {
+               __kfree_section_memmap(memmap, nr_pages);
+               return -ENOMEM;
+       }
 
        pgdat_resize_lock(pgdat, &flags);
 
@@ -317,13 +769,33 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                ret = -EEXIST;
                goto out;
        }
+
        ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
-       ret = sparse_init_one_section(ms, section_nr, memmap);
+       ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
        pgdat_resize_unlock(pgdat, &flags);
-       if (ret <= 0)
+       if (ret <= 0) {
+               kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
+       }
        return ret;
 }
+
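
Per the return convention documented above (a positive return means the new memmap/usemap were consumed by the section, and the function frees them itself on failure), a hedged caller sketch — __add_section_demo() is illustrative, and only sparse_add_one_section() is from this file:

static int __add_section_demo(struct zone *zone, unsigned long start_pfn)
{
	int ret = sparse_add_one_section(zone, start_pfn, PAGES_PER_SECTION);

	if (ret < 0 && ret != -EEXIST)
		return ret;	/* index/memmap/usemap allocation failed */
	return 0;		/* section is present and initialized */
}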
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+       struct page *memmap = NULL;
+       unsigned long *usemap = NULL;
+
+       if (ms->section_mem_map) {
+               usemap = ms->pageblock_flags;
+               memmap = sparse_decode_mem_map(ms->section_mem_map,
+                                               __section_nr(ms));
+               ms->section_mem_map = 0;
+               ms->pageblock_flags = NULL;
+       }
+
+       free_section_usemap(memmap, usemap);
+}
+#endif