ia64: allocate percpu area for cpu0 like percpu areas for other cpus
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index a88cdb7232f8ea1586473973c65f9a63d8ecdde7..200282b92981cdfaf63c0930ba8de8737500a8c1 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -16,6 +16,7 @@
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
@@ -33,12 +34,12 @@
  */
 struct early_node_data {
        struct ia64_node_data *node_data;
-       pg_data_t *pgdat;
        unsigned long pernode_addr;
        unsigned long pernode_size;
-       struct bootmem_data bootmem_data;
        unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
        unsigned long num_dma_physpages;
+#endif
        unsigned long min_pfn;
        unsigned long max_pfn;
 };
@@ -46,12 +47,16 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
+pg_data_t *pgdat_list[MAX_NUMNODES];
+
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
  * start at addresses that are strided by node number.
  */
+#define MAX_NODE_ALIGN_OFFSET  (32 * 1024 * 1024)
 #define NODEDATA_ALIGN(addr, node)                                             \
-       ((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
+       ((((addr) + 1024*1024-1) & ~(1024*1024-1)) +                            \
+            (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
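A worked expansion of the new macro may help; the figures below are illustrative and assume ia64's default 64KB PERCPU_PAGE_SIZE:

    /*
     * NODEDATA_ALIGN(0x4080000, 3)
     *   = ((0x4080000 + 0xfffff) & ~0xfffff)     round up to 1MB
     *     + ((3 * 0x10000) & (0x2000000 - 1))    capped node stride
     *   = 0x4100000 + 0x30000
     *   = 0x4130000
     *
     * The new MAX_NODE_ALIGN_OFFSET mask only changes the result once
     * node * PERCPU_PAGE_SIZE reaches 32MB (node >= 512 with 64KB
     * percpu pages); it bounds the per-node stride on very large
     * machines.
     */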
 
 /**
  * build_node_maps - callback to setup bootmem structs for each node
@@ -69,23 +74,20 @@ static nodemask_t memory_less_mask __initdata;
 static int __init build_node_maps(unsigned long start, unsigned long len,
                                  int node)
 {
-       unsigned long cstart, epfn, end = start + len;
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       unsigned long spfn, epfn, end = start + len;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
        epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
-       cstart = GRANULEROUNDDOWN(start);
+       spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
 
        if (!bdp->node_low_pfn) {
-               bdp->node_boot_start = cstart;
+               bdp->node_min_pfn = spfn;
                bdp->node_low_pfn = epfn;
        } else {
-               bdp->node_boot_start = min(cstart, bdp->node_boot_start);
+               bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
        }
 
-       min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
-       max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
        return 0;
 }
 
@@ -97,11 +99,11 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_early_cpu(cpu)
                if (node == node_cpuid[cpu].nid)
                        n++;
 
@@ -112,7 +114,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
        unsigned long pernodesize = 0, cpus;
 
@@ -121,6 +123,7 @@ static unsigned long __init compute_pernodesize(int node)
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+       pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
 }
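compute_pernodesize() sums the pieces in the same order that fill_pernode() walks them below; a sketch of the resulting per-node block (layout only, not literal kernel code):

    /*
     * Per-node block laid out by fill_pernode(), in summation order:
     *
     *   pernode_addr:  PERCPU_PAGE_SIZE * cpus        per-cpu areas
     *              +   node * L1_CACHE_BYTES          anti-aliasing pad
     *              +   L1_CACHE_ALIGN(pg_data_t)      -> pgdat_list[node]
     *              +   L1_CACHE_ALIGN(ia64_node_data) -> node_data
     *              +   L1_CACHE_ALIGN(pg_data_t)      second, skipped slot
     *              =   pernode_size (PAGE_ALIGNed)
     */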
@@ -139,14 +142,31 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 #ifdef CONFIG_SMP
        int cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               if (node == node_cpuid[cpu].nid) {
-                       memcpy(__va(cpu_data), __phys_per_cpu_start,
-                              __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-                               __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-               }
+       for_each_possible_early_cpu(cpu) {
+               void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+               if (node != node_cpuid[cpu].nid)
+                       continue;
+
+               memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+               __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+                       __per_cpu_start;
+
+               /*
+                * The percpu area for cpu0 is moved from the __init
+                * area, which is set up by head.S and used until this
+                * point.  Update ar.k3.  This move ensures that the
+                * percpu area for cpu0 is on the correct node and that
+                * its virtual address isn't insanely far from the
+                * other percpu areas, which is important for the
+                * congruent percpu allocator.
+                */
+               if (cpu == 0)
+                       ia64_set_kr(IA64_KR_PER_CPU_DATA,
+                                   (unsigned long)cpu_data -
+                                   (unsigned long)__per_cpu_start);
+
+               cpu_data += PERCPU_PAGE_SIZE;
        }
 #endif
        return cpu_data;
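For orientation, a minimal model of what the recorded offsets buy us (simplified; the kernel's real accessors are the generic per_cpu()/per_cpu_ptr() machinery, not this macro):

    /* Each CPU's copy of a per-cpu variable lives at the linked
     * address plus that CPU's offset -- exactly the value the loop
     * above stores in __per_cpu_offset[cpu].
     */
    #define my_per_cpu_ptr(ptr, cpu) \
        ((typeof(ptr))((char *)(ptr) + __per_cpu_offset[cpu]))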
@@ -163,7 +183,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
 {
        void *cpu_data;
        int cpus = early_nr_cpus_node(node);
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
@@ -173,13 +193,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
        pernode += PERCPU_PAGE_SIZE * cpus;
        pernode += node * L1_CACHE_BYTES;
 
-       mem_data[node].pgdat = __va(pernode);
+       pgdat_list[node] = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
        mem_data[node].node_data = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-       mem_data[node].pgdat->bdata = bdp;
+       pgdat_list[node]->bdata = bdp;
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
        cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -218,20 +238,21 @@ static void __init fill_pernode(int node, unsigned long pernode,
 static int __init find_pernode_space(unsigned long start, unsigned long len,
                                     int node)
 {
-       unsigned long epfn;
+       unsigned long spfn, epfn;
        unsigned long pernodesize = 0, pernode, pages, mapsize;
-       struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+       struct bootmem_data *bdp = &bootmem_node_data[node];
 
+       spfn = start >> PAGE_SHIFT;
        epfn = (start + len) >> PAGE_SHIFT;
 
-       pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
+       pages = bdp->node_low_pfn - bdp->node_min_pfn;
        mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
 
        /*
         * Make sure this memory falls within this node's usable memory
         * since we may have thrown some away in build_maps().
         */
-       if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
+       if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
                return 0;
 
        /* Don't setup this node's local space twice... */
@@ -266,7 +287,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                    int node)
 {
-       free_bootmem_node(mem_data[node].pgdat, start, len);
+       free_bootmem_node(pgdat_list[node], start, len);
 
        return 0;
 }
@@ -285,7 +306,7 @@ static void __init reserve_pernode_space(void)
        int node;
 
        for_each_online_node(node) {
-               pg_data_t *pdp = mem_data[node].pgdat;
+               pg_data_t *pdp = pgdat_list[node];
 
                if (node_isset(node, memory_less_mask))
                        continue;
@@ -293,15 +314,36 @@ static void __init reserve_pernode_space(void)
                bdp = pdp->bdata;
 
                /* First the bootmem_map itself */
-               pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
+               pages = bdp->node_low_pfn - bdp->node_min_pfn;
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
 
                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
-               reserve_bootmem_node(pdp, base, size);
+               reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
+       }
+}
+
+static void __meminit scatter_node_data(void)
+{
+       pg_data_t **dst;
+       int node;
+
+       /*
+        * for_each_online_node() can't be used here:
+        * node_online_map is not set for hot-added nodes at this time,
+        * because we are halfway through initialization of the new
+        * node's structures.  If for_each_online_node() were used, a
+        * new node's pg_data_ptrs would not be initialized.  Instead,
+        * pgdat_list[] is checked directly.
+        */
+       for_each_node(node) {
+               if (pgdat_list[node]) {
+                       dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+                       memcpy(dst, pgdat_list, sizeof(pgdat_list));
+               }
        }
 }
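The pg_data_ptrs table being broadcast here is what backs NODE_DATA() on ia64; paraphrasing include/asm-ia64/nodedata.h, every node keeps a local copy of the whole pgdat table so the lookup never touches remote memory:

    #define local_node_data   (local_cpu_data->node_data)
    #define NODE_DATA(nid)    (local_node_data->pg_data_ptrs[nid])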
 
@@ -315,20 +357,13 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-       pg_data_t *pgdat_list[MAX_NUMNODES];
        int cpu, node;
 
-       for_each_online_node(node)
-               pgdat_list[node] = mem_data[node].pgdat;
+       scatter_node_data();
 
-       /* Copy the pg_data_t list to each node and init the node field */
-       for_each_online_node(node) {
-               memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
-                      sizeof(pgdat_list));
-       }
 #ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_early_cpu(cpu) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
@@ -350,14 +385,12 @@ static void __init initialize_pernode_data(void)
  *     for best.
  * @nid: node id
  * @pernodesize: size of this node's pernode data
- * @align: alignment to use for this node's pernode data
  */
-static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
-       unsigned long align)
+static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 {
        void *ptr = NULL;
        u8 best = 0xff;
-       int bestnode = -1, node;
+       int bestnode = -1, node, anynode = 0;
 
        for_each_online_node(node) {
                if (node_isset(node, memory_less_mask))
@@ -366,39 +399,16 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
                        best = node_distance(nid, node);
                        bestnode = node;
                }
+               anynode = node;
        }
 
-       ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
-               pernodesize, align, __pa(MAX_DMA_ADDRESS));
-
-       if (!ptr)
-               panic("NO memory for memory less node\n");
-       return ptr;
-}
-
-/**
- * pgdat_insert - insert the pgdat into global pgdat_list
- * @pgdat: the pgdat for a node.
- */
-static void __init pgdat_insert(pg_data_t *pgdat)
-{
-       pg_data_t *prev = NULL, *next;
-
-       for_each_pgdat(next)
-               if (pgdat->node_id < next->node_id)
-                       break;
-               else
-                       prev = next;
+       if (bestnode == -1)
+               bestnode = anynode;
 
-       if (prev) {
-               prev->pgdat_next = pgdat;
-               pgdat->pgdat_next = next;
-       } else {
-               pgdat->pgdat_next = pgdat_list;
-               pgdat_list = pgdat;
-       }
+       ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
+               PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
-       return;
+       return ptr;
 }
 
 /**
@@ -413,45 +423,13 @@ static void __init memory_less_nodes(void)
 
        for_each_node_mask(node, memory_less_mask) {
                pernodesize = compute_pernodesize(node);
-               pernode = memory_less_node_alloc(node, pernodesize,
-                       (node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
+               pernode = memory_less_node_alloc(node, pernodesize);
                fill_pernode(node, __pa(pernode), pernodesize);
        }
 
        return;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
-       void *arg)
-{
-       int nid;
-
-       start = __pa(start) >> PAGE_SHIFT;
-       end = __pa(end) >> PAGE_SHIFT;
-       nid = early_pfn_to_nid(start);
-       memory_present(nid, start, end);
-
-       return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
-       efi_memmap_walk(register_sparse_mem, NULL);
-       sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -476,12 +454,16 @@ void __init find_memory(void)
        /* These actually end up getting called by call_pernode_memory() */
        efi_memmap_walk(filter_rsvd_memory, build_node_maps);
        efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+       efi_memmap_walk(find_max_min_low_pfn, NULL);
 
        for_each_online_node(node)
-               if (mem_data[node].bootmem_data.node_low_pfn) {
+               if (bootmem_node_data[node].node_low_pfn) {
                        node_clear(node, memory_less_mask);
                        mem_data[node].min_pfn = ~0UL;
                }
+
+       efi_memmap_walk(filter_memory, register_active_ranges);
+
        /*
         * Initialize the boot memory maps in reverse order since that's
         * what the bootmem allocator expects
@@ -495,14 +477,14 @@ void __init find_memory(void)
                else if (node_isset(node, memory_less_mask))
                        continue;
 
-               bdp = &mem_data[node].bootmem_data;
+               bdp = &bootmem_node_data[node];
                pernode = mem_data[node].pernode_addr;
                pernodesize = mem_data[node].pernode_size;
                map = pernode + pernodesize;
 
-               init_bootmem_node(mem_data[node].pgdat,
+               init_bootmem_node(pgdat_list[node],
                                  map>>PAGE_SHIFT,
-                                 bdp->node_boot_start>>PAGE_SHIFT,
+                                 bdp->node_min_pfn,
                                  bdp->node_low_pfn);
        }
 
@@ -524,15 +506,16 @@ void __init find_memory(void)
  * find_pernode_space() does most of this already, we just need to set
  * local_per_cpu_offset
  */
-void *per_cpu_init(void)
+void __cpuinit *per_cpu_init(void)
 {
        int cpu;
+       static int first_time = 1;
 
-       if (smp_processor_id() != 0)
-               return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
-               per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+       if (first_time) {
+               first_time = 0;
+               for_each_possible_early_cpu(cpu)
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+       }
 
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
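per_cpu_init() runs during every CPU's bringup, not just the boot CPU's, which is why the one-shot offset copy is now behind a first_time flag rather than a smp_processor_id() check. A simplified view of the caller (cpu_init() in arch/ia64/kernel/setup.c; sketch, not the full function):

    void __cpuinit cpu_init(void)
    {
        /* Both the boot CPU and secondaries pass through here; only
         * the first call copies the offsets into local_per_cpu_offset.
         */
        void *cpu_data = per_cpu_init();
        /* ... cpu_data then locates this CPU's cpu_info, etc. ... */
    }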
@@ -551,23 +534,27 @@ void show_mem(void)
        unsigned long total_present = 0;
        pg_data_t *pgdat;
 
-       printk("Mem-info:\n");
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       for_each_pgdat(pgdat) {
+       printk(KERN_INFO "Node memory in pages:\n");
+       for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int shared = 0, cached = 0, reserved = 0;
 
-               printk("Node ID: %d\n", pgdat->node_id);
                pgdat_resize_lock(pgdat, &flags);
                present = pgdat->node_present_pages;
                for(i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page;
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
                        if (pfn_valid(pgdat->node_start_pfn + i))
                                page = pfn_to_page(pgdat->node_start_pfn + i);
-                       else
+                       else {
+                               i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+                                        i) - 1;
                                continue;
+                       }
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
@@ -580,18 +567,17 @@ void show_mem(void)
                total_reserved += reserved;
                total_cached += cached;
                total_shared += shared;
-               printk("\t%ld pages of RAM\n", present);
-               printk("\t%d reserved pages\n", reserved);
-               printk("\t%d pages shared\n", shared);
-               printk("\t%d pages swap cached\n", cached);
+               printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+                      "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+                      present, reserved, shared, cached);
        }
-       printk("%ld pages of RAM\n", total_present);
-       printk("%d reserved pages\n", total_reserved);
-       printk("%d pages shared\n", total_shared);
-       printk("%d pages swap cached\n", total_cached);
-       printk("Total of %ld pages in page table cache\n",
-               pgtable_quicklist_total_size());
-       printk("%d free buffer pages\n", nr_free_buffer_pages());
+       printk(KERN_INFO "%ld pages of RAM\n", total_present);
+       printk(KERN_INFO "%d reserved pages\n", total_reserved);
+       printk(KERN_INFO "%d pages shared\n", total_shared);
+       printk(KERN_INFO "%d pages swap cached\n", total_cached);
+       printk(KERN_INFO "Total of %ld pages in page table cache\n",
+              quicklist_total_size());
+       printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
 
 /**
@@ -656,11 +642,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
        unsigned long end = start + len;
 
        mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
        if (start <= __pa(MAX_DMA_ADDRESS))
                mem_data[node].num_dma_physpages +=
                        (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
        start = GRANULEROUNDDOWN(start);
-       start = ORDERROUNDDOWN(start);
        end = GRANULEROUNDUP(end);
        mem_data[node].max_pfn = max(mem_data[node].max_pfn,
                                     end >> PAGE_SHIFT);
@@ -679,71 +666,71 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 void __init paging_init(void)
 {
        unsigned long max_dma;
-       unsigned long zones_size[MAX_NR_ZONES];
-       unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long pfn_offset = 0;
+       unsigned long max_pfn = 0;
        int node;
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
 
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-       arch_sparse_init();
-
        efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+       sparse_memory_present_with_active_regions(MAX_NUMNODES);
+       sparse_init();
+
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-       vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
-       vmem_map = (struct page *) vmalloc_end;
+       VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+               sizeof(struct page));
+       vmem_map = (struct page *) VMALLOC_END;
        efi_memmap_walk(create_mem_map_page_table, NULL);
        printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
 
        for_each_online_node(node) {
-               memset(zones_size, 0, sizeof(zones_size));
-               memset(zholes_size, 0, sizeof(zholes_size));
-
                num_physpages += mem_data[node].num_physpages;
-
-               if (mem_data[node].min_pfn >= max_dma) {
-                       /* All of this node's memory is above ZONE_DMA */
-                       zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn -
-                               mem_data[node].num_physpages;
-               } else if (mem_data[node].max_pfn < max_dma) {
-                       /* All of this node's memory is in ZONE_DMA */
-                       zones_size[ZONE_DMA] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
-                               mem_data[node].min_pfn -
-                               mem_data[node].num_dma_physpages;
-               } else {
-                       /* This node has memory in both zones */
-                       zones_size[ZONE_DMA] = max_dma -
-                               mem_data[node].min_pfn;
-                       zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-                               mem_data[node].num_dma_physpages;
-                       zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-                               max_dma;
-                       zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
-                               (mem_data[node].num_physpages -
-                                mem_data[node].num_dma_physpages);
-               }
-
                pfn_offset = mem_data[node].min_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
                NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
 #endif
-               free_area_init_node(node, NODE_DATA(node), zones_size,
-                                   pfn_offset, zholes_size);
+               if (mem_data[node].max_pfn > max_pfn)
+                       max_pfn = mem_data[node].max_pfn;
        }
 
-       /*
-        * Make memory less nodes become a member of the known nodes.
-        */
-       for_each_node_mask(node, memory_less_mask)
-               pgdat_insert(mem_data[node].pgdat);
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
+       max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
+       max_zone_pfns[ZONE_NORMAL] = max_pfn;
+       free_area_init_nodes(max_zone_pfns);
 
        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
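The hand-computed zones_size/zholes_size arrays could be dropped because free_area_init_nodes() recovers per-node zone extents and holes from the active ranges registered in find_memory(); max_zone_pfns only caps each zone globally. Illustratively (hypothetical machine, assuming 16KB pages so 4GB = pfn 0x40000):

    /*
     *   max_zone_pfns[ZONE_DMA]    = 0x40000;   memory below 4GB
     *   max_zone_pfns[ZONE_NORMAL] = 0x100000;  top of RAM (16GB here)
     *
     * free_area_init_nodes() then intersects [0, 0x40000) and
     * [0x40000, 0x100000) with each node's registered active ranges,
     * deriving the per-node zone sizes and holes automatically.
     */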
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+       unsigned long size = compute_pernodesize(nid);
+
+       return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+       kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+       pgdat_list[update_node] = update_pgdat;
+       scatter_node_data();
+}
+#endif
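These three hooks are driven by the memory hotplug core when a whole node is added; roughly, simplified from hotadd_new_pgdat() in mm/memory_hotplug.c (sketch, details elided):

    static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
    {
        pg_data_t *pgdat = arch_alloc_nodedata(nid);    /* kzalloc'd */

        if (!pgdat)
            return NULL;
        /* Publish it: sets pgdat_list[nid] and re-runs
         * scatter_node_data() so NODE_DATA(nid) works node-wide.
         */
        arch_refresh_nodedata(nid, pgdat);
        /* ... the real code then initializes zones for 'start' ... */
        return pgdat;
    }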
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(struct page *start_page,
+                                               unsigned long size, int node)
+{
+       return vmemmap_populate_basepages(start_page, size, node);
+}
+#endif