x86: Remove old bootmem code
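
Convert arch/x86/mm/numa_32.c from the old e820/bootmem early-reservation
interfaces to their memblock replacements, as the diff below shows:
e820_register_active_regions() becomes memblock_x86_register_active_regions(),
find_e820_area() becomes memblock_find_in_range() (which signals failure with
MEMBLOCK_ERROR instead of -1ULL), and reserve_early() becomes
memblock_x86_reserve_range().  The explicit NODE_DATA(0)->bdata =
&bootmem_node_data[0] hookup and the numa_32-specific set_highmem_pages_init()
are removed.  A minimal sketch of the resulting find-then-reserve pattern
follows the diff.
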
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 847c164..70ddeb7 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,6 +24,7 @@
 
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/mmzone.h>
 #include <linux/highmem.h>
 #include <linux/initrd.h>
@@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void)
 
        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
-       e820_register_active_regions(0, 0, max_pfn);
+       memblock_x86_register_active_regions(0, 0, max_pfn);
        memory_present(0, 0, max_pfn);
        node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);
 
@@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid)
                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
        else {
                unsigned long pgdat_phys;
-               pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
+               pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
                                 max_pfn_mapped<<PAGE_SHIFT,
                                 sizeof(pg_data_t),
                                 PAGE_SIZE);
                NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
                memset(buf, 0, sizeof(buf));
                sprintf(buf, "NODE_DATA %d",  nid);
-               reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
+               memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
        }
        printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
                nid, (unsigned long)NODE_DATA(nid));
@@ -194,7 +195,7 @@ void *alloc_remap(int nid, unsigned long size)
        size = ALIGN(size, L1_CACHE_BYTES);
 
        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
-               return 0;
+               return NULL;
 
        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);
@@ -222,7 +223,42 @@ static void __init remap_numa_kva(void)
        }
 }
 
-static unsigned long calculate_numa_remap_pages(void)
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ *                       during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+       int node;
+
+       for_each_online_node(node) {
+               unsigned long start_va, start_pfn, size, pfn;
+
+               start_va = (unsigned long)node_remap_start_vaddr[node];
+               start_pfn = node_remap_start_pfn[node];
+               size = node_remap_size[node];
+
+               printk(KERN_DEBUG "%s: node %d\n", __func__, node);
+
+               for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+                       unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+                       pgd_t *pgd = pgd_base + pgd_index(vaddr);
+                       pud_t *pud = pud_offset(pgd, vaddr);
+                       pmd_t *pmd = pmd_offset(pud, vaddr);
+
+                       set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+                                               PAGE_KERNEL_LARGE_EXEC));
+
+                       printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+                               __func__, vaddr, start_pfn + pfn);
+               }
+       }
+}
+#endif
+
+static __init unsigned long calculate_numa_remap_pages(void)
 {
        int nid;
        unsigned long size, reserve_pages = 0;
@@ -256,15 +292,15 @@ static unsigned long calculate_numa_remap_pages(void)
                                                 PTRS_PER_PTE);
                node_kva_target <<= PAGE_SHIFT;
                do {
-                       node_kva_final = find_e820_area(node_kva_target,
+                       node_kva_final = memblock_find_in_range(node_kva_target,
                                        ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
                                                ((u64)size)<<PAGE_SHIFT,
                                                LARGE_PAGE_BYTES);
                        node_kva_target -= LARGE_PAGE_BYTES;
-               } while (node_kva_final == -1ULL &&
+               } while (node_kva_final == MEMBLOCK_ERROR &&
                         (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
 
-               if (node_kva_final == -1ULL)
+               if (node_kva_final == MEMBLOCK_ERROR)
                        panic("Can not get kva ram\n");
 
                node_remap_size[nid] = size;
@@ -283,9 +319,9 @@ static unsigned long calculate_numa_remap_pages(void)
                 *  but we could have some hole in high memory, and it will only
                 *  check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide
                 *  to use it as free.
-                *  So reserve_early here, hope we don't run out of that array
+                *  So memblock_x86_reserve_range here, hope we don't run out of that array
                 */
-               reserve_early(node_kva_final,
+               memblock_x86_reserve_range(node_kva_final,
                              node_kva_final+(((u64)size)<<PAGE_SHIFT),
                              "KVA RAM");
 
@@ -312,8 +348,8 @@ static void init_remap_allocator(int nid)
                (ulong) node_remap_end_vaddr[nid]);
 }
 
-void __init initmem_init(unsigned long start_pfn,
-                                 unsigned long end_pfn)
+void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
+                               int acpi, int k8)
 {
        int nid;
        long kva_target_pfn;
@@ -332,14 +368,14 @@ void __init initmem_init(unsigned long start_pfn,
 
        kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
        do {
-               kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+               kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
                                        max_low_pfn<<PAGE_SHIFT,
                                        kva_pages<<PAGE_SHIFT,
                                        PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
                kva_target_pfn -= PTRS_PER_PTE;
-       } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+       } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
 
-       if (kva_start_pfn == -1UL)
+       if (kva_start_pfn == MEMBLOCK_ERROR)
                panic("Can not get kva space\n");
 
        printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
@@ -347,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn,
        printk(KERN_INFO "max_pfn = %lx\n", max_pfn);
 
        /* avoid clash with initrd */
-       reserve_early(kva_start_pfn<<PAGE_SHIFT,
+       memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
                      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
                     "KVA PG");
 #ifdef CONFIG_HIGHMEM
@@ -381,39 +417,14 @@ void __init initmem_init(unsigned long start_pfn,
        for_each_online_node(nid)
                propagate_e820_map_node(nid);
 
-       for_each_online_node(nid)
+       for_each_online_node(nid) {
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+               NODE_DATA(nid)->node_id = nid;
+       }
 
-       NODE_DATA(0)->bdata = &bootmem_node_data[0];
        setup_bootmem_allocator();
 }
 
-void __init set_highmem_pages_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-       struct zone *zone;
-       int nid;
-
-       for_each_zone(zone) {
-               unsigned long zone_start_pfn, zone_end_pfn;
-
-               if (!is_highmem(zone))
-                       continue;
-
-               zone_start_pfn = zone->zone_start_pfn;
-               zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-
-               nid = zone_to_nid(zone);
-               printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
-                               zone->name, nid, zone_start_pfn, zone_end_pfn);
-
-               add_highpages_with_active_regions(nid, zone_start_pfn,
-                                zone_end_pfn);
-       }
-       totalram_pages += totalhigh_pages;
-#endif
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static int paddr_to_nid(u64 addr)
 {
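
As referenced above, here is a minimal sketch of the find-then-reserve pattern
the patch converts to, built only from helpers that appear in the diff
(memblock_find_in_range(), memblock_x86_reserve_range(), MEMBLOCK_ERROR).  The
helper name example_alloc and the "EXAMPLE" label are illustrative and not part
of the patch.

#include <linux/memblock.h>

/*
 * Illustrative only (not part of the patch): the same sequence used
 * above for NODE_DATA and the KVA remap areas.  MEMBLOCK_ERROR replaces
 * find_e820_area()'s old -1ULL failure value.
 */
static u64 __init example_alloc(u64 start, u64 end, u64 size, u64 align)
{
        u64 addr = memblock_find_in_range(start, end, size, align);

        if (addr == MEMBLOCK_ERROR)
                panic("example_alloc: no early memory left\n");

        memblock_x86_reserve_range(addr, addr + size, "EXAMPLE");
        return addr;
}

In these terms, the allocate_pgdat() hunk above is effectively
example_alloc(min_low_pfn << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT,
sizeof(pg_data_t), PAGE_SIZE) with a "NODE_DATA %d" label.
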