[PATCH] remove zone_mem_map
This patch removes zone_mem_map.
pfn_to_page uses pgdat, while page_to_pfn uses zone. page_to_pfn can use
pgdat instead of zone, and it is the only user of zone_mem_map. By
modifying it, we can remove zone_mem_map.
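
In effect, the DISCONTIGMEM page_to_pfn() switches from a zone-relative
to a node-relative calculation. A minimal before/after sketch of the
mm/page_alloc.c helper changed below:

	/* before: zone-relative, needs zone->zone_mem_map */
	unsigned long page_to_pfn(struct page *page)
	{
		struct zone *zone = page_zone(page);
		return (page - zone->zone_mem_map) + zone->zone_start_pfn;
	}

	/* after: node-relative, zone_mem_map is no longer needed */
	unsigned long page_to_pfn(struct page *page)
	{
		struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
		return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
	}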
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
index c900439..192d80c 100644
--- a/include/asm-alpha/mmzone.h
+++ b/include/asm-alpha/mmzone.h
@@ -83,8 +83,7 @@
pte_t pte; \
unsigned long pfn; \
\
- pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
- pfn += page_zone(page)->zone_start_pfn << 32; \
+ pfn = page_to_pfn(page) << 32; \
pte_val(pte) = pfn | pgprot_val(pgprot); \
\
pte; \
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index a7bb497..0cfb086 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -45,11 +45,11 @@
NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})
-#define page_to_pfn(pg) \
-({ struct page *__pg = (pg); \
- struct zone *__zone = page_zone(__pg); \
- (unsigned long)(__pg - __zone->zone_mem_map) + \
- __zone->zone_start_pfn; \
+#define page_to_pfn(pg) \
+({ struct page *__pg = (pg); \
+ struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
+ (unsigned long)(__pg - __pgdat->node_mem_map) + \
+ __pgdat->node_start_pfn; \
})
#elif defined(CONFIG_SPARSEMEM)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c1c0c0..ace31c5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -225,7 +225,6 @@
* Discontig memory support fields.
*/
struct pglist_data *zone_pgdat;
- struct page *zone_mem_map;
/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
unsigned long zone_start_pfn;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 349b328..8dc8f27 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2042,7 +2042,6 @@
zone_wait_table_init(zone, size);
pgdat->nr_zones = zone_idx(zone) + 1;
- zone->zone_mem_map = pfn_to_page(zone_start_pfn);
zone->zone_start_pfn = zone_start_pfn;
memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
@@ -2768,9 +2767,8 @@
}
unsigned long page_to_pfn(struct page *page)
{
- struct zone *zone = page_zone(page);
- return (page - zone->zone_mem_map) + zone->zone_start_pfn;
-
+ struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+ return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
}
#elif defined(CONFIG_SPARSEMEM)
struct page *pfn_to_page(unsigned long pfn)