mem-hotplug: fix potential race while building zonelist for new populated zone
Haicheng Li [Mon, 24 May 2010 21:32:52 +0000 (14:32 -0700)]
Add a global mutex, zonelists_mutex, to fix the following possible race:

     CPU0                                  CPU1                    CPU2
(1) zone->present_pages += online_pages;
(2)                                       build_all_zonelists();
(3)                                                               alloc_page();
(4)                                                               free_page();
(5) build_all_zonelists();
(6)   __build_all_zonelists();
(7)     zone->pageset = alloc_percpu();

In steps (3) and (4), zone->pageset still points to boot_pageset, so bad
things may happen if two or more nodes reach this state at the same time.
Even if only one node is accessing the boot_pageset, (3) may still consume
so much memory that the allocation in step (7) fails.

Besides, making the operation atomic ensures that alloc_percpu() in step (7)
will never fail, since a fresh memory block has just been added in step (6).
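
For illustration only (not part of this patch): a minimal userspace
analogue of the race and the fix, using POSIX threads in place of the
kernel primitives.  All names here are invented for the sketch; only the
shape of the bug matches.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace analogue of the race above.  "pageset" starts out pointing
 * at a shared boot-time object; each onlining thread wants to replace
 * it with a freshly allocated one.  A single global mutex (standing in
 * for zonelists_mutex) serializes the check-and-rebuild, so the swap
 * happens exactly once.
 */
static pthread_mutex_t rebuild_mutex = PTHREAD_MUTEX_INITIALIZER;
static int boot_pageset_analogue;		/* like boot_pageset */
static int *pageset = &boot_pageset_analogue;	/* like zone->pageset */

static void *online_node(void *arg)
{
	pthread_mutex_lock(&rebuild_mutex);	/* like zonelists_mutex */
	if (pageset == &boot_pageset_analogue) {
		int *fresh = malloc(sizeof(*fresh)); /* like alloc_percpu() */
		if (fresh) {
			*fresh = 0;
			pageset = fresh;	/* step (7) analogue */
		}
	}
	pthread_mutex_unlock(&rebuild_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, online_node, NULL);
	pthread_create(&t2, NULL, online_node, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("pageset replaced exactly once: %s\n",
	       pageset != &boot_pageset_analogue ? "yes" : "no");
	if (pageset != &boot_pageset_analogue)
		free(pageset);
	return 0;
}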

[haicheng.li@linux.intel.com: hold zonelists_mutex when build_all_zonelists]
Signed-off-by: Haicheng Li <haicheng.li@linux.intel.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Andi Kleen <andi.kleen@intel.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

include/linux/mmzone.h
kernel/cpu.c
mm/memory_hotplug.c
mm/page_alloc.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a367ed5..0fa4913 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -650,6 +650,7 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
+extern struct mutex zonelists_mutex;
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
                        unsigned long *free);
 void build_all_zonelists(void *data);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3e8b3ba..124ad9d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -357,8 +357,11 @@ int __cpuinit cpu_up(unsigned int cpu)
                return -ENOMEM;
        }
 
-       if (pgdat->node_zonelists->_zonerefs->zone == NULL)
+       if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
+               mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
+               mutex_unlock(&zonelists_mutex);
+       }
 #endif
 
        cpu_maps_update_begin();
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 089cc97..a4cfcdc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -389,11 +389,6 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
        int nid;
        int ret;
        struct memory_notify arg;
-       /*
-        * mutex to protect zone->pageset when it's still shared
-        * in onlined_pages()
-        */
-       static DEFINE_MUTEX(zone_pageset_mutex);
 
        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
@@ -420,14 +415,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
         * This means the page allocator ignores this zone.
         * So, zonelist must be updated after online.
         */
-       mutex_lock(&zone_pageset_mutex);
+       mutex_lock(&zonelists_mutex);
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;
 
        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
-               mutex_unlock(&zone_pageset_mutex);
+               mutex_unlock(&zonelists_mutex);
                printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
                        nr_pages, pfn);
                memory_notify(MEM_CANCEL_ONLINE, &arg);
@@ -441,7 +436,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
        else
                zone_pcp_update(zone);
 
-       mutex_unlock(&zone_pageset_mutex);
+       mutex_unlock(&zonelists_mutex);
        setup_per_zone_wmarks();
        calculate_zone_inactive_ratio(zone);
        if (onlined_pages) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 21c52d2..08b3499 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2571,8 +2571,11 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
                        strncpy((char*)table->data, saved_string,
                                NUMA_ZONELIST_ORDER_LEN);
                        user_zonelist_order = oldval;
-               } else if (oldval != user_zonelist_order)
+               } else if (oldval != user_zonelist_order) {
+                       mutex_lock(&zonelists_mutex);
                        build_all_zonelists(NULL);
+                       mutex_unlock(&zonelists_mutex);
+               }
        }
 out:
        mutex_unlock(&zl_order_mutex);
@@ -2924,6 +2927,12 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
 static void setup_zone_pageset(struct zone *zone);
 
+/*
+ * Global mutex to protect against size modification of zonelists
+ * as well as to serialize pageset setup for the new populated zone.
+ */
+DEFINE_MUTEX(zonelists_mutex);
+
 /* return values int ....just for stop_machine() */
 static __init_refok int __build_all_zonelists(void *data)
 {
@@ -2967,6 +2976,10 @@ static __init_refok int __build_all_zonelists(void *data)
        return 0;
 }
 
+/*
+ * Called with zonelists_mutex held always
+ * unless system_state == SYSTEM_BOOTING.
+ */
 void build_all_zonelists(void *data)
 {
        set_zonelist_order();
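
For reference (a sketch, not part of the patch): after this change, every
post-boot caller is expected to follow the same locking pattern seen in
the cpu_up() and numa_zonelist_order_handler() hunks above.  The helper
name below is hypothetical.

#include <linux/mutex.h>
#include <linux/mmzone.h>	/* extern struct mutex zonelists_mutex */

/*
 * Hypothetical helper: rebuild the zonelists after boot, always under
 * zonelists_mutex so a concurrent onliner never races with the pageset
 * swap in __build_all_zonelists().
 */
static void rebuild_zonelists_locked(void)
{
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(NULL);
	mutex_unlock(&zonelists_mutex);
}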