diff --git a/mm/percpu.c b/mm/percpu.c
index 86c5bdb..8c8e08f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -631,7 +631,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
        if (!chunk)
                return;
        pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
-       kfree(chunk);
+       pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 }
 
 /*
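The first hunk restores alloc/free symmetry: struct pcpu_chunk is allocated through the percpu allocator's own helper, which dispatches between kzalloc() and vzalloc() on pcpu_chunk_struct_size, so a bare kfree() is wrong whenever the struct ended up vmalloc-backed (large cpu_map arrays on big NR_CPUS configs). A simplified sketch of what this era's helper pair looks like, not the verbatim source:

#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified sketch: allocation and free dispatch on the same size
 * threshold, which is why the chunk must go back through
 * pcpu_mem_free() rather than kfree(). */
static void *pcpu_mem_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	return vzalloc(size);
}

static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}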
@@ -1029,9 +1029,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
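page_to_phys() returns the physical address of the page's first byte, so for any pointer that is not page-aligned the old code came back up to PAGE_SIZE - 1 bytes short; adding offset_in_page() (in the kernel, (unsigned long)(p) & ~PAGE_MASK) restores the intra-page offset. A runnable userspace demo of the arithmetic, assuming 4 KiB pages and hypothetical addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
/* mirrors the kernel's offset_in_page() from <linux/mm.h> */
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	uintptr_t addr      = 0xffffc90000123abcUL; /* hypothetical vmalloc address */
	uintptr_t page_phys = 0x40000000UL;         /* hypothetical page_to_phys() */

	printf("old: %#lx\n", (unsigned long)page_phys);	/* in-page offset lost */
	printf("new: %#lx\n", (unsigned long)(page_phys + offset_in_page(addr)));
	return 0;
}

This prints 0x40000000 for the old behaviour and 0x40000abc once the 0xabc offset is added back.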
@@ -1130,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
                for (alloc_end += gi->nr_units / upa;
                     alloc < alloc_end; alloc++) {
                        if (!(alloc % apl)) {
-                               printk("\n");
+                               printk(KERN_CONT "\n");
                                printk("%spcpu-alloc: ", lvl);
                        }
-                       printk("[%0*d] ", group_width, group);
+                       printk(KERN_CONT "[%0*d] ", group_width, group);
 
                        for (unit_end += upa; unit < unit_end; unit++)
                                if (gi->cpu_map[unit] != NR_CPUS)
-                                       printk("%0*d ", cpu_width,
+                                       printk(KERN_CONT "%0*d ", cpu_width,
                                               gi->cpu_map[unit]);
                                else
-                                       printk("%s ", empty_str);
+                                       printk(KERN_CONT "%s ", empty_str);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 /**
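The printk() changes in pcpu_dump_alloc_info() assemble one logical table row from many fragments; without KERN_CONT the log core may treat each fragment as a fresh message at the default loglevel, breaking the pcpu-alloc map across lines. The pattern, as a kernel-style sketch (dump_row() is a hypothetical name):

#include <linux/kernel.h>
#include <linux/printk.h>

/* Sketch: one logical log line built from several printk() calls.
 * KERN_CONT marks each fragment as a continuation of the previous
 * record rather than the start of a new one. */
static void dump_row(const int *ids, int n)
{
	int i;

	printk(KERN_INFO "pcpu-alloc:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " [%02d]", ids[i]);
	printk(KERN_CONT "\n");
}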
@@ -1368,7 +1370,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
 #ifdef CONFIG_SMP
 
-const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
+const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
        [PCPU_FC_AUTO]  = "auto",
        [PCPU_FC_EMBED] = "embed",
        [PCPU_FC_PAGE]  = "page",
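Two independent hardenings in one line: const char * const makes the pointer slots themselves read-only, and __initconst replaces __initdata because const-qualified objects belong in .init.rodata (a const object annotated __initdata can provoke a section type conflict at build time). The constness half, as a runnable userspace contrast with the section attributes omitted:

/* Userspace illustration of the constness change; the kernel's
 * __initdata/__initconst section attributes are left out. */
const char *table_a[] = { "auto", "embed", "page" };
const char * const table_b[] = { "auto", "embed", "page" };

int main(void)
{
	table_a[0] = "oops";	/* legal: the array slots are writable */
	/* table_b[0] = "oops";	   compile error: the slots are const */
	return 0;
}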
@@ -1378,6 +1380,9 @@ enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
 
 static int __init percpu_alloc_setup(char *str)
 {
+       if (!str)
+               return -EINVAL;
+
        if (0)
                /* nada */;
 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
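percpu_alloc is registered with early_param(), and early-parameter handlers are invoked with str == NULL when the option appears on the command line without an '=value'; the new guard keeps the strcmp() calls that follow from dereferencing NULL. The surrounding pattern, condensed from this file with the config #ifdefs dropped:

#include <linux/init.h>
#include <linux/string.h>

/* Condensed sketch of the guarded handler: bail out before any
 * strcmp() can dereference a NULL argument. */
static int __init percpu_alloc_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
	else
		pr_warn("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);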
@@ -1648,6 +1653,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                areas[group] = ptr;
 
                base = min(ptr, base);
+       }
+
+       /*
+        * Copy data and free unused parts.  This should happen after all
+        * allocations are complete; otherwise, we may end up with
+        * overlapping groups.
+        */
+       for (group = 0; group < ai->nr_groups; group++) {
+               struct pcpu_group_info *gi = &ai->groups[group];
+               void *ptr = areas[group];
 
                for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
                        if (gi->cpu_map[i] == NR_CPUS) {
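This hunk splits what was a single loop into two passes: first allocate every group's area (tracking the lowest base address), and only then copy the static data in and free the unused units. With the old single pass, tail pages freed from an earlier group could be handed back out by the allocator to satisfy a later group's request, producing overlapping group areas. The shape of the fix, with alloc_group() and copy_and_trim() as hypothetical stand-ins for the real bodies:

/* Pass 1: allocate everything before freeing anything. */
for (group = 0; group < ai->nr_groups; group++)
	areas[group] = alloc_group(group);

/* Pass 2: only now copy static data into each area and return the
 * unused tail pages; nothing freed here can be recycled into a
 * later group's allocation. */
for (group = 0; group < ai->nr_groups; group++)
	copy_and_trim(areas[group], group);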
@@ -1883,6 +1898,8 @@ void __init setup_per_cpu_areas(void)
        fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
+       /* kmemleak tracks the percpu allocations separately */
+       kmemleak_free(fc);
 
        ai->dyn_size = unit_size;
        ai->unit_size = unit_size;
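The bootmem block backing the first chunk lives for the system's lifetime, and, as the new comment notes, percpu allocations are tracked by kmemleak separately; dropping the bootmem object with kmemleak_free() avoids double tracking and false leak reports. A condensed sketch of the pattern (first_chunk_alloc() is a hypothetical wrapper):

#include <linux/bootmem.h>
#include <linux/kmemleak.h>

/* Condensed sketch: allocate the first-chunk backing store, then
 * untrack it, since percpu registers the region with kmemleak on
 * its own. */
static void * __init first_chunk_alloc(size_t unit_size)
{
	void *fc = __alloc_bootmem(unit_size, PAGE_SIZE,
				   __pa(MAX_DMA_ADDRESS));
	if (!fc)
		panic("Failed to allocate memory for percpu areas.");

	kmemleak_free(fc);	/* percpu tracks this region separately */
	return fc;
}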