]> nv-tegra.nvidia Code Review - linux-3.10.git/blobdiff - mm/percpu.c
mm: purge fragmented percpu vmap blocks
[linux-3.10.git] / mm / percpu.c
index 7971997de310151c8e8e8159fe7dd98afc192748..083e7c91e5f62b10ec0b85eaa2f307a1bb2921df 100644 (file)
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default
@@ -58,6 +56,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/bootmem.h>
+#include <linux/err.h>
 #include <linux/list.h>
 #include <linux/log2.h>
 #include <linux/mm.h>
@@ -73,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 #define PCPU_SLOT_BASE_SHIFT           5       /* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC             16      /* start a map with 16 ents */
@@ -93,10 +93,11 @@ struct pcpu_chunk {
        struct list_head        list;           /* linked to pcpu_slot lists */
        int                     free_size;      /* free bytes in the chunk */
        int                     contig_hint;    /* max contiguous size hint */
-       struct vm_struct        *vm;            /* mapped vmalloc region */
+       void                    *base_addr;     /* base address of this chunk */
        int                     map_used;       /* # of map entries used */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
+       struct vm_struct        **vms;          /* mapped vmalloc regions */
        bool                    immutable;      /* no [de]population allowed */
        unsigned long           populated[];    /* populated bitmap */
 };
@@ -104,7 +105,7 @@ struct pcpu_chunk {
 static int pcpu_unit_pages __read_mostly;
 static int pcpu_unit_size __read_mostly;
 static int pcpu_nr_units __read_mostly;
-static int pcpu_chunk_size __read_mostly;
+static int pcpu_atom_size __read_mostly;
 static int pcpu_nr_slots __read_mostly;
 static size_t pcpu_chunk_struct_size __read_mostly;
 
@@ -116,8 +117,13 @@ static unsigned int pcpu_last_unit_cpu __read_mostly;
 void *pcpu_base_addr __read_mostly;
 EXPORT_SYMBOL_GPL(pcpu_base_addr);
 
-/* cpu -> unit map */
-const int *pcpu_unit_map __read_mostly;
+static const int *pcpu_unit_map __read_mostly;         /* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __read_mostly;  /* cpu -> unit offset */
+
+/* group information, used for vm allocation */
+static int pcpu_nr_groups __read_mostly;
+static const unsigned long *pcpu_group_offsets __read_mostly;
+static const size_t *pcpu_group_sizes __read_mostly;
 
 /*
  * The first chunk which always exists.  Note that unlike other
@@ -146,7 +152,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary.  All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released.  In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context.  When memory needs to be
@@ -195,8 +204,8 @@ static int pcpu_page_idx(unsigned int cpu, int page_idx)
 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
                                     unsigned int cpu, int page_idx)
 {
-       return (unsigned long)chunk->vm->addr +
-               (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+       return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
+               (page_idx << PAGE_SHIFT);
 }
 
 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
@@ -323,7 +332,7 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
  */
 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 {
-       void *first_start = pcpu_first_chunk->vm->addr;
+       void *first_start = pcpu_first_chunk->base_addr;
 
        /* is it in the first chunk? */
        if (addr >= first_start && addr < first_start + pcpu_unit_size) {
@@ -340,67 +349,91 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
         * space.  Note that any possible cpu id can be used here, so
         * there's no need to worry about preemption or cpu hotplug.
         */
-       addr += pcpu_unit_map[smp_processor_id()] * pcpu_unit_size;
+       addr += pcpu_unit_offsets[raw_smp_processor_id()];
        return pcpu_get_page_chunk(vmalloc_to_page(addr));
 }
 
 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accomodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accommodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
        int new_alloc;
-       int *new;
-       size_t size;
 
-       /* has enough? */
        if (chunk->map_alloc >= chunk->map_used + 2)
                return 0;
 
-       spin_unlock_irq(&pcpu_lock);
-
        new_alloc = PCPU_DFL_MAP_ALLOC;
        while (new_alloc < chunk->map_used + 2)
                new_alloc *= 2;
 
-       new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-       if (!new) {
-               spin_lock_irq(&pcpu_lock);
+       return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+       int *old = NULL, *new = NULL;
+       size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+       unsigned long flags;
+
+       new = pcpu_mem_alloc(new_size);
+       if (!new)
                return -ENOMEM;
-       }
 
-       /*
-        * Acquire pcpu_lock and switch to new area map.  Only free
-        * could have happened inbetween, so map_used couldn't have
-        * grown.
-        */
-       spin_lock_irq(&pcpu_lock);
-       BUG_ON(new_alloc < chunk->map_used + 2);
+       /* acquire pcpu_lock and switch to new area map */
+       spin_lock_irqsave(&pcpu_lock, flags);
+
+       if (new_alloc <= chunk->map_alloc)
+               goto out_unlock;
 
-       size = chunk->map_alloc * sizeof(chunk->map[0]);
-       memcpy(new, chunk->map, size);
+       old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+       memcpy(new, chunk->map, old_size);
 
        /*
         * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
         * one of the first chunks and still using static map.
         */
        if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-               pcpu_mem_free(chunk->map, size);
+               old = chunk->map;
 
        chunk->map_alloc = new_alloc;
        chunk->map = new;
+       new = NULL;
+
+out_unlock:
+       spin_unlock_irqrestore(&pcpu_lock, flags);
+
+       /*
+        * pcpu_mem_free() might end up calling vfree() which uses
+        * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+        */
+       pcpu_mem_free(old, old_size);
+       pcpu_mem_free(new, new_size);
+
        return 0;
 }
 
@@ -986,8 +1019,8 @@ static void free_pcpu_chunk(struct pcpu_chunk *chunk)
 {
        if (!chunk)
                return;
-       if (chunk->vm)
-               free_vm_area(chunk->vm);
+       if (chunk->vms)
+               pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
        pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
        kfree(chunk);
 }
@@ -1004,8 +1037,10 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
        chunk->map[chunk->map_used++] = pcpu_unit_size;
 
-       chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
-       if (!chunk->vm) {
+       chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
+                                      pcpu_nr_groups, pcpu_atom_size,
+                                      GFP_KERNEL);
+       if (!chunk->vms) {
                free_pcpu_chunk(chunk);
                return NULL;
        }
@@ -1013,6 +1048,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
        INIT_LIST_HEAD(&chunk->list);
        chunk->free_size = pcpu_unit_size;
        chunk->contig_hint = pcpu_unit_size;
+       chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
 
        return chunk;
 }
@@ -1033,8 +1069,11 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  */
 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
+       static int warn_limit = 10;
        struct pcpu_chunk *chunk;
-       int slot, off;
+       const char *err;
+       int slot, off, new_alloc;
+       unsigned long flags;
 
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1043,17 +1082,31 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
        }
 
        mutex_lock(&pcpu_alloc_mutex);
-       spin_lock_irq(&pcpu_lock);
+       spin_lock_irqsave(&pcpu_lock, flags);
 
        /* serve reserved allocations from the reserved chunk if available */
        if (reserved && pcpu_reserved_chunk) {
                chunk = pcpu_reserved_chunk;
-               if (size > chunk->contig_hint ||
-                   pcpu_extend_area_map(chunk) < 0)
+
+               if (size > chunk->contig_hint) {
+                       err = "alloc from reserved chunk failed";
                        goto fail_unlock;
+               }
+
+               while ((new_alloc = pcpu_need_to_extend(chunk))) {
+                       spin_unlock_irqrestore(&pcpu_lock, flags);
+                       if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+                               err = "failed to extend area map of reserved chunk";
+                               goto fail_unlock_mutex;
+                       }
+                       spin_lock_irqsave(&pcpu_lock, flags);
+               }
+
                off = pcpu_alloc_area(chunk, size, align);
                if (off >= 0)
                        goto area_found;
+
+               err = "alloc from reserved chunk failed";
                goto fail_unlock;
        }
 
@@ -1064,13 +1117,20 @@ restart:
                        if (size > chunk->contig_hint)
                                continue;
 
-                       switch (pcpu_extend_area_map(chunk)) {
-                       case 0:
-                               break;
-                       case 1:
-                               goto restart;   /* pcpu_lock dropped, restart */
-                       default:
-                               goto fail_unlock;
+                       new_alloc = pcpu_need_to_extend(chunk);
+                       if (new_alloc) {
+                               spin_unlock_irqrestore(&pcpu_lock, flags);
+                               if (pcpu_extend_area_map(chunk,
+                                                        new_alloc) < 0) {
+                                       err = "failed to extend area map";
+                                       goto fail_unlock_mutex;
+                               }
+                               spin_lock_irqsave(&pcpu_lock, flags);
+                               /*
+                                * pcpu_lock has been dropped, need to
+                                * restart cpu_slot list walking.
+                                */
+                               goto restart;
                        }
 
                        off = pcpu_alloc_area(chunk, size, align);
@@ -1080,35 +1140,45 @@ restart:
        }
 
        /* hmmm... no space left, create a new chunk */
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);
 
        chunk = alloc_pcpu_chunk();
-       if (!chunk)
+       if (!chunk) {
+               err = "failed to allocate new chunk";
                goto fail_unlock_mutex;
+       }
 
-       spin_lock_irq(&pcpu_lock);
+       spin_lock_irqsave(&pcpu_lock, flags);
        pcpu_chunk_relocate(chunk, -1);
        goto restart;
 
 area_found:
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);
 
        /* populate, map and clear the area */
        if (pcpu_populate_chunk(chunk, off, size)) {
-               spin_lock_irq(&pcpu_lock);
+               spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_free_area(chunk, off);
+               err = "failed to populate";
                goto fail_unlock;
        }
 
        mutex_unlock(&pcpu_alloc_mutex);
 
-       /* return address relative to unit0 */
-       return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+       /* return address relative to base address */
+       return __addr_to_pcpu_ptr(chunk->base_addr + off);
 
 fail_unlock:
-       spin_unlock_irq(&pcpu_lock);
+       spin_unlock_irqrestore(&pcpu_lock, flags);
 fail_unlock_mutex:
        mutex_unlock(&pcpu_alloc_mutex);
+       if (warn_limit) {
+               pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
+                          "%s\n", size, align, err);
+               dump_stack();
+               if (!--warn_limit)
+                       pr_info("PERCPU: limit reached, disable warning\n");
+       }
        return NULL;
 }
 
@@ -1201,7 +1271,7 @@ static void pcpu_reclaim(struct work_struct *work)
  */
 void free_percpu(void *ptr)
 {
-       void *addr = __pcpu_ptr_to_addr(ptr);
+       void *addr;
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int off;
@@ -1209,10 +1279,12 @@ void free_percpu(void *ptr)
        if (!ptr)
                return;
 
+       addr = __pcpu_ptr_to_addr(ptr);
+
        spin_lock_irqsave(&pcpu_lock, flags);
 
        chunk = pcpu_chunk_addr_search(addr);
-       off = addr - chunk->vm->addr;
+       off = addr - chunk->base_addr;
 
        pcpu_free_area(chunk, off);
 
@@ -1232,19 +1304,319 @@ void free_percpu(void *ptr)
 EXPORT_SYMBOL_GPL(free_percpu);
 
 /**
- * pcpu_setup_first_chunk - initialize the first percpu chunk
- * @static_size: the size of static percpu area in bytes
- * @reserved_size: the size of reserved percpu area in bytes, 0 for none
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+       if ((unsigned long)addr < VMALLOC_START ||
+                       (unsigned long)addr >= VMALLOC_END)
+               return __pa(addr);
+       else
+               return page_to_phys(vmalloc_to_page(addr));
+}
+
+static inline size_t pcpu_calc_fc_sizes(size_t static_size,
+                                       size_t reserved_size,
+                                       ssize_t *dyn_sizep)
+{
+       size_t size_sum;
+
+       size_sum = PFN_ALIGN(static_size + reserved_size +
+                            (*dyn_sizep >= 0 ? *dyn_sizep : 0));
+       if (*dyn_sizep != 0)
+               *dyn_sizep = size_sum - static_size - reserved_size;
+
+       return size_sum;
+}
+
+/**
+ * pcpu_alloc_alloc_info - allocate percpu allocation info
+ * @nr_groups: the number of groups
+ * @nr_units: the number of units
+ *
+ * Allocate ai which is large enough for @nr_groups groups containing
+ * @nr_units units.  The returned ai's groups[0].cpu_map points to the
+ * cpu_map array which is long enough for @nr_units and filled with
+ * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
+ * pointer of other groups.
+ *
+ * RETURNS:
+ * Pointer to the allocated pcpu_alloc_info on success, NULL on
+ * failure.
+ */
+struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
+                                                     int nr_units)
+{
+       struct pcpu_alloc_info *ai;
+       size_t base_size, ai_size;
+       void *ptr;
+       int unit;
+
+       base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
+                         __alignof__(ai->groups[0].cpu_map[0]));
+       ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
+
+       ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+       if (!ptr)
+               return NULL;
+       ai = ptr;
+       ptr += base_size;
+
+       ai->groups[0].cpu_map = ptr;
+
+       for (unit = 0; unit < nr_units; unit++)
+               ai->groups[0].cpu_map[unit] = NR_CPUS;
+
+       ai->nr_groups = nr_groups;
+       ai->__ai_size = PFN_ALIGN(ai_size);
+
+       return ai;
+}
+
+/**
+ * pcpu_free_alloc_info - free percpu allocation info
+ * @ai: pcpu_alloc_info to free
+ *
+ * Free @ai which was allocated by pcpu_alloc_alloc_info().
+ */
+void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
+{
+       free_bootmem(__pa(ai), ai->__ai_size);
+}
+
+/**
+ * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
+ * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
- * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
+ * @atom_size: allocation atom size
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
+ *
+ * This function determines grouping of units, their mappings to cpus
+ * and other parameters considering needed percpu size, allocation
+ * atom size and distances between CPUs.
+ *
+ * Groups are always multiples of atom size and CPUs which are of
+ * LOCAL_DISTANCE both ways are grouped together and share space for
+ * units in the same group.  The returned configuration is guaranteed
+ * to have CPUs on different nodes on different groups and >=75% usage
+ * of allocated virtual address space.
+ *
+ * RETURNS:
+ * On success, pointer to the new allocation_info is returned.  On
+ * failure, ERR_PTR value is returned.
+ */
+struct pcpu_alloc_info * __init pcpu_build_alloc_info(
+                               size_t reserved_size, ssize_t dyn_size,
+                               size_t atom_size,
+                               pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
+{
+       static int group_map[NR_CPUS] __initdata;
+       static int group_cnt[NR_CPUS] __initdata;
+       const size_t static_size = __per_cpu_end - __per_cpu_start;
+       int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
+       size_t size_sum, min_unit_size, alloc_size;
+       int upa, max_upa, uninitialized_var(best_upa);  /* units_per_alloc */
+       int last_allocs, group, unit;
+       unsigned int cpu, tcpu;
+       struct pcpu_alloc_info *ai;
+       unsigned int *cpu_map;
+
+       /* this function may be called multiple times */
+       memset(group_map, 0, sizeof(group_map));
+       memset(group_cnt, 0, sizeof(group_cnt));
+
+       /*
+        * Determine min_unit_size, alloc_size and max_upa such that
+        * alloc_size is multiple of atom_size and is the smallest
+        * which can accommodate 4k aligned segments which are equal to
+        * or larger than min_unit_size.
+        */
+       size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
+       min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
+
+       alloc_size = roundup(min_unit_size, atom_size);
+       upa = alloc_size / min_unit_size;
+       while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+               upa--;
+       max_upa = upa;
+
+       /* group cpus according to their proximity */
+       for_each_possible_cpu(cpu) {
+               group = 0;
+       next_group:
+               for_each_possible_cpu(tcpu) {
+                       if (cpu == tcpu)
+                               break;
+                       if (group_map[tcpu] == group && cpu_distance_fn &&
+                           (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
+                            cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
+                               group++;
+                               nr_groups = max(nr_groups, group + 1);
+                               goto next_group;
+                       }
+               }
+               group_map[cpu] = group;
+               group_cnt[group]++;
+               group_cnt_max = max(group_cnt_max, group_cnt[group]);
+       }
+
+       /*
+        * Expand unit size until address space usage goes over 75%
+        * and then as much as possible without using more address
+        * space.
+        */
+       last_allocs = INT_MAX;
+       for (upa = max_upa; upa; upa--) {
+               int allocs = 0, wasted = 0;
+
+               if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
+                       continue;
+
+               for (group = 0; group < nr_groups; group++) {
+                       int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
+                       allocs += this_allocs;
+                       wasted += this_allocs * upa - group_cnt[group];
+               }
+
+               /*
+                * Don't accept if wastage is over 1/3.  The
+                * greater-than comparison ensures upa==1 always
+                * passes the following check.
+                */
+               if (wasted > num_possible_cpus() / 3)
+                       continue;
+
+               /* and then don't consume more memory */
+               if (allocs > last_allocs)
+                       break;
+               last_allocs = allocs;
+               best_upa = upa;
+       }
+       upa = best_upa;
+
+       /* allocate and fill alloc_info */
+       for (group = 0; group < nr_groups; group++)
+               nr_units += roundup(group_cnt[group], upa);
+
+       ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
+       if (!ai)
+               return ERR_PTR(-ENOMEM);
+       cpu_map = ai->groups[0].cpu_map;
+
+       for (group = 0; group < nr_groups; group++) {
+               ai->groups[group].cpu_map = cpu_map;
+               cpu_map += roundup(group_cnt[group], upa);
+       }
+
+       ai->static_size = static_size;
+       ai->reserved_size = reserved_size;
+       ai->dyn_size = dyn_size;
+       ai->unit_size = alloc_size / upa;
+       ai->atom_size = atom_size;
+       ai->alloc_size = alloc_size;
+
+       for (group = 0, unit = 0; group_cnt[group]; group++) {
+               struct pcpu_group_info *gi = &ai->groups[group];
+
+               /*
+                * Initialize base_offset as if all groups are located
+                * back-to-back.  The caller should update this to
+                * reflect actual allocation.
+                */
+               gi->base_offset = unit * ai->unit_size;
+
+               for_each_possible_cpu(cpu)
+                       if (group_map[cpu] == group)
+                               gi->cpu_map[gi->nr_units++] = cpu;
+               gi->nr_units = roundup(gi->nr_units, upa);
+               unit += gi->nr_units;
+       }
+       BUG_ON(unit != nr_units);
+
+       return ai;
+}
+
+/**
+ * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
+ * @lvl: loglevel
+ * @ai: allocation info to dump
+ *
+ * Print out information about @ai using loglevel @lvl.
+ */
+static void pcpu_dump_alloc_info(const char *lvl,
+                                const struct pcpu_alloc_info *ai)
+{
+       int group_width = 1, cpu_width = 1, width;
+       char empty_str[] = "--------";
+       int alloc = 0, alloc_end = 0;
+       int group, v;
+       int upa, apl;   /* units per alloc, allocs per line */
+
+       v = ai->nr_groups;
+       while (v /= 10)
+               group_width++;
+
+       v = num_possible_cpus();
+       while (v /= 10)
+               cpu_width++;
+       empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
+
+       upa = ai->alloc_size / ai->unit_size;
+       width = upa * (cpu_width + 1) + group_width + 3;
+       apl = rounddown_pow_of_two(max(60 / width, 1));
+
+       printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
+              lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
+              ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
+
+       for (group = 0; group < ai->nr_groups; group++) {
+               const struct pcpu_group_info *gi = &ai->groups[group];
+               int unit = 0, unit_end = 0;
+
+               BUG_ON(gi->nr_units % upa);
+               for (alloc_end += gi->nr_units / upa;
+                    alloc < alloc_end; alloc++) {
+                       if (!(alloc % apl)) {
+                               printk("\n");
+                               printk("%spcpu-alloc: ", lvl);
+                       }
+                       printk("[%0*d] ", group_width, group);
+
+                       for (unit_end += upa; unit < unit_end; unit++)
+                               if (gi->cpu_map[unit] != NR_CPUS)
+                                       printk("%0*d ", cpu_width,
+                                              gi->cpu_map[unit]);
+                               else
+                                       printk("%s ", empty_str);
+               }
+       }
+       printk("\n");
+}
+
+/**
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
+ * @ai: pcpu_alloc_info describing how the percpu area is shaped
  * @base_addr: mapped address
- * @unit_map: cpu -> unit map, NULL for sequential mapping
  *
  * Initialize the first percpu chunk which contains the kernel static
  * perpcu area.  This function is to be called from arch percpu area
  * setup path.
  *
- * @reserved_size, if non-zero, specifies the amount of bytes to
+ * @ai contains all information necessary to initialize the first
+ * chunk and prime the dynamic percpu allocator.
+ *
+ * @ai->static_size is the size of static percpu area.
+ *
+ * @ai->reserved_size, if non-zero, specifies the amount of bytes to
  * reserve after the static area in the first chunk.  This reserves
  * the first chunk such that it's available only through reserved
  * percpu allocation.  This is primarily used to serve module percpu
@@ -1252,14 +1624,26 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * limited offset range for symbol relocations to guarantee module
  * percpu symbols fall inside the relocatable range.
  *
- * @dyn_size, if non-negative, determines the number of bytes
- * available for dynamic allocation in the first chunk.  Specifying
- * non-negative value makes percpu leave alone the area beyond
- * @static_size + @reserved_size + @dyn_size.
+ * @ai->dyn_size determines the number of bytes available for dynamic
+ * allocation in the first chunk.  The area between @ai->static_size +
+ * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
+ *
+ * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
+ * and equal to or larger than @ai->static_size + @ai->reserved_size +
+ * @ai->dyn_size.
+ *
+ * @ai->atom_size is the allocation atom size and used as alignment
+ * for vm areas.
  *
- * @unit_size specifies unit size and must be aligned to PAGE_SIZE and
- * equal to or larger than @static_size + @reserved_size + if
- * non-negative, @dyn_size.
+ * @ai->alloc_size is the allocation size and always multiple of
+ * @ai->atom_size.  This is larger than @ai->atom_size if
+ * @ai->unit_size is larger than @ai->atom_size.
+ *
+ * @ai->nr_groups and @ai->groups describe virtual memory layout of
+ * percpu areas.  Units which should be colocated are put into the
+ * same group.  Dynamic VM areas will be allocated according to these
+ * groupings.  If @ai->nr_groups is zero, a single group containing
+ * all units is assumed.
  *
  * The caller should have mapped the first chunk at @base_addr and
  * copied static data to each unit.
@@ -1272,86 +1656,99 @@ EXPORT_SYMBOL_GPL(free_percpu);
  * and available for dynamic allocation like any other chunks.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access.
+ * 0 on success, -errno on failure.
  */
-size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
-                                    ssize_t dyn_size, size_t unit_size,
-                                    void *base_addr, const int *unit_map)
+int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+                                 void *base_addr)
 {
-       static struct vm_struct first_vm;
+       static char cpus_buf[4096] __initdata;
        static int smap[2], dmap[2];
-       size_t size_sum = static_size + reserved_size +
-                         (dyn_size >= 0 ? dyn_size : 0);
+       size_t dyn_size = ai->dyn_size;
+       size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
        struct pcpu_chunk *schunk, *dchunk = NULL;
-       unsigned int cpu, tcpu;
-       int i;
+       unsigned long *group_offsets;
+       size_t *group_sizes;
+       unsigned long *unit_off;
+       unsigned int cpu;
+       int *unit_map;
+       int group, unit, i;
+
+       cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
+
+#define PCPU_SETUP_BUG_ON(cond)        do {                                    \
+       if (unlikely(cond)) {                                           \
+               pr_emerg("PERCPU: failed to initialize, %s\n", #cond);  \
+               pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);   \
+               pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
 
        /* sanity checks */
        BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
                     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
-       BUG_ON(!static_size);
-       BUG_ON(!base_addr);
-       BUG_ON(unit_size < size_sum);
-       BUG_ON(unit_size & ~PAGE_MASK);
-       BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
-
-       /* determine number of units and verify and initialize pcpu_unit_map */
-       if (unit_map) {
-               int first_unit = INT_MAX, last_unit = INT_MIN;
-
-               for_each_possible_cpu(cpu) {
-                       int unit = unit_map[cpu];
-
-                       BUG_ON(unit < 0);
-                       for_each_possible_cpu(tcpu) {
-                               if (tcpu == cpu)
-                                       break;
-                               /* the mapping should be one-to-one */
-                               BUG_ON(unit_map[tcpu] == unit);
-                       }
+       PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
+       PCPU_SETUP_BUG_ON(!ai->static_size);
+       PCPU_SETUP_BUG_ON(!base_addr);
+       PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
+       PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
+       PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
+
+       /* process group information and build config tables accordingly */
+       group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
+       group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
+       unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
+       unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+               unit_map[cpu] = UINT_MAX;
+       pcpu_first_unit_cpu = NR_CPUS;
+
+       for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
+               const struct pcpu_group_info *gi = &ai->groups[group];
+
+               group_offsets[group] = gi->base_offset;
+               group_sizes[group] = gi->nr_units * ai->unit_size;
+
+               for (i = 0; i < gi->nr_units; i++) {
+                       cpu = gi->cpu_map[i];
+                       if (cpu == NR_CPUS)
+                               continue;
+
+                       PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
+                       PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
+                       PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
+
+                       unit_map[cpu] = unit + i;
+                       unit_off[cpu] = gi->base_offset + i * ai->unit_size;
 
-                       if (unit < first_unit) {
+                       if (pcpu_first_unit_cpu == NR_CPUS)
                                pcpu_first_unit_cpu = cpu;
-                               first_unit = unit;
-                       }
-                       if (unit > last_unit) {
-                               pcpu_last_unit_cpu = cpu;
-                               last_unit = unit;
-                       }
                }
-               pcpu_nr_units = last_unit + 1;
-               pcpu_unit_map = unit_map;
-       } else {
-               int *identity_map;
+       }
+       pcpu_last_unit_cpu = cpu;
+       pcpu_nr_units = unit;
 
-               /* #units == #cpus, identity mapped */
-               identity_map = alloc_bootmem(nr_cpu_ids *
-                                            sizeof(identity_map[0]));
+       for_each_possible_cpu(cpu)
+               PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
 
-               for_each_possible_cpu(cpu)
-                       identity_map[cpu] = cpu;
+       /* we're done parsing the input, undefine BUG macro and dump config */
+#undef PCPU_SETUP_BUG_ON
+       pcpu_dump_alloc_info(KERN_INFO, ai);
 
-               pcpu_first_unit_cpu = 0;
-               pcpu_last_unit_cpu = pcpu_nr_units - 1;
-               pcpu_nr_units = nr_cpu_ids;
-               pcpu_unit_map = identity_map;
-       }
+       pcpu_nr_groups = ai->nr_groups;
+       pcpu_group_offsets = group_offsets;
+       pcpu_group_sizes = group_sizes;
+       pcpu_unit_map = unit_map;
+       pcpu_unit_offsets = unit_off;
 
        /* determine basic parameters */
-       pcpu_unit_pages = unit_size >> PAGE_SHIFT;
+       pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
        pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
-       pcpu_chunk_size = pcpu_nr_units * pcpu_unit_size;
+       pcpu_atom_size = ai->atom_size;
        pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
                BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
 
-       if (dyn_size < 0)
-               dyn_size = pcpu_unit_size - static_size - reserved_size;
-
-       first_vm.flags = VM_ALLOC;
-       first_vm.size = pcpu_chunk_size;
-       first_vm.addr = base_addr;
-
        /*
         * Allocate chunk slots.  The additional last slot is for
         * empty chunks.
@@ -1370,23 +1767,23 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
         */
        schunk = alloc_bootmem(pcpu_chunk_struct_size);
        INIT_LIST_HEAD(&schunk->list);
-       schunk->vm = &first_vm;
+       schunk->base_addr = base_addr;
        schunk->map = smap;
        schunk->map_alloc = ARRAY_SIZE(smap);
        schunk->immutable = true;
        bitmap_fill(schunk->populated, pcpu_unit_pages);
 
-       if (reserved_size) {
-               schunk->free_size = reserved_size;
+       if (ai->reserved_size) {
+               schunk->free_size = ai->reserved_size;
                pcpu_reserved_chunk = schunk;
-               pcpu_reserved_chunk_limit = static_size + reserved_size;
+               pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
        } else {
                schunk->free_size = dyn_size;
                dyn_size = 0;                   /* dynamic area covered */
        }
        schunk->contig_hint = schunk->free_size;
 
-       schunk->map[schunk->map_used++] = -static_size;
+       schunk->map[schunk->map_used++] = -ai->static_size;
        if (schunk->free_size)
                schunk->map[schunk->map_used++] = schunk->free_size;
 
@@ -1394,7 +1791,7 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
        if (dyn_size) {
                dchunk = alloc_bootmem(pcpu_chunk_struct_size);
                INIT_LIST_HEAD(&dchunk->list);
-               dchunk->vm = &first_vm;
+               dchunk->base_addr = base_addr;
                dchunk->map = dmap;
                dchunk->map_alloc = ARRAY_SIZE(dmap);
                dchunk->immutable = true;
@@ -1410,40 +1807,62 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
        pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
        /* we're done */
-       pcpu_base_addr = schunk->vm->addr;
-       return pcpu_unit_size;
+       pcpu_base_addr = base_addr;
+       return 0;
 }
 
-static inline size_t pcpu_calc_fc_sizes(size_t static_size,
-                                       size_t reserved_size,
-                                       ssize_t *dyn_sizep)
-{
-       size_t size_sum;
+const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
+       [PCPU_FC_AUTO]  = "auto",
+       [PCPU_FC_EMBED] = "embed",
+       [PCPU_FC_PAGE]  = "page",
+};
 
-       size_sum = PFN_ALIGN(static_size + reserved_size +
-                            (*dyn_sizep >= 0 ? *dyn_sizep : 0));
-       if (*dyn_sizep != 0)
-               *dyn_sizep = size_sum - static_size - reserved_size;
+enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
 
-       return size_sum;
+static int __init percpu_alloc_setup(char *str)
+{
+       if (0)
+               /* nada */;
+#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
+       else if (!strcmp(str, "embed"))
+               pcpu_chosen_fc = PCPU_FC_EMBED;
+#endif
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+       else if (!strcmp(str, "page"))
+               pcpu_chosen_fc = PCPU_FC_PAGE;
+#endif
+       else
+               pr_warning("PERCPU: unknown allocator %s specified\n", str);
+
+       return 0;
 }
+early_param("percpu_alloc", percpu_alloc_setup);
 
 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
        !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 /**
  * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @atom_size: allocation atom size
+ * @cpu_distance_fn: callback to determine distance between cpus, optional
+ * @alloc_fn: function to allocate percpu page
+ * @free_fn: function to free percpu page
  *
  * This is a helper to ease setting up embedded first percpu chunk and
  * can be called where pcpu_setup_first_chunk() is expected.
  *
  * If this function is used to setup the first chunk, it is allocated
- * as a contiguous area using bootmem allocator and used as-is without
- * being mapped into vmalloc area.  This enables the first chunk to
- * piggy back on the linear physical mapping which often uses larger
- * page size.
+ * by calling @alloc_fn and used as-is without being mapped into
+ * vmalloc area.  Allocations are always whole multiples of @atom_size
+ * aligned to @atom_size.
+ *
+ * This enables the first chunk to piggy back on the linear physical
+ * mapping which often uses larger page size.  Please note that this
+ * can result in very sparse cpu->unit mapping on NUMA machines thus
+ * requiring large vmalloc address space.  Don't use this allocator if
+ * vmalloc space is not orders of magnitude larger than distances
+ * between node memory addresses (ie. 32bit NUMA machines).
  *
  * When @dyn_size is positive, dynamic area might be larger than
  * specified to fill page alignment.  When @dyn_size is auto,
@@ -1451,52 +1870,106 @@ static inline size_t pcpu_calc_fc_sizes(size_t static_size,
  * and reserved areas.
  *
  * If the needed size is smaller than the minimum or specified unit
- * size, the leftover is returned to the bootmem allocator.
+ * size, the leftover is returned using @free_fn.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
-                                     ssize_t dyn_size)
+int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
+                                 size_t atom_size,
+                                 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
+                                 pcpu_fc_alloc_fn_t alloc_fn,
+                                 pcpu_fc_free_fn_t free_fn)
 {
-       size_t size_sum, unit_size, chunk_size;
-       void *base;
-       unsigned int cpu;
+       void *base = (void *)ULONG_MAX;
+       void **areas = NULL;
+       struct pcpu_alloc_info *ai;
+       size_t size_sum, areas_size, max_distance;
+       int group, i, rc;
+
+       ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
+                                  cpu_distance_fn);
+       if (IS_ERR(ai))
+               return PTR_ERR(ai);
+
+       size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
+       areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
+
+       areas = alloc_bootmem_nopanic(areas_size);
+       if (!areas) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
 
-       /* determine parameters and allocate */
-       size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
+       /* allocate, copy and determine base address */
+       for (group = 0; group < ai->nr_groups; group++) {
+               struct pcpu_group_info *gi = &ai->groups[group];
+               unsigned int cpu = NR_CPUS;
+               void *ptr;
 
-       unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
-       chunk_size = unit_size * nr_cpu_ids;
+               for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
+                       cpu = gi->cpu_map[i];
+               BUG_ON(cpu == NR_CPUS);
 
-       base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
-                                      __pa(MAX_DMA_ADDRESS));
-       if (!base) {
-               pr_warning("PERCPU: failed to allocate %zu bytes for "
-                          "embedding\n", chunk_size);
-               return -ENOMEM;
+               /* allocate space for the whole group */
+               ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
+               if (!ptr) {
+                       rc = -ENOMEM;
+                       goto out_free_areas;
+               }
+               areas[group] = ptr;
+
+               base = min(ptr, base);
+
+               for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
+                       if (gi->cpu_map[i] == NR_CPUS) {
+                               /* unused unit, free whole */
+                               free_fn(ptr, ai->unit_size);
+                               continue;
+                       }
+                       /* copy and return the unused part */
+                       memcpy(ptr, __per_cpu_load, ai->static_size);
+                       free_fn(ptr + size_sum, ai->unit_size - size_sum);
+               }
        }
 
-       /* return the leftover and copy */
-       for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-               void *ptr = base + cpu * unit_size;
+       /* base address is now known, determine group base offsets */
+       max_distance = 0;
+       for (group = 0; group < ai->nr_groups; group++) {
+               ai->groups[group].base_offset = areas[group] - base;
+               max_distance = max_t(size_t, max_distance,
+                                    ai->groups[group].base_offset);
+       }
+       max_distance += ai->unit_size;
 
-               if (cpu_possible(cpu)) {
-                       free_bootmem(__pa(ptr + size_sum),
-                                    unit_size - size_sum);
-                       memcpy(ptr, __per_cpu_load, static_size);
-               } else
-                       free_bootmem(__pa(ptr), unit_size);
+       /* warn if maximum distance is further than 75% of vmalloc space */
+       if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+               pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
+                          "space 0x%lx\n",
+                          max_distance, VMALLOC_END - VMALLOC_START);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+               /* and fail if we have fallback */
+               rc = -EINVAL;
+               goto out_free;
+#endif
        }
 
-       /* we're ready, commit */
        pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
-               PFN_DOWN(size_sum), base, static_size, reserved_size, dyn_size,
-               unit_size);
-
-       return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
-                                     unit_size, base, NULL);
+               PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
+               ai->dyn_size, ai->unit_size);
+
+       rc = pcpu_setup_first_chunk(ai, base);
+       goto out_free;
+
+out_free_areas:
+       for (group = 0; group < ai->nr_groups; group++)
+               free_fn(areas[group],
+                       ai->groups[group].nr_units * ai->unit_size);
+out_free:
+       pcpu_free_alloc_info(ai);
+       if (areas)
+               free_bootmem(__pa(areas), areas_size);
+       return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
          !CONFIG_HAVE_SETUP_PER_CPU_AREA */
@@ -1504,7 +1977,6 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
 /**
  * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
- * @static_size: the size of static percpu area in bytes
  * @reserved_size: the size of reserved percpu area in bytes
  * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
  * @free_fn: funtion to free percpu page, always called with PAGE_SIZE
@@ -1517,39 +1989,44 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
  * page-by-page into vmalloc area.
  *
  * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
+ * 0 on success, -errno on failure.
  */
-ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
-                                    pcpu_fc_alloc_fn_t alloc_fn,
-                                    pcpu_fc_free_fn_t free_fn,
-                                    pcpu_fc_populate_pte_fn_t populate_pte_fn)
+int __init pcpu_page_first_chunk(size_t reserved_size,
+                                pcpu_fc_alloc_fn_t alloc_fn,
+                                pcpu_fc_free_fn_t free_fn,
+                                pcpu_fc_populate_pte_fn_t populate_pte_fn)
 {
        static struct vm_struct vm;
+       struct pcpu_alloc_info *ai;
        char psize_str[16];
        int unit_pages;
        size_t pages_size;
        struct page **pages;
-       unsigned int cpu;
-       int i, j;
-       ssize_t ret;
+       int unit, i, j, rc;
 
        snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
 
-       unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
-                                 PCPU_MIN_UNIT_SIZE));
+       ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
+       if (IS_ERR(ai))
+               return PTR_ERR(ai);
+       BUG_ON(ai->nr_groups != 1);
+       BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
+
+       unit_pages = ai->unit_size >> PAGE_SHIFT;
 
        /* unaligned allocations can't be freed, round up to page size */
-       pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0]));
+       pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
+                              sizeof(pages[0]));
        pages = alloc_bootmem(pages_size);
 
        /* allocate pages */
        j = 0;
-       for_each_possible_cpu(cpu)
+       for (unit = 0; unit < num_possible_cpus(); unit++)
                for (i = 0; i < unit_pages; i++) {
+                       unsigned int cpu = ai->groups[0].cpu_map[unit];
                        void *ptr;
 
-                       ptr = alloc_fn(cpu, PAGE_SIZE);
+                       ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr) {
                                pr_warning("PERCPU: failed to allocate %s page "
                                           "for cpu%u\n", psize_str, cpu);
@@ -1560,21 +2037,21 @@ ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
 
        /* allocate vm area, map the pages and copy static data */
        vm.flags = VM_ALLOC;
-       vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT;
+       vm.size = num_possible_cpus() * ai->unit_size;
        vm_area_register_early(&vm, PAGE_SIZE);
 
-       for_each_possible_cpu(cpu) {
-               unsigned long unit_addr = (unsigned long)vm.addr +
-                       (cpu * unit_pages << PAGE_SHIFT);
+       for (unit = 0; unit < num_possible_cpus(); unit++) {
+               unsigned long unit_addr =
+                       (unsigned long)vm.addr + unit * ai->unit_size;
 
                for (i = 0; i < unit_pages; i++)
                        populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
 
                /* pte already populated, the following shouldn't fail */
-               ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages],
-                                      unit_pages);
-               if (ret < 0)
-                       panic("failed to map percpu area, err=%zd\n", ret);
+               rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
+                                     unit_pages);
+               if (rc < 0)
+                       panic("failed to map percpu area, err=%d\n", rc);
 
                /*
                 * FIXME: Archs with virtual cache should flush local
@@ -1585,408 +2062,28 @@ ssize_t __init pcpu_page_first_chunk(size_t static_size, size_t reserved_size,
                 */
 
                /* copy static data */
-               memcpy((void *)unit_addr, __per_cpu_load, static_size);
+               memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
        }
 
        /* we're ready, commit */
-       pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu\n",
-               unit_pages, psize_str, vm.addr, static_size, reserved_size);
+       pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
+               unit_pages, psize_str, vm.addr, ai->static_size,
+               ai->reserved_size, ai->dyn_size);
 
-       ret = pcpu_setup_first_chunk(static_size, reserved_size, -1,
-                                    unit_pages << PAGE_SHIFT, vm.addr, NULL);
+       rc = pcpu_setup_first_chunk(ai, vm.addr);
        goto out_free_ar;
 
 enomem:
        while (--j >= 0)
                free_fn(page_address(pages[j]), PAGE_SIZE);
-       ret = -ENOMEM;
+       rc = -ENOMEM;
 out_free_ar:
        free_bootmem(__pa(pages), pages_size);
-       return ret;
+       pcpu_free_alloc_info(ai);
+       return rc;
 }
 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
 
-#ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
-/**
- * pcpu_lpage_build_unit_map - build unit_map for large page remapping
- * @static_size: the size of static percpu area in bytes
- * @reserved_size: the size of reserved percpu area in bytes
- * @dyn_sizep: in/out parameter for dynamic size, -1 for auto
- * @unit_sizep: out parameter for unit size
- * @unit_map: unit_map to be filled
- * @cpu_distance_fn: callback to determine distance between cpus
- *
- * This function builds cpu -> unit map and determine other parameters
- * considering needed percpu size, large page size and distances
- * between CPUs in NUMA.
- *
- * CPUs which are of LOCAL_DISTANCE both ways are grouped together and
- * may share units in the same large page.  The returned configuration
- * is guaranteed to have CPUs on different nodes on different large
- * pages and >=75% usage of allocated virtual address space.
- *
- * RETURNS:
- * On success, fills in @unit_map, sets *@dyn_sizep, *@unit_sizep and
- * returns the number of units to be allocated.  -errno on failure.
- */
-int __init pcpu_lpage_build_unit_map(size_t static_size, size_t reserved_size,
-                                    ssize_t *dyn_sizep, size_t *unit_sizep,
-                                    size_t lpage_size, int *unit_map,
-                                    pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
-{
-       static int group_map[NR_CPUS] __initdata;
-       static int group_cnt[NR_CPUS] __initdata;
-       int group_cnt_max = 0;
-       size_t size_sum, min_unit_size, alloc_size;
-       int upa, max_upa, uninitialized_var(best_upa);  /* units_per_alloc */
-       int last_allocs;
-       unsigned int cpu, tcpu;
-       int group, unit;
-
-       /*
-        * Determine min_unit_size, alloc_size and max_upa such that
-        * alloc_size is multiple of lpage_size and is the smallest
-        * which can accomodate 4k aligned segments which are equal to
-        * or larger than min_unit_size.
-        */
-       size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, dyn_sizep);
-       min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
-
-       alloc_size = roundup(min_unit_size, lpage_size);
-       upa = alloc_size / min_unit_size;
-       while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
-               upa--;
-       max_upa = upa;
-
-       /* group cpus according to their proximity */
-       for_each_possible_cpu(cpu) {
-               group = 0;
-       next_group:
-               for_each_possible_cpu(tcpu) {
-                       if (cpu == tcpu)
-                               break;
-                       if (group_map[tcpu] == group &&
-                           (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
-                            cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
-                               group++;
-                               goto next_group;
-                       }
-               }
-               group_map[cpu] = group;
-               group_cnt[group]++;
-               group_cnt_max = max(group_cnt_max, group_cnt[group]);
-       }
-
-       /*
-        * Expand unit size until address space usage goes over 75%
-        * and then as much as possible without using more address
-        * space.
-        */
-       last_allocs = INT_MAX;
-       for (upa = max_upa; upa; upa--) {
-               int allocs = 0, wasted = 0;
-
-               if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
-                       continue;
-
-               for (group = 0; group_cnt[group]; group++) {
-                       int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
-                       allocs += this_allocs;
-                       wasted += this_allocs * upa - group_cnt[group];
-               }
-
-               /*
-                * Don't accept if wastage is over 25%.  The
-                * greater-than comparison ensures upa==1 always
-                * passes the following check.
-                */
-               if (wasted > num_possible_cpus() / 3)
-                       continue;
-
-               /* and then don't consume more memory */
-               if (allocs > last_allocs)
-                       break;
-               last_allocs = allocs;
-               best_upa = upa;
-       }
-       *unit_sizep = alloc_size / best_upa;
-
-       /* assign units to cpus accordingly */
-       unit = 0;
-       for (group = 0; group_cnt[group]; group++) {
-               for_each_possible_cpu(cpu)
-                       if (group_map[cpu] == group)
-                               unit_map[cpu] = unit++;
-               unit = roundup(unit, best_upa);
-       }
-
-       return unit;    /* unit contains aligned number of units */
-}
-
-struct pcpul_ent {
-       void            *ptr;
-       void            *map_addr;
-};
-
-static size_t pcpul_size;
-static size_t pcpul_lpage_size;
-static int pcpul_nr_lpages;
-static struct pcpul_ent *pcpul_map;
-
-static bool __init pcpul_unit_to_cpu(int unit, const int *unit_map,
-                                    unsigned int *cpup)
-{
-       unsigned int cpu;
-
-       for_each_possible_cpu(cpu)
-               if (unit_map[cpu] == unit) {
-                       if (cpup)
-                               *cpup = cpu;
-                       return true;
-               }
-
-       return false;
-}
-
-static void __init pcpul_lpage_dump_cfg(const char *lvl, size_t static_size,
-                                       size_t reserved_size, size_t dyn_size,
-                                       size_t unit_size, size_t lpage_size,
-                                       const int *unit_map, int nr_units)
-{
-       int width = 1, v = nr_units;
-       char empty_str[] = "--------";
-       int upl, lpl;   /* units per lpage, lpage per line */
-       unsigned int cpu;
-       int lpage, unit;
-
-       while (v /= 10)
-               width++;
-       empty_str[min_t(int, width, sizeof(empty_str) - 1)] = '\0';
-
-       upl = max_t(int, lpage_size / unit_size, 1);
-       lpl = rounddown_pow_of_two(max_t(int, 60 / (upl * (width + 1) + 2), 1));
-
-       printk("%spcpu-lpage: sta/res/dyn=%zu/%zu/%zu unit=%zu lpage=%zu", lvl,
-              static_size, reserved_size, dyn_size, unit_size, lpage_size);
-
-       for (lpage = 0, unit = 0; unit < nr_units; unit++) {
-               if (!(unit % upl)) {
-                       if (!(lpage++ % lpl)) {
-                               printk("\n");
-                               printk("%spcpu-lpage: ", lvl);
-                       } else
-                               printk("| ");
-               }
-               if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
-                       printk("%0*d ", width, cpu);
-               else
-                       printk("%s ", empty_str);
-       }
-       printk("\n");
-}
-
-/**
- * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
- * @static_size: the size of static percpu area in bytes
- * @reserved_size: the size of reserved percpu area in bytes
- * @dyn_size: free size for dynamic allocation in bytes
- * @unit_size: unit size in bytes
- * @lpage_size: the size of a large page
- * @unit_map: cpu -> unit mapping
- * @nr_units: the number of units
- * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
- * @free_fn: function to free percpu memory, @size <= lpage_size
- * @map_fn: function to map percpu lpage, always called with lpage_size
- *
- * This allocator uses large page to build and map the first chunk.
- * Unlike other helpers, the caller should always specify @dyn_size
- * and @unit_size.  These parameters along with @unit_map and
- * @nr_units can be determined using pcpu_lpage_build_unit_map().
- * This two stage initialization is to allow arch code to evaluate the
- * parameters before committing to it.
- *
- * Large pages are allocated as directed by @unit_map and other
- * parameters and mapped to vmalloc space.  Unused holes are returned
- * to the page allocator.  Note that these holes end up being actively
- * mapped twice - once to the physical mapping and to the vmalloc area
- * for the first percpu chunk.  Depending on architecture, this might
- * cause problem when changing page attributes of the returned area.
- * These double mapped areas can be detected using
- * pcpu_lpage_remapped().
- *
- * RETURNS:
- * The determined pcpu_unit_size which can be used to initialize
- * percpu access on success, -errno on failure.
- */
-ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
-                                     size_t dyn_size, size_t unit_size,
-                                     size_t lpage_size, const int *unit_map,
-                                     int nr_units,
-                                     pcpu_fc_alloc_fn_t alloc_fn,
-                                     pcpu_fc_free_fn_t free_fn,
-                                     pcpu_fc_map_fn_t map_fn)
-{
-       static struct vm_struct vm;
-       size_t chunk_size = unit_size * nr_units;
-       size_t map_size;
-       unsigned int cpu;
-       ssize_t ret;
-       int i, j, unit;
-
-       pcpul_lpage_dump_cfg(KERN_DEBUG, static_size, reserved_size, dyn_size,
-                            unit_size, lpage_size, unit_map, nr_units);
-
-       BUG_ON(chunk_size % lpage_size);
-
-       pcpul_size = static_size + reserved_size + dyn_size;
-       pcpul_lpage_size = lpage_size;
-       pcpul_nr_lpages = chunk_size / lpage_size;
-
-       /* allocate pointer array and alloc large pages */
-       map_size = pcpul_nr_lpages * sizeof(pcpul_map[0]);
-       pcpul_map = alloc_bootmem(map_size);
-
-       /* allocate all pages */
-       for (i = 0; i < pcpul_nr_lpages; i++) {
-               size_t offset = i * lpage_size;
-               int first_unit = offset / unit_size;
-               int last_unit = (offset + lpage_size - 1) / unit_size;
-               void *ptr;
-
-               /* find out which cpu is mapped to this unit */
-               for (unit = first_unit; unit <= last_unit; unit++)
-                       if (pcpul_unit_to_cpu(unit, unit_map, &cpu))
-                               goto found;
-               continue;
-       found:
-               ptr = alloc_fn(cpu, lpage_size);
-               if (!ptr) {
-                       pr_warning("PERCPU: failed to allocate large page "
-                                  "for cpu%u\n", cpu);
-                       goto enomem;
-               }
-
-               pcpul_map[i].ptr = ptr;
-       }
-
-       /* return unused holes */
-       for (unit = 0; unit < nr_units; unit++) {
-               size_t start = unit * unit_size;
-               size_t end = start + unit_size;
-               size_t off, next;
-
-               /* don't free used part of occupied unit */
-               if (pcpul_unit_to_cpu(unit, unit_map, NULL))
-                       start += pcpul_size;
-
-               /* unit can span more than one page, punch the holes */
-               for (off = start; off < end; off = next) {
-                       void *ptr = pcpul_map[off / lpage_size].ptr;
-                       next = min(roundup(off + 1, lpage_size), end);
-                       if (ptr)
-                               free_fn(ptr + off % lpage_size, next - off);
-               }
-       }
-
-       /* allocate address, map and copy */
-       vm.flags = VM_ALLOC;
-       vm.size = chunk_size;
-       vm_area_register_early(&vm, unit_size);
-
-       for (i = 0; i < pcpul_nr_lpages; i++) {
-               if (!pcpul_map[i].ptr)
-                       continue;
-               pcpul_map[i].map_addr = vm.addr + i * lpage_size;
-               map_fn(pcpul_map[i].ptr, lpage_size, pcpul_map[i].map_addr);
-       }
-
-       for_each_possible_cpu(cpu)
-               memcpy(vm.addr + unit_map[cpu] * unit_size, __per_cpu_load,
-                      static_size);
-
-       /* we're ready, commit */
-       pr_info("PERCPU: large pages @%p s%zu r%zu d%zu u%zu\n",
-               vm.addr, static_size, reserved_size, dyn_size, unit_size);
-
-       ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
-                                    unit_size, vm.addr, unit_map);
-
-       /*
-        * Sort pcpul_map array for pcpu_lpage_remapped().  Unmapped
-        * lpages are pushed to the end and trimmed.
-        */
-       for (i = 0; i < pcpul_nr_lpages - 1; i++)
-               for (j = i + 1; j < pcpul_nr_lpages; j++) {
-                       struct pcpul_ent tmp;
-
-                       if (!pcpul_map[j].ptr)
-                               continue;
-                       if (pcpul_map[i].ptr &&
-                           pcpul_map[i].ptr < pcpul_map[j].ptr)
-                               continue;
-
-                       tmp = pcpul_map[i];
-                       pcpul_map[i] = pcpul_map[j];
-                       pcpul_map[j] = tmp;
-               }
-
-       while (pcpul_nr_lpages && !pcpul_map[pcpul_nr_lpages - 1].ptr)
-               pcpul_nr_lpages--;
-
-       return ret;
-
-enomem:
-       for (i = 0; i < pcpul_nr_lpages; i++)
-               if (pcpul_map[i].ptr)
-                       free_fn(pcpul_map[i].ptr, lpage_size);
-       free_bootmem(__pa(pcpul_map), map_size);
-       return -ENOMEM;
-}
-
-/**
- * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
- * @kaddr: the kernel address in question
- *
- * Determine whether @kaddr falls in the pcpul recycled area.  This is
- * used by pageattr to detect VM aliases and break up the pcpu large
- * page mapping such that the same physical page is not mapped under
- * different attributes.
- *
- * The recycled area is always at the tail of a partially used large
- * page.
- *
- * RETURNS:
- * Address of corresponding remapped pcpu address if match is found;
- * otherwise, NULL.
- */
-void *pcpu_lpage_remapped(void *kaddr)
-{
-       unsigned long lpage_mask = pcpul_lpage_size - 1;
-       void *lpage_addr = (void *)((unsigned long)kaddr & ~lpage_mask);
-       unsigned long offset = (unsigned long)kaddr & lpage_mask;
-       int left = 0, right = pcpul_nr_lpages - 1;
-       int pos;
-
-       /* pcpul in use at all? */
-       if (!pcpul_map)
-               return NULL;
-
-       /* okay, perform binary search */
-       while (left <= right) {
-               pos = (left + right) / 2;
-
-               if (pcpul_map[pos].ptr < lpage_addr)
-                       left = pos + 1;
-               else if (pcpul_map[pos].ptr > lpage_addr)
-                       right = pos - 1;
-               else
-                       return pcpul_map[pos].map_addr + offset;
-       }
-
-       return NULL;
-}
-#endif /* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */
-
 /*
  * Generic percpu area setup.
  *
@@ -2003,24 +2100,35 @@ void *pcpu_lpage_remapped(void *kaddr)
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
 
+static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
+                                      size_t align)
+{
+       return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+}
+
+static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
+{
+       free_bootmem(__pa(ptr), size);
+}
+
 void __init setup_per_cpu_areas(void)
 {
-       size_t static_size = __per_cpu_end - __per_cpu_start;
-       ssize_t unit_size;
        unsigned long delta;
        unsigned int cpu;
+       int rc;
 
        /*
         * Always reserve area for module percpu variables.  That's
         * what the legacy allocator did.
         */
-       unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
-                                          PERCPU_DYNAMIC_RESERVE);
-       if (unit_size < 0)
+       rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+                                   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
+                                   pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
+       if (rc < 0)
                panic("Failed to initialized percpu areas.");
 
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
-               __per_cpu_offset[cpu] = delta + cpu * unit_size;
+               __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */