arch/x86/kernel/setup_percpu.c (linux-2.6.git, b961d99e64165e90231d663eccf3626934652c9c)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
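
/*
 * Every offset starts out as BOOT_PERCPU_OFFSET and is fixed up in
 * setup_per_cpu_areas(), so early percpu accesses resolve to the
 * init-time copy of the percpu section.
 */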

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32-bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere; no need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation should consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

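        /*
         * If two possible CPUs map to different online nodes with
         * initialized node data, percpu placement must be NUMA aware.
         */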
        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
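        /* bootmem "goal": prefer memory above the DMA zone where possible */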
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size)
{
        return pcpu_alloc_bootmem(cpu, size, size);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}
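
/*
 * pcpu_fc_alloc()/pcpu_fc_free() above are handed to the generic
 * pcpu_*_first_chunk() helpers below as the backing memory alloc/free
 * callbacks.
 */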

/*
 * Large page remapping allocator
 */
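/*
 * Units are allocated node-locally via bootmem and then remapped into
 * a contiguous chunk in the vmalloc area using PMD-sized large pages
 * (see pcpul_map() below), combining node-local backing with large
 * page TLB coverage.
 */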
#ifdef CONFIG_NEED_MULTIPLE_NODES
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
        pmd_t *pmd, pmd_v;

        pmd = populate_extra_pmd((unsigned long)addr);
        pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
        set_pmd(pmd, pmd_v);
}

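/*
 * Crude CPU distance for the unit map builder: CPUs on the same node
 * are local, everything else is remote.
 */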
static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
}

static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
        size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
        size_t unit_map_size, unit_size;
        int *unit_map;
        int nr_units;
        ssize_t ret;

        /* on non-NUMA, embedding is better */
        if (!chosen && !pcpu_need_numa())
                return -EINVAL;

        /* need PSE */
        if (!cpu_has_pse) {
                pr_warning("PERCPU: lpage allocator requires PSE\n");
                return -EINVAL;
        }

        /* allocate and build unit_map */
        unit_map_size = nr_cpu_ids * sizeof(int);
        unit_map = alloc_bootmem_nopanic(unit_map_size);
        if (!unit_map) {
                pr_warning("PERCPU: failed to allocate unit_map\n");
                return -ENOMEM;
        }

        ret = pcpu_lpage_build_unit_map(static_size,
                                        PERCPU_FIRST_CHUNK_RESERVE,
                                        &dyn_size, &unit_size, PMD_SIZE,
                                        unit_map, pcpu_lpage_cpu_distance);
        if (ret < 0) {
                pr_warning("PERCPU: failed to build unit_map\n");
                goto out_free;
        }
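        /* on success, the returned value is the number of units in the map */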
        nr_units = ret;

        /* do the parameters look okay? */
        if (!chosen) {
                size_t vm_size = VMALLOC_END - VMALLOC_START;
                size_t tot_size = nr_units * unit_size;

                /* don't consume more than 20% of the vmalloc area */
                if (tot_size > vm_size / 5) {
                        pr_info("PERCPU: too large chunk size %zuMB for "
                                "large page remap\n", tot_size >> 20);
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        ret = pcpu_lpage_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                     dyn_size, unit_size, PMD_SIZE,
                                     unit_map, nr_units,
                                     pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
out_free:
        if (ret < 0)
                free_bootmem(__pa(unit_map), unit_map_size);
        return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into the linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        /*
         * If large pages aren't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
                return -EINVAL;

        return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}
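
/*
 * Note: embedding places all units in a single contiguous block in
 * the linear mapping, which is why it cannot honor node-local
 * placement and is skipped (unless explicitly chosen) when
 * pcpu_need_numa() is true.
 */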

/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(size_t static_size)
{
        return pcpu_page_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                     pcpu_fc_alloc, pcpu_fc_free,
                                     pcpup_populate_pte);
}

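/*
 * On x86_32 the percpu base is exposed through a dedicated GDT entry
 * (GDT_ENTRY_PERCPU); on x86_64 the base is carried in the GS base
 * MSR instead, so there is nothing to do here.
 */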
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_AUTO) {
                if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                        if (pcpu_chosen_fc == PCPU_FC_LPAGE)
                                ret = setup_pcpu_lpage(static_size, true);
                        else
                                ret = setup_pcpu_embed(static_size, true);

                        if (ret < 0)
                                pr_warning("PERCPU: %s allocator failed (%zd), "
                                           "falling back to page size\n",
                                           pcpu_fc_names[pcpu_chosen_fc], ret);
                }
        } else {
                ret = setup_pcpu_lpage(static_size, false);
                if (ret < 0)
                        ret = setup_pcpu_embed(static_size, false);
        }
        if (ret < 0)
                ret = setup_pcpu_page(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
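        /*
         * Each CPU's final offset is the distance between its unit in
         * the first chunk and the linked address of the static percpu
         * section.
         */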
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) =
                        delta + pcpu_unit_map[cpu] * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
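                /*
                 * IRQ stacks grow down; point irq_stack_ptr near the
                 * top of this CPU's stack, leaving 64 bytes headroom.
                 */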
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using the
                 * .data.init area.  Reload any changed state for the
                 * boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        /*
         * Make sure the boot cpu's node_number is correct when the
         * boot cpu sits on a node with no memory installed.
         */
        per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}