early_res: Add free_early_partial()
arch/x86/kernel/setup_percpu.c (linux-2.6.git)
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
# define DBG(fmt, ...) do { if (0) pr_debug(fmt, ##__VA_ARGS__); } while (0)
#endif

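/* this CPU's number; backs smp_processor_id() on x86 */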
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
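        /* steer allocations above MAX_DMA_ADDRESS so the DMA zone is spared */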
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        return pcpu_alloc_bootmem(cpu, size, align);
}

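/*
 * Free memory obtained from pcpu_fc_alloc().  With CONFIG_NO_BOOTMEM the
 * range was reserved through the early_res mechanism rather than bootmem,
 * so it is handed back with free_early_partial(); otherwise plain
 * free_bootmem() does the job.
 */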
static void __init pcpu_fc_free(void *ptr, size_t size)
{
#ifdef CONFIG_NO_BOOTMEM
        u64 start = __pa(ptr);
        u64 end = start + size;
        free_early_partial(start, end);
#else
        free_bootmem(__pa(ptr), size);
#endif
}

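/*
 * free_early_partial() is the helper this commit adds (in
 * kernel/early_res.c, not shown in this file).  Presumably, unlike
 * free_early(), which expects @start/@end to match an existing early
 * reservation exactly, the _partial variant may free a sub-range of a
 * reservation.  Only the declaration below is implied by the call site
 * above:
 *
 *      void free_early_partial(u64 start, u64 end);
 */
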
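/*
 * Distance callback for pcpu_embed_first_chunk(): CPUs on the same NUMA
 * node are LOCAL_DISTANCE apart, all others REMOTE_DISTANCE.
 */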
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
#else
        return LOCAL_DISTANCE;
#endif
}

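/* populate a kernel page table entry for @addr (page-mapped first chunk) */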
static void __init pcpup_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

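/*
 * On 32-bit, percpu accesses go through a dedicated GDT entry.  Install a
 * writable data segment covering all of memory (limit 0xFFFFF with 4K
 * granularity) whose base is this CPU's percpu offset.
 */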
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
        unsigned int cpu;
        unsigned long delta;
        int rc;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate the percpu area.  The embedding allocator is preferred;
         * however, on NUMA configurations it can produce a very sparse unit
         * mapping, and on 32-bit the vmalloc area isn't spacious enough for
         * that.  Use page mapping in that case.
         */
#ifdef CONFIG_X86_32
        if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
                pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
        rc = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
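                /*
                 * On 64-bit, PERCPU_FIRST_CHUNK_RESERVE equals
                 * PERCPU_MODULE_RESERVE, so the module reserve cancels out
                 * and dyn_size is just PERCPU_DYNAMIC_RESERVE; on 32-bit
                 * the reserve is folded into the dynamic area instead.
                 */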
                const size_t dyn_size = PERCPU_MODULE_RESERVE +
                        PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;

                rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                            dyn_size, atom_size,
                                            pcpu_cpu_distance,
                                            pcpu_fc_alloc, pcpu_fc_free);
                if (rc < 0)
                        pr_warning("%s allocator failed (%d), falling back to page size\n",
                                   pcpu_fc_names[pcpu_chosen_fc], rc);
        }
        if (rc < 0)
                rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                           pcpu_fc_alloc, pcpu_fc_free,
                                           pcpup_populate_pte);
        if (rc < 0)
                panic("cannot initialize percpu area (err=%d)", rc);

        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
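                /*
                 * Point irq_stack_ptr at this CPU's IRQ stack, leaving a
                 * 64-byte gap at the very top of the stack.
                 */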
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using the
                 * .init.data area.  Reload any changed state for the
                 * boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        /*
         * Make sure the boot CPU's node_number is correct, in case the
         * boot CPU sits on a node that has no memory installed.
         */
        per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}