linux-2.6.git: arch/x86/kernel/apic/apic_flat_64.c
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Flat APIC subarch code.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif

static struct apic apic_physflat;
static struct apic apic_flat;

struct apic __read_mostly *apic = &apic_flat;
EXPORT_SYMBOL_GPL(apic);

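/*
 * Logical flat mode does not depend on any ACPI/MADT specifics, so the OEM
 * check always reports a match; flat is the fallback driver unless physflat
 * claims the system first (see apic_drivers() at the bottom of this file).
 */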
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return 1;
}

static const struct cpumask *flat_target_cpus(void)
{
        return cpu_online_mask;
}

static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        /* Careful. Some cpus do not strictly honor the set of cpus
         * specified in the interrupt destination when using lowest
         * priority interrupt delivery mode.
         *
         * In particular there was a hyperthreading cpu observed to
         * deliver interrupts to the wrong hyperthread when only one
         * hyperthread was specified in the interrupt destination.
         */
        cpumask_clear(retmask);
        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void flat_init_apic_ldr(void)
{
        unsigned long val;
        unsigned long num, id;

        num = smp_processor_id();
        id = 1UL << num;
        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        val |= SET_APIC_LOGICAL_ID(id);
        apic_write(APIC_LDR, val);
}

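/*
 * Write the logical destination bitmask and vector into the ICR.  Interrupts
 * are disabled so the two ICR register writes cannot be interleaved with
 * another IPI sent from an interrupt handler on this CPU.
 */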
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
        local_irq_restore(flags);
}

static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];

        _flat_send_IPI_mask(mask, vector);
}

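/* As flat_send_IPI_mask(), but with the sending CPU cleared from the mask. */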
static void
 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        int cpu = smp_processor_id();

        if (cpu < BITS_PER_LONG)
                clear_bit(cpu, &mask);

        _flat_send_IPI_mask(mask, vector);
}

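/*
 * With CPU hotplug enabled, or for an NMI, the destination set is built by
 * hand from cpu_online_mask minus the sending CPU; otherwise the ALLBUT
 * hardware shortcut is used when more than one CPU is online.
 */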
static void flat_send_IPI_allbutself(int vector)
{
        int cpu = smp_processor_id();
#ifdef  CONFIG_HOTPLUG_CPU
        int hotplug = 1;
#else
        int hotplug = 0;
#endif
        if (hotplug || vector == NMI_VECTOR) {
                if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];

                        if (cpu < BITS_PER_LONG)
                                clear_bit(cpu, &mask);

                        _flat_send_IPI_mask(mask, vector);
                }
        } else if (num_online_cpus() > 1) {
                __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
                                            vector, apic->dest_logical);
        }
}

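/* NMIs go out via the explicit online mask; everything else uses ALLINC. */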
static void flat_send_IPI_all(int vector)
{
        if (vector == NMI_VECTOR) {
                flat_send_IPI_mask(cpu_online_mask, vector);
        } else {
                __default_send_IPI_shortcut(APIC_DEST_ALLINC,
                                            vector, apic->dest_logical);
        }
}

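/* The xAPIC ID is stored in bits 31:24 of the APIC_ID register. */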
static unsigned int flat_get_apic_id(unsigned long x)
{
        unsigned int id;

        id = (((x)>>24) & 0xFFu);

        return id;
}

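/* Inverse of flat_get_apic_id(): place an 8-bit APIC ID into bits 31:24. */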
static unsigned long set_apic_id(unsigned int id)
{
        unsigned long x;

        x = ((id & 0xFFu)<<24);
        return x;
}

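/* Read the current CPU's hardware APIC ID from the APIC_ID register. */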
static unsigned int read_xapic_id(void)
{
        unsigned int id;

        id = flat_get_apic_id(apic_read(APIC_ID));
        return id;
}

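/* True if this CPU's APIC ID appears in the firmware-provided physical ID map. */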
static int flat_apic_id_registered(void)
{
        return physid_isset(read_xapic_id(), phys_cpu_present_map);
}

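/* Shift the core/thread bits out of the initial APIC ID to get the package ID. */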
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
        return initial_apic_id >> index_msb;
}

static struct apic apic_flat =  {
        .name                           = "flat",
        .probe                          = NULL,
        .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = flat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = flat_vector_allocation_domain,
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = flat_send_IPI_mask,
        .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = flat_send_IPI_allbutself,
        .send_IPI_all                   = flat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};

/*
 * Physflat mode is used when there are more than 8 CPUs on a system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
        /*
         * Quirk: some x86_64 machines can only use physical APIC mode
         * regardless of how many processors are present (x86_64 ES7000
         * is an example).
         */
        if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "system APIC only can use physical flat\n");
                return 1;
        }

        if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
                printk(KERN_DEBUG "IBM Summit detected, will use apic physical\n");
                return 1;
        }
#endif

        return 0;
}

static const struct cpumask *physflat_target_cpus(void)
{
        return cpu_online_mask;
}

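/*
 * In physical mode a vector targets a single CPU rather than a logical
 * cluster, so the allocation domain for a CPU is just that CPU.
 */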
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
        default_send_IPI_mask_sequence_phys(cpumask, vector);
}

static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
                                              int vector)
{
        default_send_IPI_mask_allbutself_phys(cpumask, vector);
}

static void physflat_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void physflat_send_IPI_all(int vector)
{
        physflat_send_IPI_mask(cpu_online_mask, vector);
}

static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        cpu = cpumask_first(cpumask);
        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }
        return per_cpu(x86_cpu_to_apicid, cpu);
}

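/*
 * physflat is chosen when logical flat cannot address every CPU (more than 8
 * possible CPUs) or when an earlier MADT/OEM check already switched the
 * global apic pointer to apic_physflat.
 */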
static int physflat_probe(void)
{
        if (apic == &apic_physflat || num_possible_cpus() > 8)
                return 1;

        return 0;
}

static struct apic apic_physflat =  {

        .name                           = "physical flat",
        .probe                          = physflat_probe,
        .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
        .apic_id_registered             = flat_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */

        .target_cpus                    = physflat_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = physflat_vector_allocation_domain,
        /* not needed, but shouldn't hurt: */
        .init_apic_ldr                  = flat_init_apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = flat_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFu << 24,

        .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = physflat_send_IPI_mask,
        .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = physflat_send_IPI_allbutself,
        .send_IPI_all                   = physflat_send_IPI_all,
        .send_IPI_self                  = apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
};

/*
 * We need to check for physflat first, so this order is important.
 */
apic_drivers(apic_physflat, apic_flat);