[PATCH] x86_64: Don't assume future AMD CPUs have K8 compatible performance counters
[linux-2.6.git] / arch / x86_64 / kernel / nmi.c
1 /*
2  *  linux/arch/x86_64/nmi.c
3  *
4  *  NMI watchdog support on APIC systems
5  *
6  *  Started by Ingo Molnar <mingo@redhat.com>
7  *
8  *  Fixes:
9  *  Mikael Pettersson   : AMD K7 support for local APIC NMI watchdog.
10  *  Mikael Pettersson   : Power Management for local APIC NMI watchdog.
11  *  Pavel Machek and
12  *  Mikael Pettersson   : PM converted to driver model. Disable/enable API.
13  */
14
15 #include <linux/config.h>
16 #include <linux/mm.h>
17 #include <linux/irq.h>
18 #include <linux/delay.h>
19 #include <linux/bootmem.h>
20 #include <linux/smp_lock.h>
21 #include <linux/interrupt.h>
22 #include <linux/mc146818rtc.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/sysdev.h>
26 #include <linux/nmi.h>
27 #include <linux/sysctl.h>
28
29 #include <asm/smp.h>
30 #include <asm/mtrr.h>
31 #include <asm/mpspec.h>
32 #include <asm/nmi.h>
33 #include <asm/msr.h>
34 #include <asm/proto.h>
35 #include <asm/kdebug.h>
36
37 /*
38  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
39  * - it may be reserved by some other driver, or not
40  * - when not reserved by some other driver, it may be used for
41  *   the NMI watchdog, or not
42  *
43  * This is maintained separately from nmi_active because the NMI
44  * watchdog may also be driven from the I/O APIC timer.
45  */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG      (1<<0)	/* lapic NMI owned by the watchdog */
#define LAPIC_NMI_RESERVED      (1<<1)	/* lapic NMI reserved by another driver */

/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;         /* oprofile uses this */
int panic_on_timeout;   /* set by "nmi_watchdog=panic,..." boot option */

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;        /* watchdog tick rate; lowered after the boot check */
unsigned int nmi_perfctr_msr;   /* the MSR to reset in NMI handler */
63
64 /* Note that these events don't tick when the CPU idles. This means
65    the frequency varies with CPU load. */
66
67 #define K7_EVNTSEL_ENABLE       (1 << 22)
68 #define K7_EVNTSEL_INT          (1 << 20)
69 #define K7_EVNTSEL_OS           (1 << 17)
70 #define K7_EVNTSEL_USR          (1 << 16)
71 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
72 #define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
73
74 #define P6_EVNTSEL0_ENABLE      (1 << 22)
75 #define P6_EVNTSEL_INT          (1 << 20)
76 #define P6_EVNTSEL_OS           (1 << 17)
77 #define P6_EVNTSEL_USR          (1 << 16)
78 #define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
79 #define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED
80
81 /* Run after command line and cpu_init init, but before all other checks */
82 void __init nmi_watchdog_default(void)
83 {
84         if (nmi_watchdog != NMI_DEFAULT)
85                 return;
86
87         /* For some reason the IO APIC watchdog doesn't work on the AMD
88            8111 chipset. For now switch to local APIC mode using
89            perfctr0 there.  On Intel CPUs we don't have code to handle
90            the perfctr and the IO-APIC seems to work, so use that.  */
91
92         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
93                 nmi_watchdog = NMI_LOCAL_APIC; 
94                 printk(KERN_INFO 
95               "Using local APIC NMI watchdog using perfctr0\n");
96         } else {
97                 printk(KERN_INFO "Using IO APIC NMI watchdog\n");
98                 nmi_watchdog = NMI_IO_APIC;
99         }
100 }
101
102 /* Why is there no CPUID flag for this? */
103 static __init int cpu_has_lapic(void)
104 {
105         switch (boot_cpu_data.x86_vendor) { 
106         case X86_VENDOR_INTEL:
107         case X86_VENDOR_AMD: 
108                 return boot_cpu_data.x86 >= 6; 
109         /* .... add more cpus here or find a different way to figure this out. */       
110         default:
111                 return 0;
112         }       
113 }
114
/*
 * Boot-time sanity check for the NMI watchdog: snapshot each cpu's NMI
 * count, wait roughly 10 watchdog ticks, and verify the counts moved.
 * Returns 0 on success, -1 if the watchdog is unusable or stuck (in
 * which case nmi_active is cleared and watchdog ownership dropped).
 */
int __init check_nmi_watchdog (void)
{
        int counts[NR_CPUS];
        int cpu;

        /* The local APIC mode is pointless without a local APIC. */
        if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic())  {
                nmi_watchdog = NMI_NONE;
                return -1; 
        }       

        printk(KERN_INFO "testing NMI watchdog ... ");

        /* Snapshot the per-cpu NMI counts before the delay. */
        for (cpu = 0; cpu < NR_CPUS; cpu++)
                counts[cpu] = cpu_pda[cpu].__nmi_count; 
        local_irq_enable();
        mdelay((10*1000)/nmi_hz); // wait 10 ticks

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_SMP
                /* Check cpu_callin_map here because that is set
                   after the timer is started. */
                if (!cpu_isset(cpu, cpu_callin_map))
                        continue;
#endif
                /* Fewer than 6 new NMIs over ~10 ticks: call it stuck. */
                if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
                        printk("CPU#%d: NMI appears to be stuck (%d)!\n", 
                               cpu,
                               cpu_pda[cpu].__nmi_count);
                        nmi_active = 0;
                        lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
                        return -1;
                }
        }
        printk("OK.\n");

        /* now that we know it works we can reduce NMI frequency to
           something more reasonable; makes a difference in some configs */
        if (nmi_watchdog == NMI_LOCAL_APIC)
                nmi_hz = 1;

        return 0;
}
157
158 int __init setup_nmi_watchdog(char *str)
159 {
160         int nmi;
161
162         if (!strncmp(str,"panic",5)) {
163                 panic_on_timeout = 1;
164                 str = strchr(str, ',');
165                 if (!str)
166                         return 1;
167                 ++str;
168         }
169
170         get_option(&str, &nmi);
171
172         if (nmi >= NMI_INVALID)
173                 return 0;
174                 nmi_watchdog = nmi;
175         return 1;
176 }
177
/* Register "nmi_watchdog=" as a boot command line option. */
__setup("nmi_watchdog=", setup_nmi_watchdog);
179
180 static void disable_lapic_nmi_watchdog(void)
181 {
182         if (nmi_active <= 0)
183                 return;
184         switch (boot_cpu_data.x86_vendor) {
185         case X86_VENDOR_AMD:
186                 wrmsr(MSR_K7_EVNTSEL0, 0, 0);
187                 break;
188         case X86_VENDOR_INTEL:
189                 wrmsr(MSR_IA32_EVNTSEL0, 0, 0);
190                 break;
191         }
192         nmi_active = -1;
193         /* tell do_nmi() and others that we're not active any more */
194         nmi_watchdog = 0;
195 }
196
197 static void enable_lapic_nmi_watchdog(void)
198 {
199         if (nmi_active < 0) {
200                 nmi_watchdog = NMI_LOCAL_APIC;
201                 setup_apic_nmi_watchdog();
202         }
203 }
204
205 int reserve_lapic_nmi(void)
206 {
207         unsigned int old_owner;
208
209         spin_lock(&lapic_nmi_owner_lock);
210         old_owner = lapic_nmi_owner;
211         lapic_nmi_owner |= LAPIC_NMI_RESERVED;
212         spin_unlock(&lapic_nmi_owner_lock);
213         if (old_owner & LAPIC_NMI_RESERVED)
214                 return -EBUSY;
215         if (old_owner & LAPIC_NMI_WATCHDOG)
216                 disable_lapic_nmi_watchdog();
217         return 0;
218 }
219
220 void release_lapic_nmi(void)
221 {
222         unsigned int new_owner;
223
224         spin_lock(&lapic_nmi_owner_lock);
225         new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
226         lapic_nmi_owner = new_owner;
227         spin_unlock(&lapic_nmi_owner_lock);
228         if (new_owner & LAPIC_NMI_WATCHDOG)
229                 enable_lapic_nmi_watchdog();
230 }
231
232 void disable_timer_nmi_watchdog(void)
233 {
234         if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
235                 return;
236
237         disable_irq(0);
238         unset_nmi_callback();
239         nmi_active = -1;
240         nmi_watchdog = NMI_NONE;
241 }
242
243 void enable_timer_nmi_watchdog(void)
244 {
245         if (nmi_active < 0) {
246                 nmi_watchdog = NMI_IO_APIC;
247                 touch_nmi_watchdog();
248                 nmi_active = 1;
249                 enable_irq(0);
250         }
251 }
252
253 #ifdef CONFIG_PM
254
static int nmi_pm_active; /* nmi_active before suspend */

/* Suspend hook: remember whether the lapic watchdog was running,
   then shut it down for the duration of the sleep. */
static int lapic_nmi_suspend(struct sys_device *dev, u32 state)
{
        nmi_pm_active = nmi_active;
        disable_lapic_nmi_watchdog();
        return 0;
}
263
264 static int lapic_nmi_resume(struct sys_device *dev)
265 {
266         if (nmi_pm_active > 0)
267         enable_lapic_nmi_watchdog();
268         return 0;
269 }
270
/* sysdev class hooking the lapic NMI watchdog into the driver-model
   suspend/resume path. */
static struct sysdev_class nmi_sysclass = {
        set_kset_name("lapic_nmi"),
        .resume         = lapic_nmi_resume,
        .suspend        = lapic_nmi_suspend,
};

/* The single device instance registered under nmi_sysclass. */
static struct sys_device device_lapic_nmi = {
        .id             = 0,
        .cls    = &nmi_sysclass,
};
281
282 static int __init init_lapic_nmi_sysfs(void)
283 {
284         int error;
285
286         if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
287                 return 0;
288
289         error = sysdev_class_register(&nmi_sysclass);
290         if (!error)
291                 error = sysdev_register(&device_lapic_nmi);
292         return error;
293 }
294 /* must come after the local APIC's device_initcall() */
295 late_initcall(init_lapic_nmi_sysfs);
296
297 #endif  /* CONFIG_PM */
298
299 /*
300  * Activate the NMI watchdog via the local APIC.
301  * Original code written by Keith Owens.
302  */
303
/*
 * Program perfctr0 on a K7/K8-style AMD CPU to count unhalted cycles
 * and raise an NMI (via LVTPC) on counter overflow, roughly nmi_hz
 * times per second.  Bails out silently if the event-select MSR write
 * faults (e.g. on a simulator).
 */
static void setup_k7_watchdog(void)
{
        int i;
        unsigned int evntsel;

        /* No check, so can start with slow frequency */
        nmi_hz = 1; 

        /* XXX should check these in EFER */

        nmi_perfctr_msr = MSR_K7_PERFCTR0;

        /* Clear all four event-select/counter pairs before use. */
        for(i = 0; i < 4; ++i) {
                /* Simulator may not support it */
                if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL))
                        return;
                wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
        }

        /* Count cycles in both kernel (OS) and user mode, and fire an
           interrupt on overflow (delivered as NMI via LVTPC below). */
        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
        /* Negative start value so the counter overflows after about
           cpu_khz*1000/nmi_hz cycles, i.e. one period. */
        wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz*1000) / nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* Enable only after the counter and LVTPC are programmed. */
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
334
/*
 * Activate the perfctr-based lapic NMI watchdog for the boot cpu's
 * vendor.  On success, takes ownership of the lapic NMI and marks
 * the watchdog active; silently does nothing for unsupported CPUs.
 */
void setup_apic_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                /* Only family 15 (K8) is known to have K7-compatible
                   performance counters; don't assume future AMD
                   families do. */
                if (boot_cpu_data.x86 != 15)
                        return;
                /* NOTE(review): "Screwdriver" in the model string
                   appears to identify a simulated CPU without working
                   perfctrs -- confirm against the simulator docs. */
                if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
                        return;
                setup_k7_watchdog();
                break;
        default:
                /* No perfctr setup code for other vendors yet. */
                return;
        }
        lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
        nmi_active = 1;
}
351
352 /*
353  * the best way to detect whether a CPU has a 'hard lockup' problem
354  * is to check it's local APIC timer IRQ counts. If they are not
355  * changing then that CPU has some problem.
356  *
357  * as these watchdog NMI IRQs are generated on every CPU, we only
358  * have to check the current processor.
359  *
360  * since NMIs don't listen to _any_ locks, we have to be extremely
361  * careful not to rely on unsafe variables. The printk might lock
362  * up though, so we have to break up any console locks first ...
363  * [when there will be more tty-related locks, break them up
364  *  here too!]
365  */
366
/* Per-cpu lockup-detection state: the last observed apic timer irq
   count, and how many watchdog ticks it has been unchanged. */
static unsigned int
        last_irq_sums [NR_CPUS],
        alert_counter [NR_CPUS];

/* Reset the lockup counters for all cpus, deferring any imminent
   watchdog trigger.  Call this around legitimately long operations. */
void touch_nmi_watchdog (void)
{
        int i;

        /*
         * Just reset the alert counters, (other CPUs might be
         * spinning on locks we hold):
         */
        for (i = 0; i < NR_CPUS; i++)
                alert_counter[i] = 0;
}
382
/*
 * Called from the NMI handler on every watchdog NMI.  If this cpu's
 * local APIC timer irq count has not advanced for 5 seconds worth of
 * ticks, report a lockup -- unless a die notifier claims the event.
 * Runs in NMI context: must not take locks.
 */
void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
{
        int sum, cpu;

        cpu = safe_smp_processor_id();
        sum = read_pda(apic_timer_irqs);
        if (last_irq_sums[cpu] == sum) {
                /*
                 * Ayiee, looks like this CPU is stuck ...
                 * wait a few IRQs (5 seconds) before doing the oops ...
                 */
                alert_counter[cpu]++;
                if (alert_counter[cpu] == 5*nmi_hz) {
                        /* Give die-chain notifiers (e.g. a debugger)
                           first shot at the event. */
                        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP) {
                                alert_counter[cpu] = 0; 
                                return;
                        } 
                        die_nmi("NMI Watchdog detected LOCKUP on CPU%d", regs);
                }
        } else {
                last_irq_sums[cpu] = sum;
                alert_counter[cpu] = 0;
        }
        /* Rearm the perfctr for the next period (lapic mode only). */
        if (nmi_perfctr_msr)
                wrmsr(nmi_perfctr_msr, -(cpu_khz/nmi_hz*1000), -1);
}
410
/* Default NMI callback: claims nothing, so do_nmi() falls through to
   default_do_nmi(). */
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}
 
/* Currently installed NMI callback; swapped via set_nmi_callback(). */
static nmi_callback_t nmi_callback = dummy_nmi_callback;
417  
/* Top-level NMI entry point: bump the per-cpu NMI count, offer the
   event to the installed callback, and fall back to default_do_nmi()
   if the callback does not claim it. */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu = safe_smp_processor_id();

        nmi_enter();
        add_pda(__nmi_count,1);
        if (!nmi_callback(regs, cpu))
                default_do_nmi(regs);
        nmi_exit();
}
428
/* Install a callback that gets first shot at every NMI.  No locking:
   a single pointer store (caller must serialize against other users). */
void set_nmi_callback(nmi_callback_t callback)
{
        nmi_callback = callback;
}
433
/* Restore the default (no-op) NMI callback. */
void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
438
439 #ifdef CONFIG_SYSCTL
440
/*
 * NMI callback for the unknown_nmi_panic sysctl: die on any NMI whose
 * reason byte has neither bit 0x80 nor bit 0x40 set (i.e. one the
 * platform cannot account for).  Always returns 0 so default
 * processing still runs when the NMI is accounted for.
 */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	if (reason & 0xc0)
		return 0;

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs);
	return 0;
}
452
453 /*
454  * proc handler for /proc/sys/kernel/unknown_nmi_panic
455  */
456 int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
457                         void __user *buffer, size_t *length, loff_t *ppos)
458 {
459         int old_state;
460
461         old_state = unknown_nmi_panic;
462         proc_dointvec(table, write, file, buffer, length, ppos);
463         if (!!old_state == !!unknown_nmi_panic)
464                 return 0;
465
466         if (unknown_nmi_panic) {
467                 if (reserve_lapic_nmi() < 0) {
468                         unknown_nmi_panic = 0;
469                         return -EBUSY;
470                 } else {
471                         set_nmi_callback(unknown_nmi_panic_callback);
472                 }
473         } else {
474                 release_lapic_nmi();
475                 unset_nmi_callback();
476         }
477         return 0;
478 }
479
480 #endif
481
/* Exports for modular users of the NMI machinery (nmi_active is used
   by oprofile -- see its declaration above). */
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);