arch/x86/kernel/cpu/mcheck/mce_amd_64.c
x86 MCE: Fix CPU hotplug problem with multiple multicore AMD CPUs
/*
 *  (c) 2005, 2006 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Support : jacob.shin@amd.com
 *
 *  April 2006
 *     - added support for AMD Family 0x10 processors
 *
 *  All MC4_MISCi registers are shared between the cores of a multi-core CPU
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#include <asm/idle.h>

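/*
 * The MASK_*_HI values below select bit fields in the upper 32 bits of the
 * MCi_MISC thresholding registers (valid, counter-present, locked, LVT
 * offset, counter enable, interrupt type, overflow, error count).
 * MASK_BLKPTR_LO selects the block-pointer field in the lower 32 bits,
 * which locates additional MISC blocks relative to MCG_XBLK_ADDR.
 */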
#define PFX               "mce_threshold: "
#define VERSION           "version 1.1.1"
#define NR_BANKS          6
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

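/*
 * One threshold_block per valid MISC (thresholding) register of a bank.
 * All blocks that belong to the same bank are chained through ->miscj off
 * the bank's first block, and each block is exported to sysfs through its
 * embedded kobject (as "misc%i").
 */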
struct threshold_block {
        unsigned int block;
        unsigned int bank;
        unsigned int cpu;
        u32 address;
        u16 interrupt_enable;
        u16 threshold_limit;
        struct kobject kobj;
        struct list_head miscj;
};

/* defaults used early on boot */
static struct threshold_block threshold_defaults = {
        .interrupt_enable = 0,
        .threshold_limit = THRESHOLD_MAX,
};

struct threshold_bank {
        struct kobject *kobj;
        struct threshold_block *blocks;
        cpumask_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

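/*
 * A non-zero entry marks a bank whose MCi_MISC registers are shared between
 * the cores of a package (see the MC4_MISC note at the top of this file);
 * only bank 4 is shared.  For a shared bank, just one core programs the
 * thresholding registers and owns the sysfs directory.
 */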
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
        0, 0, 0, 0, 1
};
#endif

static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */

/*
 * CPU Initialization
 */

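/*
 * The hardware counter is programmed to THRESHOLD_MAX - threshold_limit so
 * that it overflows after exactly threshold_limit errors (raising the
 * threshold interrupt when interrupts are enabled).  'reset' clears the
 * error count and the overflow bit; a non-zero 'old_limit' adjusts the
 * running count in place when the limit changes without a reset.
 */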
/* must be called with correct cpu affinity */
static void threshold_restart_bank(struct threshold_block *b,
                                   int reset, u16 old_limit)
{
        u32 mci_misc_hi, mci_misc_lo;

        rdmsr(b->address, mci_misc_lo, mci_misc_hi);

        if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
                reset = 1;      /* limit cannot be lower than err count */

        if (reset) {            /* reset err count and overflow bit */
                mci_misc_hi =
                    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - b->threshold_limit);
        } else if (old_limit) { /* change limit w/o reset */
                int new_count = (mci_misc_hi & THRESHOLD_MAX) +
                    (old_limit - b->threshold_limit);
                mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        b->interrupt_enable ?
            (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
            (mci_misc_hi &= ~MASK_INT_TYPE_HI);

        mci_misc_hi |= MASK_COUNT_EN_HI;
        wrmsr(b->address, mci_misc_lo, mci_misc_hi);
}

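/*
 * Block 0 of a bank lives at MSR_IA32_MC0_MISC + bank * 4.  If its low word
 * carries a non-zero block pointer (MASK_BLKPTR_LO), the remaining blocks of
 * that bank live contiguously starting at MCG_XBLK_ADDR plus the pointed-to
 * offset.  Both the init path below and the interrupt handler walk the
 * blocks this way.
 */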
/* cpu init entry point, called from mce.c with preempt off */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        unsigned int bank, block;
        unsigned int cpu = smp_processor_id();
        u8 lvt_off;
        u32 low = 0, high = 0, address = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        }
                        else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        if (!block)
                                per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
                        if (shared_bank[bank] && c->cpu_core_id)
                                break;
#endif
                        lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
                                                       APIC_EILVT_MSG_FIX, 0);

                        high &= ~MASK_LVTOFF_HI;
                        high |= lvt_off << 20;
                        wrmsr(address, low, high);

                        threshold_defaults.address = address;
                        threshold_restart_bank(&threshold_defaults, 0, 0);
                }
        }
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.  The
 * interrupt fires when error_count reaches threshold_limit.  The handler
 * simply logs an mcelog entry with a software-defined bank number.
 */
asmlinkage void mce_threshold_interrupt(void)
{
        unsigned int bank, block;
        struct mce m;
        u32 low = 0, high = 0, address = 0;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        memset(&m, 0, sizeof(m));
        rdtscll(m.tsc);
        m.cpu = smp_processor_id();

        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
                        continue;
                for (block = 0; block < NR_BLOCKS; ++block) {
                        if (block == 0)
                                address = MSR_IA32_MC0_MISC + bank * 4;
                        else if (block == 1) {
                                address = (low & MASK_BLKPTR_LO) >> 21;
                                if (!address)
                                        break;
                                address += MCG_XBLK_ADDR;
                        }
                        else
                                ++address;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI)) {
                                if (block)
                                        continue;
                                else
                                        break;
                        }

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        /* Log the machine check that caused the threshold
                           event. */
                        do_machine_check(NULL, 0);

                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
                                rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
                                       m.status);
                                m.bank = K8_MCE_THRESHOLD_BASE
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
                                goto out;
                        }
                }
        }
out:
        add_pda(irq_threshold_count, 1);
        irq_exit();
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t(*show) (struct threshold_block *, char *);
        ssize_t(*store) (struct threshold_block *, const char *, size_t count);
};

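/*
 * rdmsr/wrmsr only touch the MSRs of the CPU they run on, so the sysfs
 * handlers must execute on the CPU that owns a block.  These helpers
 * temporarily pin the calling task to that CPU and restore its original
 * affinity afterwards.
 */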
static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
                                           cpumask_t *newmask)
{
        *oldmask = current->cpus_allowed;
        cpus_clear(*newmask);
        cpu_set(cpu, *newmask);
        set_cpus_allowed_ptr(current, newmask);
}

static void affinity_restore(const cpumask_t *oldmask)
{
        set_cpus_allowed_ptr(current, oldmask);
}

#define SHOW_FIELDS(name)                                           \
static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
{                                                                   \
        return sprintf(buf, "%lx\n", (unsigned long) b->name);      \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t store_interrupt_enable(struct threshold_block *b,
                                      const char *buf, size_t count)
{
        char *end;
        cpumask_t oldmask, newmask;
        unsigned long new = simple_strtoul(buf, &end, 0);
        if (end == buf)
                return -EINVAL;
        b->interrupt_enable = !!new;

        affinity_set(b->cpu, &oldmask, &newmask);
        threshold_restart_bank(b, 0, 0);
        affinity_restore(&oldmask);

        return end - buf;
}

static ssize_t store_threshold_limit(struct threshold_block *b,
                                     const char *buf, size_t count)
{
        char *end;
        cpumask_t oldmask, newmask;
        u16 old;
        unsigned long new = simple_strtoul(buf, &end, 0);
        if (end == buf)
                return -EINVAL;
        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;
        old = b->threshold_limit;
        b->threshold_limit = new;

        affinity_set(b->cpu, &oldmask, &newmask);
        threshold_restart_bank(b, 0, old);
        affinity_restore(&oldmask);

        return end - buf;
}

static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        u32 high, low;
        cpumask_t oldmask, newmask;
        affinity_set(b->cpu, &oldmask, &newmask);
        rdmsr(b->address, low, high);
        affinity_restore(&oldmask);
        return sprintf(buf, "%x\n",
                       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}

static ssize_t store_error_count(struct threshold_block *b,
                                 const char *buf, size_t count)
{
        cpumask_t oldmask, newmask;
        affinity_set(b->cpu, &oldmask, &newmask);
        threshold_restart_bank(b, 1, 0);
        affinity_restore(&oldmask);
        return 1;
}

#define THRESHOLD_ATTR(_name,_mode,_show,_store) {            \
        .attr = {.name = __stringify(_name), .mode = _mode }, \
        .show = _show,                                        \
        .store = _store,                                      \
};

#define RW_ATTR(name)                                           \
static struct threshold_attr name =                             \
        THRESHOLD_ATTR(name, 0644, show_## name, store_## name)

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
RW_ATTR(error_count);

static struct attribute *default_attrs[] = {
        &interrupt_enable.attr,
        &threshold_limit.attr,
        &error_count.attr,
        NULL
};

#define to_block(k) container_of(k, struct threshold_block, kobj)
#define to_attr(a) container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;
        ret = a->show ? a->show(b, buf) : -EIO;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;
        ret = a->store ? a->store(b, buf, count) : -EIO;
        return ret;
}

static struct sysfs_ops threshold_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops = &threshold_ops,
        .default_attrs = default_attrs,
};

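/*
 * Walk all blocks of one bank, starting at block 0, and create a
 * threshold_block (plus its "misc%i" kobject) for every valid, unlocked
 * MISC register found.  Blocks after the first are chained onto the first
 * block's ->miscj list; the walk recurses to the next block address until
 * an invalid block or NR_BLOCKS is reached.
 */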
static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
                                               unsigned int bank,
                                               unsigned int block,
                                               u32 address)
{
        int err;
        u32 low, high;
        struct threshold_block *b = NULL;

        if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe(address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block = block;
        b->bank = bank;
        b->cpu = cpu;
        b->address = address;
        b->interrupt_enable = 0;
        b->threshold_limit = THRESHOLD_MAX;

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks)
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        else
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   "misc%i", block);
        if (err)
                goto out_free;
recurse:
        if (!block) {
                address = (low & MASK_BLKPTR_LO) >> 21;
                if (!address)
                        return 0;
                address += MCG_XBLK_ADDR;
        } else
                ++address;

        err = allocate_threshold_blocks(cpu, bank, ++block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                kfree(b);
        }
        return err;
}

/* symlinks sibling shared banks to first core.  first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        int i, err = 0;
        struct threshold_bank *b = NULL;
        cpumask_t oldmask, newmask;
        char name[32];

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
                i = first_cpu(per_cpu(cpu_core_map, cpu));

                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
                        goto out;

                /* already linked */
                if (per_cpu(threshold_banks, cpu)[bank])
                        goto out;

                b = per_cpu(threshold_banks, i)[bank];

                if (!b)
                        goto out;

                err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                b->cpus = per_cpu(cpu_core_map, cpu);
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
#endif

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
        if (!b->kobj)
                goto out_free;

#ifndef CONFIG_SMP
        b->cpus = CPU_MASK_ALL;
#else
        b->cpus = per_cpu(cpu_core_map, cpu);
#endif

        per_cpu(threshold_banks, cpu)[bank] = b;

        affinity_set(cpu, &oldmask, &newmask);
        err = allocate_threshold_blocks(cpu, bank, 0,
                                        MSR_IA32_MC0_MISC + bank * 4);
        affinity_restore(&oldmask);

        if (err)
                goto out_free;

        for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;

                err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
                                        b->kobj, name);
                if (err)
                        goto out;

                per_cpu(threshold_banks, i)[bank] = b;
        }

        goto out;

out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
        kfree(b);
out:
        return err;
}

/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        int err = 0;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        goto out;
        }
out:
        return err;
}

/*
 * Let's be hotplug friendly.
 * On multi-core processors, the first core always takes ownership of the
 *   shared sysfs dir/files, and the rest of the cores are symlinked to it.
 */

static void deallocate_threshold_block(unsigned int cpu,
                                       unsigned int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        int i = 0;
        struct threshold_bank *b;
        char name[32];

        b = per_cpu(threshold_banks, cpu)[bank];

        if (!b)
                return;

        if (!b->blocks)
                goto free_out;

        sprintf(name, "threshold_bank%i", bank);

#ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
                sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;
                return;
        }
#endif

        /* remove all sibling symlinks before unregistering */
        for_each_cpu_mask_nr(i, b->cpus) {
                if (i == cpu)
                        continue;

                sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < NR_BANKS; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
}

/* get notified when a cpu comes on/off */
static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
                                                    unsigned int cpu)
{
        if (cpu >= NR_CPUS)
                return;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                threshold_create_device(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                threshold_remove_device(cpu);
                break;
        default:
                break;
        }
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = threshold_create_device(lcpu);
                if (err)
                        return err;
        }
        threshold_cpu_callback = amd_64_threshold_cpu_callback;
        return 0;
}

device_initcall(threshold_init_device);