x86, cpu: Make APERF/MPERF a normal table-driven flag
[linux-2.6.git] / arch / x86 / kernel / cpu / addon_cpuid_features.c
1 /*
 *      Routines to identify additional cpu features that are scattered in
3  *      cpuid space.
4  */
5 #include <linux/cpu.h>
6
7 #include <asm/pat.h>
8 #include <asm/processor.h>
9
10 #include <asm/apic.h>
11
/*
 * Describes one scattered feature bit: which CPUID leaf ("level") to
 * execute, which output register to inspect, the bit position within
 * that register, and the X86_FEATURE_* flag to set when the bit is on.
 */
struct cpuid_bit {
	u16 feature;	/* X86_FEATURE_* value; 0 terminates the table */
	u8 reg;		/* index into regs[] — one of enum cpuid_regs */
	u8 bit;		/* bit number within the selected register */
	u32 level;	/* CPUID leaf to query */
};

/*
 * Indices into the regs[] array filled by cpuid().  Note the order is
 * EAX, ECX, EDX, EBX — not the hardware output order; each cpuid()
 * output is stored explicitly into its named slot, so only the index
 * values matter, not their ordering here.
 */
enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};
25
26 void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
27 {
28         u32 max_level;
29         u32 regs[4];
30         const struct cpuid_bit *cb;
31
32         static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
33                 { X86_FEATURE_IDA,              CR_EAX, 1, 0x00000006 },
34                 { X86_FEATURE_ARAT,             CR_EAX, 2, 0x00000006 },
35                 { X86_FEATURE_APERFMPERF,       CR_ECX, 0, 0x00000006 },
36                 { X86_FEATURE_CPB,              CR_EDX, 9, 0x80000007 },
37                 { X86_FEATURE_NPT,              CR_EDX, 0, 0x8000000a },
38                 { X86_FEATURE_LBRV,             CR_EDX, 1, 0x8000000a },
39                 { X86_FEATURE_SVML,             CR_EDX, 2, 0x8000000a },
40                 { X86_FEATURE_NRIPS,            CR_EDX, 3, 0x8000000a },
41                 { 0, 0, 0, 0 }
42         };
43
44         for (cb = cpuid_bits; cb->feature; cb++) {
45
46                 /* Verify that the level is valid */
47                 max_level = cpuid_eax(cb->level & 0xffff0000);
48                 if (max_level < cb->level ||
49                     max_level > (cb->level | 0xffff))
50                         continue;
51
52                 cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
53                         &regs[CR_ECX], &regs[CR_EDX]);
54
55                 if (regs[cb->reg] & (1 << cb->bit))
56                         set_cpu_cap(c, cb->feature);
57         }
58 }
59
/* leaf 0xb SMT level: sub-leaf 0 enumerates the thread (SMT) level */
#define SMT_LEVEL       0

/*
 * leaf 0xb sub-leaf types, as reported in ECX[15:8] (see
 * LEAFB_SUBTYPE below; semantics per the Intel SDM CPUID leaf 0BH).
 */
#define INVALID_TYPE    0
#define SMT_TYPE        1
#define CORE_TYPE       2

/* Level type of this sub-leaf: ECX bits [15:8] */
#define LEAFB_SUBTYPE(ecx)              (((ecx) >> 8) & 0xff)
/* APIC-id right-shift to reach the next topology level: EAX bits [4:0] */
#define BITS_SHIFT_NEXT_LEVEL(eax)      ((eax) & 0x1f)
/* Number of logical processors at this level: EBX bits [15:0] */
#define LEVEL_MAX_SIBLINGS(ebx)         ((ebx) & 0xffff)
71
/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 *
 * Fills in c->initial_apicid, c->apicid, c->cpu_core_id,
 * c->phys_proc_id, c->x86_max_cores and the global smp_num_siblings,
 * and sets X86_FEATURE_XTOPOLOGY.  No-op when CONFIG_SMP is not set.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;
	/* Print the topology summary only once (first CPU to get here). */
	static bool printed;

	/* Leaf 0xb must be within this CPU's advertised cpuid range. */
	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented:
	 * an unimplemented leaf returns EBX == 0 and the SMT sub-leaf
	 * must report the SMT level type.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);

	/*
	 * Walk remaining sub-leaves until the Core level is found or an
	 * invalid (terminating) sub-leaf type is reached.
	 */
	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

	/*
	 * Mask selecting the core-id field of the APIC id: bits
	 * [ht_mask_width, core_plus_mask_width), shifted down past the
	 * SMT bits.
	 */
	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
						 & core_select_mask;
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);

	/* Threads per core-level domain divided by threads per core. */
	c->x86_max_cores = (core_level_siblings / smp_num_siblings);

	if (!printed) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		if (c->x86_max_cores > 1)
			printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
		printed = 1;
	}
	return;
#endif
}