fd1fc1902a4744d6e77f6ee3d9a8cc6cc0bfc6e1
[linux-2.6.git] / arch / x86 / kernel / cpu / addon_cpuid_features.c
/*
 *	Routines to identify additional cpu features that are scattered in
 *	cpuid space.
 */
5 #include <linux/cpu.h>
6
7 #include <asm/pat.h>
8 #include <asm/processor.h>
9
10 #include <asm/apic.h>
11
12 struct cpuid_bit {
13         u16 feature;
14         u8 reg;
15         u8 bit;
16         u32 level;
17 };
18
19 enum cpuid_regs {
20         CR_EAX = 0,
21         CR_ECX,
22         CR_EDX,
23         CR_EBX
24 };
25
26 void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
27 {
28         u32 max_level;
29         u32 regs[4];
30         const struct cpuid_bit *cb;
31
32         static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
33                 { X86_FEATURE_IDA,   CR_EAX, 1, 0x00000006 },
34                 { X86_FEATURE_ARAT,  CR_EAX, 2, 0x00000006 },
35                 { X86_FEATURE_CPB,   CR_EDX, 9, 0x80000007 },
36                 { X86_FEATURE_NPT,   CR_EDX, 0, 0x8000000a },
37                 { X86_FEATURE_LBRV,  CR_EDX, 1, 0x8000000a },
38                 { X86_FEATURE_SVML,  CR_EDX, 2, 0x8000000a },
39                 { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
40                 { 0, 0, 0, 0 }
41         };
42
43         for (cb = cpuid_bits; cb->feature; cb++) {
44
45                 /* Verify that the level is valid */
46                 max_level = cpuid_eax(cb->level & 0xffff0000);
47                 if (max_level < cb->level ||
48                     max_level > (cb->level | 0xffff))
49                         continue;
50
51                 cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
52                         &regs[CR_ECX], &regs[CR_EDX]);
53
54                 if (regs[cb->reg] & (1 << cb->bit))
55                         set_cpu_cap(c, cb->feature);
56         }
57
58         /*
59          * common AMD/Intel features
60          */
61         if (c->cpuid_level >= 6) {
62                 if (cpuid_ecx(6) & 0x1)
63                         set_cpu_cap(c, X86_FEATURE_APERFMPERF);
64         }
65 }
66
67 /* leaf 0xb SMT level */
68 #define SMT_LEVEL       0
69
70 /* leaf 0xb sub-leaf types */
71 #define INVALID_TYPE    0
72 #define SMT_TYPE        1
73 #define CORE_TYPE       2
74
75 #define LEAFB_SUBTYPE(ecx)              (((ecx) >> 8) & 0xff)
76 #define BITS_SHIFT_NEXT_LEVEL(eax)      ((eax) & 0x1f)
77 #define LEVEL_MAX_SIBLINGS(ebx)         ((ebx) & 0xffff)
78
79 /*
80  * Check for extended topology enumeration cpuid leaf 0xb and if it
81  * exists, use it for populating initial_apicid and cpu topology
82  * detection.
83  */
84 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
85 {
86 #ifdef CONFIG_SMP
87         unsigned int eax, ebx, ecx, edx, sub_index;
88         unsigned int ht_mask_width, core_plus_mask_width;
89         unsigned int core_select_mask, core_level_siblings;
90         static bool printed;
91
92         if (c->cpuid_level < 0xb)
93                 return;
94
95         cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
96
97         /*
98          * check if the cpuid leaf 0xb is actually implemented.
99          */
100         if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
101                 return;
102
103         set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
104
105         /*
106          * initial apic id, which also represents 32-bit extended x2apic id.
107          */
108         c->initial_apicid = edx;
109
110         /*
111          * Populate HT related information from sub-leaf level 0.
112          */
113         core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
114         core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
115
116         sub_index = 1;
117         do {
118                 cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);
119
120                 /*
121                  * Check for the Core type in the implemented sub leaves.
122                  */
123                 if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
124                         core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
125                         core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
126                         break;
127                 }
128
129                 sub_index++;
130         } while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
131
132         core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
133
134         c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
135                                                  & core_select_mask;
136         c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
137         /*
138          * Reinit the apicid, now that we have extended initial_apicid.
139          */
140         c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
141
142         c->x86_max_cores = (core_level_siblings / smp_num_siblings);
143
144         if (!printed) {
145                 printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
146                        c->phys_proc_id);
147                 if (c->x86_max_cores > 1)
148                         printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
149                                c->cpu_core_id);
150                 printed = 1;
151         }
152         return;
153 #endif
154 }