blob: 12cd823c8d038008f8abe77785b4bb475dd3cf20 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Ingo Molnarcdcf7722008-07-28 16:20:08 +02002 * Routines to identify caches on Intel CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
Ingo Molnarcdcf7722008-07-28 16:20:08 +02004 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
Alan Cox8bdbd962009-07-04 00:35:45 +01006 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
Andi Kleen67cddd92007-07-21 17:10:03 +02007 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/compiler.h>
14#include <linux/cpu.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080015#include <linux/sched.h>
Mark Langsdorfa24e8d32008-07-22 13:06:02 -050016#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
18#include <asm/processor.h>
Alan Cox8bdbd962009-07-04 00:35:45 +010019#include <linux/smp.h>
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +020020#include <asm/amd_nb.h>
Borislav Petkovdcf39da2010-01-22 16:01:05 +010021#include <asm/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
/* Cache level/type codes used in the cpuid(2) descriptor table below */
#define LVL_1_INST	1	/* level 1 instruction cache */
#define LVL_1_DATA	2	/* level 1 data cache */
#define LVL_2		3	/* level 2 cache */
#define LVL_3		4	/* level 3 cache */
#define LVL_TRACE	5	/* trace (uop) cache */

/* One cpuid(2) descriptor byte -> cache level/size mapping */
struct _cache_table {
	unsigned char descriptor;	/* descriptor byte from cpuid(2) */
	char cache_type;		/* one of the LVL_* codes above */
	short size;			/* cache size in KB */
};

/* megabytes -> kilobytes, for readability of the size column below */
#define MB(x)	((x) * 1024)
36
/* All the cache descriptor types we care about (no TLB or
   trace cache entries).  Terminated by an all-zero entry;
   sizes are in KB (see the MB() helper above). */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}			/* terminator */
};
113
114
/* Cache type as reported in the cpuid(4) EAX type field (bits 4:0) */
enum _cache_type {
	CACHE_TYPE_NULL = 0,		/* no more cache leaves */
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
121
/* cpuid(4) EAX: cache type, level and sharing topology */
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type type:5;
		unsigned int level:3;
		unsigned int is_self_initializing:1;
		unsigned int is_fully_associative:1;
		unsigned int reserved:4;
		unsigned int num_threads_sharing:12;	/* encoded as value - 1 */
		unsigned int num_cores_on_die:6;	/* encoded as value - 1 */
	} split;
	u32 full;
};

/* cpuid(4) EBX: geometry fields, each encoded as value - 1 */
union _cpuid4_leaf_ebx {
	struct {
		unsigned int coherency_line_size:12;
		unsigned int physical_line_partition:10;
		unsigned int ways_of_associativity:10;
	} split;
	u32 full;
};

/* cpuid(4) ECX: number of sets, encoded as value - 1 */
union _cpuid4_leaf_ecx {
	struct {
		unsigned int number_of_sets:32;
	} split;
	u32 full;
};
150
/* Per-node AMD L3 cache state for the L3 cache index disable interface */
struct amd_l3_cache {
	struct pci_dev *dev;	/* NB misc device used for PCI config access */
	bool can_disable;	/* index disable usable on this L3 */
	unsigned indices;	/* highest index value accepted for disabling */
	u8 subcaches[4];	/* per-subcache presence flags (see amd_calc_l3_indices) */
};
157
/* Full per-leaf cache description, including the map of sharing CPUs */
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;		/* total cache size in bytes */
	struct amd_l3_cache *l3;	/* non-NULL only for disable-capable AMD L3s */
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
};
175
/* Number of cpuid(4) cache leaves; initialized once from the boot CPU */
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */

/* cpuid(0x80000005) ECX/EDX layout: L1 data/instruction cache info */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;		/* encoded, see assocs[] */
		unsigned size_in_kb:8;
	};
	unsigned val;
};

/* cpuid(0x80000006) ECX layout: L2 cache info */
union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;		/* encoded, see assocs[] */
		unsigned size_in_kb:16;
	};
	unsigned val;
};

/* cpuid(0x80000006) EDX layout: L3 cache info */
union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;		/* encoded, see assocs[] */
		unsigned res:2;
		unsigned size_encoded:14;	/* size in 512 KB units */
	};
	unsigned val;
};

/* Map the AMD encoded associativity field to an actual way count */
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

/* amd_cpuid4() leaf number -> cpuid(4) cache level / cache type */
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
Andi Kleen240cd6a802006-06-26 13:56:13 +0200231
/*
 * Emulate cpuid(4) output for AMD from the extended cache leaves
 * 0x80000005/0x80000006.  @leaf selects the cache:
 * 0 = L1D, 1 = L1I, 2 = L2, 3 = L3.
 * On a nonexistent cache (or unknown @leaf) the outputs are left
 * zeroed, i.e. type == CACHE_TYPE_NULL.
 */
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through - L1I and L1D use the same register layout */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;	/* encoded in 512K units */
		/* multi-node (DCM) parts: report each node's half of the L3 */
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;	/* no SMT assumed, see above */
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	/* cpuid(4) encodes the geometry fields as value - 1 */
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
/* sysfs attribute for a cache leaf; show/store operate on its _cpuid4_info */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};
308
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200309#ifdef CONFIG_AMD_NB
Borislav Petkovba06edb2010-04-22 16:07:01 +0200310
/*
 * L3 cache descriptors, indexed by node; lazily allocated on first
 * use in amd_check_l3_disable()
 */
static struct amd_l3_cache **__cpuinitdata l3_caches;
315
/*
 * Derive the number of disableable L3 indices from the subcache
 * presence bits in NB register 0x1C4.
 */
static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(l3->dev, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	/* the largest subcache bounds the usable index range */
	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}
331
/*
 * Allocate and initialize the L3 cache descriptor for @node.
 * Returns NULL on allocation failure.
 */
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
{
	struct amd_l3_cache *l3;
	struct pci_dev *dev = node_to_k8_nb_misc(node);

	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
	if (!l3) {
		printk(KERN_WARNING "Error allocating L3 struct\n");
		return NULL;
	}

	l3->dev = dev;

	amd_calc_l3_indices(l3);

	return l3;
}
349
Borislav Petkov8cc11762010-06-02 18:18:40 +0200350static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
351 int index)
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500352{
Borislav Petkovba06edb2010-04-22 16:07:01 +0200353 int node;
354
Borislav Petkovb1ab1b42010-04-22 16:06:58 +0200355 if (boot_cpu_data.x86 != 0x10)
356 return;
357
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500358 if (index < 3)
359 return;
Andreas Herrmannbda869c2009-04-09 15:05:10 +0200360
Borislav Petkovb1ab1b42010-04-22 16:06:58 +0200361 /* see errata #382 and #388 */
362 if (boot_cpu_data.x86_model < 0x8)
Andreas Herrmannbda869c2009-04-09 15:05:10 +0200363 return;
364
Borislav Petkovb1ab1b42010-04-22 16:06:58 +0200365 if ((boot_cpu_data.x86_model == 0x8 ||
366 boot_cpu_data.x86_model == 0x9)
367 &&
368 boot_cpu_data.x86_mask < 0x1)
369 return;
Andreas Herrmannbda869c2009-04-09 15:05:10 +0200370
Frank Arnoldf2b20e42010-04-22 16:06:59 +0200371 /* not in virtualized environments */
Andreas Herrmann900f9ac2010-09-17 18:02:54 +0200372 if (k8_northbridges.num == 0)
Frank Arnoldf2b20e42010-04-22 16:06:59 +0200373 return;
374
Borislav Petkovba06edb2010-04-22 16:07:01 +0200375 /*
376 * Strictly speaking, the amount in @size below is leaked since it is
377 * never freed but this is done only on shutdown so it doesn't matter.
378 */
379 if (!l3_caches) {
Andreas Herrmann900f9ac2010-09-17 18:02:54 +0200380 int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
Borislav Petkovba06edb2010-04-22 16:07:01 +0200381
382 l3_caches = kzalloc(size, GFP_ATOMIC);
383 if (!l3_caches)
384 return;
Borislav Petkov9350f982010-04-22 16:07:00 +0200385 }
386
Borislav Petkovba06edb2010-04-22 16:07:01 +0200387 node = amd_get_nb_id(smp_processor_id());
388
389 if (!l3_caches[node]) {
390 l3_caches[node] = amd_init_l3_cache(node);
391 l3_caches[node]->can_disable = true;
392 }
393
394 WARN_ON(!l3_caches[node]);
395
396 this_leaf->l3 = l3_caches[node];
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500397}
398
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
{
	unsigned int reg = 0;

	/* slot 0/1 live in NB config registers 0x1BC/0x1C0 */
	pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;	/* low 12 bits hold the disabled index */

	return -1;
}
418
Borislav Petkovcb190602010-02-18 19:37:14 +0100419static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
Borislav Petkov59d3b382010-04-22 16:07:02 +0200420 unsigned int slot)
Borislav Petkovcb190602010-02-18 19:37:14 +0100421{
Borislav Petkov8cc11762010-06-02 18:18:40 +0200422 int index;
Borislav Petkovcb190602010-02-18 19:37:14 +0100423
Borislav Petkov9350f982010-04-22 16:07:00 +0200424 if (!this_leaf->l3 || !this_leaf->l3->can_disable)
Borislav Petkovcb190602010-02-18 19:37:14 +0100425 return -EINVAL;
426
Borislav Petkov8cc11762010-06-02 18:18:40 +0200427 index = amd_get_l3_disable_slot(this_leaf->l3, slot);
428 if (index >= 0)
429 return sprintf(buf, "%d\n", index);
Borislav Petkovcb190602010-02-18 19:37:14 +0100430
Borislav Petkov8cc11762010-06-02 18:18:40 +0200431 return sprintf(buf, "FREE\n");
Borislav Petkovcb190602010-02-18 19:37:14 +0100432}
433
/* Generate show_cache_disable_0/1 wrappers bound to a fixed slot */
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
442
/*
 * Disable L3 index @idx in disable slot @slot on every populated
 * subcache of @l3.  @cpu must be a CPU on the node containing this L3
 * so the WBINVD flushes the right cache.
 */
static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);	/* bits 21:20 select the subcache */

		if (!l3->subcaches[i])
			continue;

		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);		/* commit: set the disable-enable bit */
		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
	}
}
472
Borislav Petkov8cc11762010-06-02 18:18:40 +0200473/*
474 * disable a L3 cache index by using a disable-slot
475 *
476 * @l3: L3 cache descriptor
477 * @cpu: A CPU on the node containing the L3 cache
478 * @slot: slot number (0..1)
479 * @index: index to disable
480 *
481 * @return: 0 on success, error status on failure
482 */
483int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
484 unsigned long index)
Borislav Petkovcb190602010-02-18 19:37:14 +0100485{
Borislav Petkov8cc11762010-06-02 18:18:40 +0200486 int ret = 0;
Borislav Petkovcb190602010-02-18 19:37:14 +0100487
488#define SUBCACHE_MASK (3UL << 20)
489#define SUBCACHE_INDEX 0xfff
490
Borislav Petkov8cc11762010-06-02 18:18:40 +0200491 /*
492 * check whether this slot is already used or
493 * the index is already disabled
494 */
495 ret = amd_get_l3_disable_slot(l3, slot);
496 if (ret >= 0)
Borislav Petkovcb190602010-02-18 19:37:14 +0100497 return -EINVAL;
498
Borislav Petkov8cc11762010-06-02 18:18:40 +0200499 /*
500 * check whether the other slot has disabled the
501 * same index already
502 */
503 if (index == amd_get_l3_disable_slot(l3, !slot))
504 return -EINVAL;
505
506 /* do not allow writes outside of allowed bits */
507 if ((index & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
508 ((index & SUBCACHE_INDEX) > l3->indices))
509 return -EINVAL;
510
511 amd_l3_disable_index(l3, cpu, slot, index);
512
513 return 0;
514}
515
/*
 * sysfs write for a cache_disable_<slot> file: parse a decimal L3
 * index from @buf and disable it via slot @slot.  Requires
 * CAP_SYS_ADMIN; returns @count on success or a negative errno.
 */
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	/* any CPU sharing this cache leaf sits on the right node */
	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			printk(KERN_WARNING "L3 disable slot %d in use!\n",
					    slot);
		return err;
	}
	return count;
}
543
/* Generate store_cache_disable_0/1 wrappers bound to a fixed slot */
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

/* sysfs files "cache_disable_0"/"cache_disable_1", mode 0644 */
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
558
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200559#else /* CONFIG_AMD_NB */
Borislav Petkovcb190602010-02-18 19:37:14 +0100560static void __cpuinit
Borislav Petkov8cc11762010-06-02 18:18:40 +0200561amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
Borislav Petkovcb190602010-02-18 19:37:14 +0100562{
563};
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200564#endif /* CONFIG_AMD_NB */
Borislav Petkovcb190602010-02-18 19:37:14 +0100565
/*
 * Fill @this_leaf for cache leaf @index: native cpuid(4) on Intel,
 * amd_cpuid4() emulation (plus L3-disable probing) on AMD.
 * Returns 0 on success, -EIO if the leaf reports CACHE_TYPE_NULL.
 */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	/* total bytes = sets * line size * partitions * ways (fields are -1 encoded) */
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
	return 0;
}
594
Adrian Bunk61d488d2007-07-21 04:37:39 -0700595static int __cpuinit find_num_cache_leaves(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596{
597 unsigned int eax, ebx, ecx, edx;
598 union _cpuid4_leaf_eax cache_eax;
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800599 int i = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800601 do {
602 ++i;
603 /* Do cpuid(4) loop to find out num_cache_leaves */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
605 cache_eax.full = eax;
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800606 } while (cache_eax.split.type != CACHE_TYPE_NULL);
607 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608}
609
/*
 * Determine the CPU's cache sizes: prefer the deterministic cpuid(4)
 * leaves where available, falling back to the cpuid(2) descriptor
 * table for older parts (and for the P4 trace cache).  Sets
 * c->x86_cache_size (largest cache level, in KB) and, under
 * CONFIG_X86_HT, the per-CPU last-level-cache id.  Returns the L2
 * size in KB.
 */
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					/* llc id = apicid with the shared-thread bits stripped */
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		/* P4 with cpuid(4): only the trace cache is taken from cpuid(2) */
		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	/* cpuid(4) results take precedence over the cpuid(2) descriptors */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		/* L3 present: it becomes the last-level cache id */
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	/* report the largest cache level present */
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
754
Ingo Molnarba1d7552008-10-18 21:24:45 +0200755#ifdef CONFIG_SYSFS
756
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757/* pointer to _cpuid4_info array (for each cache leaf) */
Tejun Heo0fe1e002009-10-29 22:34:14 +0900758static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
759#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760
761#ifdef CONFIG_SMP
/*
 * Populate this leaf's shared_cpu_map: the set of CPUs that share this
 * cache.  Also back-propagates @cpu into the maps of siblings whose
 * leaf data already exists, so the maps stay symmetric as CPUs come up.
 */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * AMD special case: the L3 (index 3) is shared per last-level
	 * cache domain, so use the precomputed llc_shared_map instead of
	 * the CPUID.4 topology bits.
	 */
	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			/* sibling's leaf array may not be allocated yet */
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	/* hardware encodes "threads sharing" as value - 1, hence the +1 */
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		/*
		 * CPUs whose APIC IDs agree above index_msb sit in the
		 * same sharing domain for this cache level.
		 */
		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					/* mirror @cpu into the sibling's map */
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
Chuck Ebbert3bc9b762006-03-23 02:59:33 -0800805static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100806{
807 struct _cpuid4_info *this_leaf, *sibling_leaf;
808 int sibling;
809
810 this_leaf = CPUID4_INFO_IDX(cpu, index);
Mike Travisf9b90562009-01-10 21:58:10 -0800811 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
Ingo Molnarcdcf7722008-07-28 16:20:08 +0200812 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
Mike Travisf9b90562009-01-10 21:58:10 -0800813 cpumask_clear_cpu(cpu,
814 to_cpumask(sibling_leaf->shared_cpu_map));
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100815 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816}
817#else
/* UP build: there are no siblings to share with, so these are no-ops. */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825#endif
826
/*
 * Release the cpuid4 leaf array for @cpu, first detaching it from every
 * sibling's shared_cpu_map so no stale CPU bits remain.
 */
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	/*
	 * NOTE(review): only leaf 0's l3 pointer is freed here —
	 * presumably either all leaves reference the same allocation or
	 * only the first leaf ever carries it; verify against the code
	 * that sets up ->l3.
	 */
	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}
838
Hiroshi Shimamotobd0838f2009-01-26 18:08:47 -0800839static int
840__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
841{
842 struct _cpuid4_info_regs *leaf_regs =
843 (struct _cpuid4_info_regs *)this_leaf;
844
845 return cpuid4_cache_lookup_regs(index, leaf_regs);
846}
847
Sergio Luis60928482008-12-28 04:12:26 -0300848static void __cpuinit get_cpu_leaves(void *_retval)
Mike Travisb2bb8552008-12-16 17:34:03 -0800849{
850 int j, *retval = _retval, cpu = smp_processor_id();
851
852 /* Do cpuid and store the results */
853 for (j = 0; j < num_cache_leaves; j++) {
854 struct _cpuid4_info *this_leaf;
855 this_leaf = CPUID4_INFO_IDX(cpu, j);
856 *retval = cpuid4_cache_lookup(j, this_leaf);
857 if (unlikely(*retval < 0)) {
858 int i;
859
860 for (i = 0; i < j; i++)
861 cache_remove_shared_cpu_map(cpu, i);
862 break;
863 }
864 cache_shared_cpu_map_setup(cpu, j);
865 }
866}
867
Ashok Raj1aa1a9f2005-10-30 14:59:50 -0800868static int __cpuinit detect_cache_attributes(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869{
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700870 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871
872 if (num_cache_leaves == 0)
873 return -ENOENT;
874
Tejun Heo0fe1e002009-10-29 22:34:14 +0900875 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
Tejun Heo0fe1e002009-10-29 22:34:14 +0900877 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879
Mike Travisb2bb8552008-12-16 17:34:03 -0800880 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700881 if (retval) {
Tejun Heo0fe1e002009-10-29 22:34:14 +0900882 kfree(per_cpu(ici_cpuid4_info, cpu));
883 per_cpu(ici_cpuid4_info, cpu) = NULL;
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700884 }
885
Andi Kleene2cac782005-07-28 21:15:46 -0700886 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887}
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889#include <linux/kobject.h>
890#include <linux/sysfs.h>
891
892extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
893
894/* pointer to kobject for cpuX/cache */
Tejun Heo0fe1e002009-10-29 22:34:14 +0900895static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896
897struct _index_kobject {
898 struct kobject kobj;
899 unsigned int cpu;
900 unsigned short index;
901};
902
903/* pointer to array of kobjects for cpuX/cache/indexY */
Tejun Heo0fe1e002009-10-29 22:34:14 +0900904static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
905#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906
/*
 * Generate a sysfs show routine for one numeric leaf field; @val is
 * added to the raw CPUID value because the hardware encodes several
 * fields as "value - 1".
 */
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
	(struct _cpuid4_info *this_leaf, char *buf)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
913
914show_one_plus(level, eax.split.level, 0);
915show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
916show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
917show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
918show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
919
920static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
921{
Alan Cox8bdbd962009-07-04 00:35:45 +0100922 return sprintf(buf, "%luK\n", this_leaf->size / 1024);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923}
924
Mike Travisfb0f3302008-04-08 11:43:02 -0700925static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
926 int type, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927{
Mike Travisfb0f3302008-04-08 11:43:02 -0700928 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
Mike Travis6b6309b2008-03-25 15:06:56 -0700929 int n = 0;
Mike Travis6b6309b2008-03-25 15:06:56 -0700930
Mike Travisfb0f3302008-04-08 11:43:02 -0700931 if (len > 1) {
Mike Travisf9b90562009-01-10 21:58:10 -0800932 const struct cpumask *mask;
Mike Travisfb0f3302008-04-08 11:43:02 -0700933
Mike Travisf9b90562009-01-10 21:58:10 -0800934 mask = to_cpumask(this_leaf->shared_cpu_map);
Alan Cox8bdbd962009-07-04 00:35:45 +0100935 n = type ?
Rusty Russell29c01772008-12-13 21:20:25 +1030936 cpulist_scnprintf(buf, len-2, mask) :
937 cpumask_scnprintf(buf, len-2, mask);
Mike Travisfb0f3302008-04-08 11:43:02 -0700938 buf[n++] = '\n';
939 buf[n] = '\0';
Mike Travis6b6309b2008-03-25 15:06:56 -0700940 }
941 return n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942}
943
/* sysfs "shared_cpu_map": hex bitmask of CPUs sharing this cache */
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

/* sysfs "shared_cpu_list": same set as a human-readable range list */
static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
953
Jiri Slaby4385cec2008-11-29 22:33:16 +0100954static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
955{
956 switch (this_leaf->eax.split.type) {
957 case CACHE_TYPE_DATA:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 return sprintf(buf, "Data\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100959 case CACHE_TYPE_INST:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 return sprintf(buf, "Instruction\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100961 case CACHE_TYPE_UNIFIED:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 return sprintf(buf, "Unified\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100963 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 return sprintf(buf, "Unknown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 }
966}
967
Ingo Molnar7a4983b2008-07-21 13:34:21 +0200968#define to_object(k) container_of(k, struct _index_kobject, kobj)
969#define to_attr(a) container_of(a, struct _cache_attr, attr)
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971#define define_one_ro(_name) \
972static struct _cache_attr _name = \
973 __ATTR(_name, 0444, show_##_name, NULL)
974
975define_one_ro(level);
976define_one_ro(type);
977define_one_ro(coherency_line_size);
978define_one_ro(physical_line_partition);
979define_one_ro(ways_of_associativity);
980define_one_ro(number_of_sets);
981define_one_ro(size);
982define_one_ro(shared_cpu_map);
Mike Travisfb0f3302008-04-08 11:43:02 -0700983define_one_ro(shared_cpu_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984
Borislav Petkov897de502010-01-22 16:01:06 +0100985#define DEFAULT_SYSFS_CACHE_ATTRS \
986 &type.attr, \
987 &level.attr, \
988 &coherency_line_size.attr, \
989 &physical_line_partition.attr, \
990 &ways_of_associativity.attr, \
991 &number_of_sets.attr, \
992 &size.attr, \
993 &shared_cpu_map.attr, \
994 &shared_cpu_list.attr
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500995
Alan Cox8bdbd962009-07-04 00:35:45 +0100996static struct attribute *default_attrs[] = {
Borislav Petkov897de502010-01-22 16:01:06 +0100997 DEFAULT_SYSFS_CACHE_ATTRS,
998 NULL
999};
1000
1001static struct attribute *default_l3_attrs[] = {
1002 DEFAULT_SYSFS_CACHE_ATTRS,
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +02001003#ifdef CONFIG_AMD_NB
Mark Langsdorff8b201f2009-04-09 15:18:49 +02001004 &cache_disable_0.attr,
1005 &cache_disable_1.attr,
Borislav Petkovcb190602010-02-18 19:37:14 +01001006#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007 NULL
1008};
1009
Alan Cox8bdbd962009-07-04 00:35:45 +01001010static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011{
1012 struct _cache_attr *fattr = to_attr(attr);
1013 struct _index_kobject *this_leaf = to_object(kobj);
1014 ssize_t ret;
1015
1016 ret = fattr->show ?
1017 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1018 buf) :
Ingo Molnarcdcf7722008-07-28 16:20:08 +02001019 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 return ret;
1021}
1022
Alan Cox8bdbd962009-07-04 00:35:45 +01001023static ssize_t store(struct kobject *kobj, struct attribute *attr,
1024 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025{
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -05001026 struct _cache_attr *fattr = to_attr(attr);
1027 struct _index_kobject *this_leaf = to_object(kobj);
1028 ssize_t ret;
1029
Ingo Molnarcdcf7722008-07-28 16:20:08 +02001030 ret = fattr->store ?
1031 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1032 buf, count) :
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -05001033 0;
1034 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035}
1036
Emese Revfy52cf25d2010-01-19 02:58:23 +01001037static const struct sysfs_ops sysfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 .show = show,
1039 .store = store,
1040};
1041
1042static struct kobj_type ktype_cache = {
1043 .sysfs_ops = &sysfs_ops,
1044 .default_attrs = default_attrs,
1045};
1046
1047static struct kobj_type ktype_percpu_entry = {
1048 .sysfs_ops = &sysfs_ops,
1049};
1050
/*
 * Release everything cpuid4_cache_sysfs_init() allocated for @cpu: the
 * parent "cache" kobject, the index kobject array, and (via
 * free_cache_attributes) the cpuid4 leaf data itself.  Callers must
 * ensure the leaf data was actually allocated for this CPU.
 */
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
1059
/*
 * Allocate everything the sysfs interface for @cpu needs: the cpuid4
 * leaf data (via detect_cache_attributes), the parent "cache" kobject
 * and the per-leaf index kobject array.  On any failure the partial
 * allocations are released again before returning.
 */
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	/* also frees the leaf data allocated by detect_cache_attributes() */
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
1088
Mike Travisf9b90562009-01-10 21:58:10 -08001089static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001090
/*
 * Add the cache sysfs interface for one CPU device: a "cache" kobject
 * under the CPU's sysdev directory with one "indexN" child per cache
 * leaf.  Called at boot for online CPUs and from the hotplug notifier.
 * On failure everything registered so far is unwound.
 */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		/*
		 * L3s that support index disabling get the extra
		 * cache_disable_[01] attributes.  NOTE(review): this
		 * flips a field in the global ktype_cache, which assumes
		 * cache_add_dev() calls are serialized — confirm against
		 * the boot/hotplug call paths.
		 */
		if (this_leaf->l3 && this_leaf->l3->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			/* unwind the index kobjects created so far */
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	/* mark this CPU as having a registered cache interface */
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
1142
/*
 * Tear down the sysfs cache hierarchy for a CPU that is going away.
 * Safe to call for CPUs that never got (or already lost) their
 * interface: both guards below simply return in that case.
 */
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	/* leaf data was never allocated for this CPU */
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	/* sysfs entries were not (or are no longer) registered */
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	/* drop every index kobject, then the parent "cache" kobject */
	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
1159
Chandra Seetharaman9c7b2162006-06-27 02:54:07 -07001160static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001161 unsigned long action, void *hcpu)
1162{
1163 unsigned int cpu = (unsigned long)hcpu;
1164 struct sys_device *sys_dev;
1165
1166 sys_dev = get_cpu_sysdev(cpu);
1167 switch (action) {
1168 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001169 case CPU_ONLINE_FROZEN:
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001170 cache_add_dev(sys_dev);
1171 break;
1172 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001173 case CPU_DEAD_FROZEN:
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001174 cache_remove_dev(sys_dev);
1175 break;
1176 }
1177 return NOTIFY_OK;
1178}
1179
Alan Cox8bdbd962009-07-04 00:35:45 +01001180static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001181 .notifier_call = cacheinfo_cpu_callback,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182};
1183
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001184static int __cpuinit cache_sysfs_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185{
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001186 int i;
1187
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 if (num_cache_leaves == 0)
1189 return 0;
1190
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001191 for_each_online_cpu(i) {
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001192 int err;
1193 struct sys_device *sys_dev = get_cpu_sysdev(i);
Satyam Sharmac789c032007-10-17 18:04:40 +02001194
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001195 err = cache_add_dev(sys_dev);
1196 if (err)
1197 return err;
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001198 }
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001199 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001200 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201}
1202
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001203device_initcall(cache_sysfs_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204
1205#endif