linux-2.6.git: arch/x86/kernel/cpu/perf_event.c (commit 1846ead0576b933806c1c83ef30855c648427bf8)
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
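/*
 * Sizing note (derived from the constants above): each per-cpu BTS buffer
 * holds 2048 records of 24 bytes = 48 KiB, and the interrupt threshold sits
 * 128 records (3 KiB) before the end of the buffer.
 */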
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
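/*
 * The layout above mirrors the 64-bit DS save area the CPU expects: one of
 * these is allocated per cpu and MSR_IA32_DS_AREA is pointed at it (see
 * init_debug_store_on_cpu() below).  The hardware then advances bts_index by
 * one BTS_RECORD_SIZE per recorded branch and, with BTINT set, raises an
 * interrupt once bts_interrupt_threshold is reached.
 */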
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
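/*
 * In the structure above, idxmsk/idxmsk64 is the set of counters the
 * constrained event may be scheduled on, code/cmask select which event codes
 * the constraint applies to, and weight caches the popcount of idxmsk so the
 * scheduler in x86_schedule_events() can place the most constrained events
 * first.
 */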
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 };
95
96 #define EVENT_CONSTRAINT(c, n, m) {     \
97         { .idxmsk64[0] = (n) },         \
98         .code = (c),                    \
99         .cmask = (m),                   \
100         .weight = HWEIGHT64((u64)(n)),  \
101 }
102
103 #define INTEL_EVENT_CONSTRAINT(c, n)    \
104         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
105
106 #define FIXED_EVENT_CONSTRAINT(c, n)    \
107         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
108
109 #define EVENT_CONSTRAINT_END            \
110         EVENT_CONSTRAINT(0, 0, 0)
111
112 #define for_each_event_constraint(e, c) \
113         for ((e) = (c); (e)->cmask; (e)++)
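/*
 * Illustrative expansion of the macros above (not used directly):
 * INTEL_EVENT_CONSTRAINT(0x12, 0x2) yields { .idxmsk64[0] = 0x2, .code = 0x12,
 * .cmask = INTEL_ARCH_EVTSEL_MASK, .weight = 1 }, i.e. event 0x12 (MUL) may
 * only be scheduled on counter 1.
 */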
114
115 /*
116  * struct x86_pmu - generic x86 pmu
117  */
118 struct x86_pmu {
119         const char      *name;
120         int             version;
121         int             (*handle_irq)(struct pt_regs *);
122         void            (*disable_all)(void);
123         void            (*enable_all)(void);
124         void            (*enable)(struct hw_perf_event *, int);
125         void            (*disable)(struct hw_perf_event *, int);
126         unsigned        eventsel;
127         unsigned        perfctr;
128         u64             (*event_map)(int);
129         u64             (*raw_event)(u64);
130         int             max_events;
131         int             num_events;
132         int             num_events_fixed;
133         int             event_bits;
134         u64             event_mask;
135         int             apic;
136         u64             max_period;
137         u64             intel_ctrl;
138         void            (*enable_bts)(u64 config);
139         void            (*disable_bts)(void);
140
141         struct event_constraint *
142                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
143                                                  struct perf_event *event);
144
145         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
146                                                  struct perf_event *event);
147         struct event_constraint *event_constraints;
148 };
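/*
 * x86_pmu is the vendor-specific backend used by the generic code below:
 * handle_irq is the counter-overflow interrupt handler, eventsel/perfctr are
 * the base MSRs of the event-select and counter register banks,
 * event_bits/event_mask describe the counter width, and the
 * (get|put)_event_constraints hooks feed the counter scheduler.
 */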
149
150 static struct x86_pmu x86_pmu __read_mostly;
151
152 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
153         .enabled = 1,
154 };
155
156 static int x86_perf_event_set_period(struct perf_event *event,
157                              struct hw_perf_event *hwc, int idx);
158
159 /*
160  * Not sure about some of these
161  */
162 static const u64 p6_perfmon_event_map[] =
163 {
164   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
165   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
166   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
167   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
168   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
169   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
170   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
171 };
172
173 static u64 p6_pmu_event_map(int hw_event)
174 {
175         return p6_perfmon_event_map[hw_event];
176 }
177
178 /*
179  * Event setting that is specified not to count anything.
180  * We use this to effectively disable a counter.
181  *
182  * L2_RQSTS with 0 MESI unit mask.
183  */
184 #define P6_NOP_EVENT                    0x0000002EULL
185
186 static u64 p6_pmu_raw_event(u64 hw_event)
187 {
188 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
189 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
190 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
191 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
192 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
193
194 #define P6_EVNTSEL_MASK                 \
195         (P6_EVNTSEL_EVENT_MASK |        \
196          P6_EVNTSEL_UNIT_MASK  |        \
197          P6_EVNTSEL_EDGE_MASK  |        \
198          P6_EVNTSEL_INV_MASK   |        \
199          P6_EVNTSEL_REG_MASK)
200
201         return hw_event & P6_EVNTSEL_MASK;
202 }
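/*
 * Only the fields covered by the masks above survive a user-supplied raw
 * config; in particular the USR/OS/INT/ENABLE bits are stripped and stay
 * under kernel control (__hw_perf_event_init() sets them up before OR-ing
 * the raw bits into hwc->config).
 */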
203
204 static struct event_constraint intel_p6_event_constraints[] =
205 {
206         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
207         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
208         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
209         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
210         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
211         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
212         EVENT_CONSTRAINT_END
213 };
214
215 /*
216  * Intel PerfMon v3. Used on Core2 and later.
217  */
218 static const u64 intel_perfmon_event_map[] =
219 {
220   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
221   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
222   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
223   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
224   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
225   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
226   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
227 };
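/*
 * The map above holds (unit-mask << 8 | event-select) encodings of the
 * architectural perfmon events, e.g. 0x003c is UNHALTED_CORE_CYCLES and
 * 0x412e is the LLC-miss flavour of event 0x2e.
 */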
228
229 static struct event_constraint intel_core_event_constraints[] =
230 {
231         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
232         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
233         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
234         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
235         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
236         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
237         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
238         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
239         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
240         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
241         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
242         EVENT_CONSTRAINT_END
243 };
244
245 static struct event_constraint intel_nehalem_event_constraints[] =
246 {
247         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
248         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
249         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
250         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
251         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
252         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
253         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
254         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
255         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
256         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
257         EVENT_CONSTRAINT_END
258 };
259
260 static struct event_constraint intel_westmere_event_constraints[] =
261 {
262         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
263         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
264         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
265         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
266         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
267         EVENT_CONSTRAINT_END
268 };
269
270 static struct event_constraint intel_gen_event_constraints[] =
271 {
272         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
273         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
274         EVENT_CONSTRAINT_END
275 };
276
277 static u64 intel_pmu_event_map(int hw_event)
278 {
279         return intel_perfmon_event_map[hw_event];
280 }
281
282 /*
283  * Generalized hw caching related hw_event table, filled
284  * in on a per model basis. A value of 0 means
285  * 'not supported', -1 means 'hw_event makes no sense on
286  * this CPU', any other value means the raw hw_event
287  * ID.
288  */
289
290 #define C(x) PERF_COUNT_HW_CACHE_##x
291
292 static u64 __read_mostly hw_cache_event_ids
293                                 [PERF_COUNT_HW_CACHE_MAX]
294                                 [PERF_COUNT_HW_CACHE_OP_MAX]
295                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
296
297 static __initconst u64 westmere_hw_cache_event_ids
298                                 [PERF_COUNT_HW_CACHE_MAX]
299                                 [PERF_COUNT_HW_CACHE_OP_MAX]
300                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
301 {
302  [ C(L1D) ] = {
303         [ C(OP_READ) ] = {
304                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
305                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
306         },
307         [ C(OP_WRITE) ] = {
308                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
309                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
310         },
311         [ C(OP_PREFETCH) ] = {
312                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
313                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
314         },
315  },
316  [ C(L1I ) ] = {
317         [ C(OP_READ) ] = {
318                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
319                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
320         },
321         [ C(OP_WRITE) ] = {
322                 [ C(RESULT_ACCESS) ] = -1,
323                 [ C(RESULT_MISS)   ] = -1,
324         },
325         [ C(OP_PREFETCH) ] = {
326                 [ C(RESULT_ACCESS) ] = 0x0,
327                 [ C(RESULT_MISS)   ] = 0x0,
328         },
329  },
330  [ C(LL  ) ] = {
331         [ C(OP_READ) ] = {
332                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
333                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
334         },
335         [ C(OP_WRITE) ] = {
336                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
337                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
338         },
339         [ C(OP_PREFETCH) ] = {
340                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
341                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
342         },
343  },
344  [ C(DTLB) ] = {
345         [ C(OP_READ) ] = {
346                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
347                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
348         },
349         [ C(OP_WRITE) ] = {
350                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
351                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
352         },
353         [ C(OP_PREFETCH) ] = {
354                 [ C(RESULT_ACCESS) ] = 0x0,
355                 [ C(RESULT_MISS)   ] = 0x0,
356         },
357  },
358  [ C(ITLB) ] = {
359         [ C(OP_READ) ] = {
360                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
361                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
362         },
363         [ C(OP_WRITE) ] = {
364                 [ C(RESULT_ACCESS) ] = -1,
365                 [ C(RESULT_MISS)   ] = -1,
366         },
367         [ C(OP_PREFETCH) ] = {
368                 [ C(RESULT_ACCESS) ] = -1,
369                 [ C(RESULT_MISS)   ] = -1,
370         },
371  },
372  [ C(BPU ) ] = {
373         [ C(OP_READ) ] = {
374                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
375                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
376         },
377         [ C(OP_WRITE) ] = {
378                 [ C(RESULT_ACCESS) ] = -1,
379                 [ C(RESULT_MISS)   ] = -1,
380         },
381         [ C(OP_PREFETCH) ] = {
382                 [ C(RESULT_ACCESS) ] = -1,
383                 [ C(RESULT_MISS)   ] = -1,
384         },
385  },
386 };
387
388 static __initconst u64 nehalem_hw_cache_event_ids
389                                 [PERF_COUNT_HW_CACHE_MAX]
390                                 [PERF_COUNT_HW_CACHE_OP_MAX]
391                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
392 {
393  [ C(L1D) ] = {
394         [ C(OP_READ) ] = {
395                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
396                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
397         },
398         [ C(OP_WRITE) ] = {
399                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
400                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
401         },
402         [ C(OP_PREFETCH) ] = {
403                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
404                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
405         },
406  },
407  [ C(L1I ) ] = {
408         [ C(OP_READ) ] = {
409                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
410                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
411         },
412         [ C(OP_WRITE) ] = {
413                 [ C(RESULT_ACCESS) ] = -1,
414                 [ C(RESULT_MISS)   ] = -1,
415         },
416         [ C(OP_PREFETCH) ] = {
417                 [ C(RESULT_ACCESS) ] = 0x0,
418                 [ C(RESULT_MISS)   ] = 0x0,
419         },
420  },
421  [ C(LL  ) ] = {
422         [ C(OP_READ) ] = {
423                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
424                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
425         },
426         [ C(OP_WRITE) ] = {
427                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
428                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
429         },
430         [ C(OP_PREFETCH) ] = {
431                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
432                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
433         },
434  },
435  [ C(DTLB) ] = {
436         [ C(OP_READ) ] = {
437                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
438                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
439         },
440         [ C(OP_WRITE) ] = {
441                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
442                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
443         },
444         [ C(OP_PREFETCH) ] = {
445                 [ C(RESULT_ACCESS) ] = 0x0,
446                 [ C(RESULT_MISS)   ] = 0x0,
447         },
448  },
449  [ C(ITLB) ] = {
450         [ C(OP_READ) ] = {
451                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
452                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
453         },
454         [ C(OP_WRITE) ] = {
455                 [ C(RESULT_ACCESS) ] = -1,
456                 [ C(RESULT_MISS)   ] = -1,
457         },
458         [ C(OP_PREFETCH) ] = {
459                 [ C(RESULT_ACCESS) ] = -1,
460                 [ C(RESULT_MISS)   ] = -1,
461         },
462  },
463  [ C(BPU ) ] = {
464         [ C(OP_READ) ] = {
465                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
466                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
467         },
468         [ C(OP_WRITE) ] = {
469                 [ C(RESULT_ACCESS) ] = -1,
470                 [ C(RESULT_MISS)   ] = -1,
471         },
472         [ C(OP_PREFETCH) ] = {
473                 [ C(RESULT_ACCESS) ] = -1,
474                 [ C(RESULT_MISS)   ] = -1,
475         },
476  },
477 };
478
479 static __initconst u64 core2_hw_cache_event_ids
480                                 [PERF_COUNT_HW_CACHE_MAX]
481                                 [PERF_COUNT_HW_CACHE_OP_MAX]
482                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
483 {
484  [ C(L1D) ] = {
485         [ C(OP_READ) ] = {
486                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
487                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
488         },
489         [ C(OP_WRITE) ] = {
490                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
491                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
492         },
493         [ C(OP_PREFETCH) ] = {
494                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
495                 [ C(RESULT_MISS)   ] = 0,
496         },
497  },
498  [ C(L1I ) ] = {
499         [ C(OP_READ) ] = {
500                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
501                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
502         },
503         [ C(OP_WRITE) ] = {
504                 [ C(RESULT_ACCESS) ] = -1,
505                 [ C(RESULT_MISS)   ] = -1,
506         },
507         [ C(OP_PREFETCH) ] = {
508                 [ C(RESULT_ACCESS) ] = 0,
509                 [ C(RESULT_MISS)   ] = 0,
510         },
511  },
512  [ C(LL  ) ] = {
513         [ C(OP_READ) ] = {
514                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
515                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
516         },
517         [ C(OP_WRITE) ] = {
518                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
519                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
520         },
521         [ C(OP_PREFETCH) ] = {
522                 [ C(RESULT_ACCESS) ] = 0,
523                 [ C(RESULT_MISS)   ] = 0,
524         },
525  },
526  [ C(DTLB) ] = {
527         [ C(OP_READ) ] = {
528                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
529                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
530         },
531         [ C(OP_WRITE) ] = {
532                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
533                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
534         },
535         [ C(OP_PREFETCH) ] = {
536                 [ C(RESULT_ACCESS) ] = 0,
537                 [ C(RESULT_MISS)   ] = 0,
538         },
539  },
540  [ C(ITLB) ] = {
541         [ C(OP_READ) ] = {
542                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
543                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
544         },
545         [ C(OP_WRITE) ] = {
546                 [ C(RESULT_ACCESS) ] = -1,
547                 [ C(RESULT_MISS)   ] = -1,
548         },
549         [ C(OP_PREFETCH) ] = {
550                 [ C(RESULT_ACCESS) ] = -1,
551                 [ C(RESULT_MISS)   ] = -1,
552         },
553  },
554  [ C(BPU ) ] = {
555         [ C(OP_READ) ] = {
556                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
557                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
558         },
559         [ C(OP_WRITE) ] = {
560                 [ C(RESULT_ACCESS) ] = -1,
561                 [ C(RESULT_MISS)   ] = -1,
562         },
563         [ C(OP_PREFETCH) ] = {
564                 [ C(RESULT_ACCESS) ] = -1,
565                 [ C(RESULT_MISS)   ] = -1,
566         },
567  },
568 };
569
570 static __initconst u64 atom_hw_cache_event_ids
571                                 [PERF_COUNT_HW_CACHE_MAX]
572                                 [PERF_COUNT_HW_CACHE_OP_MAX]
573                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
574 {
575  [ C(L1D) ] = {
576         [ C(OP_READ) ] = {
577                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
578                 [ C(RESULT_MISS)   ] = 0,
579         },
580         [ C(OP_WRITE) ] = {
581                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
582                 [ C(RESULT_MISS)   ] = 0,
583         },
584         [ C(OP_PREFETCH) ] = {
585                 [ C(RESULT_ACCESS) ] = 0x0,
586                 [ C(RESULT_MISS)   ] = 0,
587         },
588  },
589  [ C(L1I ) ] = {
590         [ C(OP_READ) ] = {
591                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
592                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
593         },
594         [ C(OP_WRITE) ] = {
595                 [ C(RESULT_ACCESS) ] = -1,
596                 [ C(RESULT_MISS)   ] = -1,
597         },
598         [ C(OP_PREFETCH) ] = {
599                 [ C(RESULT_ACCESS) ] = 0,
600                 [ C(RESULT_MISS)   ] = 0,
601         },
602  },
603  [ C(LL  ) ] = {
604         [ C(OP_READ) ] = {
605                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
606                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
607         },
608         [ C(OP_WRITE) ] = {
609                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
610                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
611         },
612         [ C(OP_PREFETCH) ] = {
613                 [ C(RESULT_ACCESS) ] = 0,
614                 [ C(RESULT_MISS)   ] = 0,
615         },
616  },
617  [ C(DTLB) ] = {
618         [ C(OP_READ) ] = {
619                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
620                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
621         },
622         [ C(OP_WRITE) ] = {
623                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
624                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
625         },
626         [ C(OP_PREFETCH) ] = {
627                 [ C(RESULT_ACCESS) ] = 0,
628                 [ C(RESULT_MISS)   ] = 0,
629         },
630  },
631  [ C(ITLB) ] = {
632         [ C(OP_READ) ] = {
633                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
634                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
635         },
636         [ C(OP_WRITE) ] = {
637                 [ C(RESULT_ACCESS) ] = -1,
638                 [ C(RESULT_MISS)   ] = -1,
639         },
640         [ C(OP_PREFETCH) ] = {
641                 [ C(RESULT_ACCESS) ] = -1,
642                 [ C(RESULT_MISS)   ] = -1,
643         },
644  },
645  [ C(BPU ) ] = {
646         [ C(OP_READ) ] = {
647                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
648                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
649         },
650         [ C(OP_WRITE) ] = {
651                 [ C(RESULT_ACCESS) ] = -1,
652                 [ C(RESULT_MISS)   ] = -1,
653         },
654         [ C(OP_PREFETCH) ] = {
655                 [ C(RESULT_ACCESS) ] = -1,
656                 [ C(RESULT_MISS)   ] = -1,
657         },
658  },
659 };
660
661 static u64 intel_pmu_raw_event(u64 hw_event)
662 {
663 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
664 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
665 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
666 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
667 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
668
669 #define CORE_EVNTSEL_MASK               \
670         (INTEL_ARCH_EVTSEL_MASK |       \
671          INTEL_ARCH_UNIT_MASK   |       \
672          INTEL_ARCH_EDGE_MASK   |       \
673          INTEL_ARCH_INV_MASK    |       \
674          INTEL_ARCH_CNT_MASK)
675
676         return hw_event & CORE_EVNTSEL_MASK;
677 }
678
679 static __initconst u64 amd_hw_cache_event_ids
680                                 [PERF_COUNT_HW_CACHE_MAX]
681                                 [PERF_COUNT_HW_CACHE_OP_MAX]
682                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
683 {
684  [ C(L1D) ] = {
685         [ C(OP_READ) ] = {
686                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
687                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
688         },
689         [ C(OP_WRITE) ] = {
690                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
691                 [ C(RESULT_MISS)   ] = 0,
692         },
693         [ C(OP_PREFETCH) ] = {
694                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
695                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
696         },
697  },
698  [ C(L1I ) ] = {
699         [ C(OP_READ) ] = {
700                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
701                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
702         },
703         [ C(OP_WRITE) ] = {
704                 [ C(RESULT_ACCESS) ] = -1,
705                 [ C(RESULT_MISS)   ] = -1,
706         },
707         [ C(OP_PREFETCH) ] = {
708                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
709                 [ C(RESULT_MISS)   ] = 0,
710         },
711  },
712  [ C(LL  ) ] = {
713         [ C(OP_READ) ] = {
714                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
715                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
716         },
717         [ C(OP_WRITE) ] = {
718                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
719                 [ C(RESULT_MISS)   ] = 0,
720         },
721         [ C(OP_PREFETCH) ] = {
722                 [ C(RESULT_ACCESS) ] = 0,
723                 [ C(RESULT_MISS)   ] = 0,
724         },
725  },
726  [ C(DTLB) ] = {
727         [ C(OP_READ) ] = {
728                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
729                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
730         },
731         [ C(OP_WRITE) ] = {
732                 [ C(RESULT_ACCESS) ] = 0,
733                 [ C(RESULT_MISS)   ] = 0,
734         },
735         [ C(OP_PREFETCH) ] = {
736                 [ C(RESULT_ACCESS) ] = 0,
737                 [ C(RESULT_MISS)   ] = 0,
738         },
739  },
740  [ C(ITLB) ] = {
741         [ C(OP_READ) ] = {
742                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
743                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
744         },
745         [ C(OP_WRITE) ] = {
746                 [ C(RESULT_ACCESS) ] = -1,
747                 [ C(RESULT_MISS)   ] = -1,
748         },
749         [ C(OP_PREFETCH) ] = {
750                 [ C(RESULT_ACCESS) ] = -1,
751                 [ C(RESULT_MISS)   ] = -1,
752         },
753  },
754  [ C(BPU ) ] = {
755         [ C(OP_READ) ] = {
756                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
757                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
758         },
759         [ C(OP_WRITE) ] = {
760                 [ C(RESULT_ACCESS) ] = -1,
761                 [ C(RESULT_MISS)   ] = -1,
762         },
763         [ C(OP_PREFETCH) ] = {
764                 [ C(RESULT_ACCESS) ] = -1,
765                 [ C(RESULT_MISS)   ] = -1,
766         },
767  },
768 };
769
770 /*
771  * AMD Performance Monitor K7 and later.
772  */
773 static const u64 amd_perfmon_event_map[] =
774 {
775   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
776   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
777   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
778   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
779   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
780   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
781 };
782
783 static u64 amd_pmu_event_map(int hw_event)
784 {
785         return amd_perfmon_event_map[hw_event];
786 }
787
788 static u64 amd_pmu_raw_event(u64 hw_event)
789 {
790 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
791 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
792 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
793 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
794 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
795
796 #define K7_EVNTSEL_MASK                 \
797         (K7_EVNTSEL_EVENT_MASK |        \
798          K7_EVNTSEL_UNIT_MASK  |        \
799          K7_EVNTSEL_EDGE_MASK  |        \
800          K7_EVNTSEL_INV_MASK   |        \
801          K7_EVNTSEL_REG_MASK)
802
803         return hw_event & K7_EVNTSEL_MASK;
804 }
805
806 /*
807  * Propagate event elapsed time into the generic event.
808  * Can only be executed on the CPU where the event is active.
809  * Returns the new raw count.
810  */
811 static u64
812 x86_perf_event_update(struct perf_event *event,
813                         struct hw_perf_event *hwc, int idx)
814 {
815         int shift = 64 - x86_pmu.event_bits;
816         u64 prev_raw_count, new_raw_count;
817         s64 delta;
818
819         if (idx == X86_PMC_IDX_FIXED_BTS)
820                 return 0;
821
822         /*
823          * Careful: an NMI might modify the previous event value.
824          *
825          * Our tactic to handle this is to first atomically read and
826          * exchange a new raw count - then add that new-prev delta
827          * count to the generic event atomically:
828          */
829 again:
830         prev_raw_count = atomic64_read(&hwc->prev_count);
831         rdmsrl(hwc->event_base + idx, new_raw_count);
832
833         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
834                                         new_raw_count) != prev_raw_count)
835                 goto again;
836
837         /*
838          * Now we have the new raw value and have updated the prev
839          * timestamp already. We can now calculate the elapsed delta
840          * (event-)time and add that to the generic event.
841          *
842          * Careful, not all hw sign-extends above the physical width
843          * of the count.
844          */
845         delta = (new_raw_count << shift) - (prev_raw_count << shift);
846         delta >>= shift;
847
848         atomic64_add(delta, &event->count);
849         atomic64_sub(delta, &hwc->period_left);
850
851         return new_raw_count;
852 }
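/*
 * Worked example for the shift trick above: with 40-bit counters,
 * event_bits == 40 and shift == 24.  Shifting both raw values up by 24
 * aligns counter bit 39 with register bit 63, discarding whatever the
 * hardware reported above the counter width, so the subtraction and the
 * arithmetic shift back down yield the delta modulo the counter width.
 */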
853
854 static atomic_t active_events;
855 static DEFINE_MUTEX(pmc_reserve_mutex);
856
857 static bool reserve_pmc_hardware(void)
858 {
859 #ifdef CONFIG_X86_LOCAL_APIC
860         int i;
861
862         if (nmi_watchdog == NMI_LOCAL_APIC)
863                 disable_lapic_nmi_watchdog();
864
865         for (i = 0; i < x86_pmu.num_events; i++) {
866                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
867                         goto perfctr_fail;
868         }
869
870         for (i = 0; i < x86_pmu.num_events; i++) {
871                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
872                         goto eventsel_fail;
873         }
874 #endif
875
876         return true;
877
878 #ifdef CONFIG_X86_LOCAL_APIC
879 eventsel_fail:
880         for (i--; i >= 0; i--)
881                 release_evntsel_nmi(x86_pmu.eventsel + i);
882
883         i = x86_pmu.num_events;
884
885 perfctr_fail:
886         for (i--; i >= 0; i--)
887                 release_perfctr_nmi(x86_pmu.perfctr + i);
888
889         if (nmi_watchdog == NMI_LOCAL_APIC)
890                 enable_lapic_nmi_watchdog();
891
892         return false;
893 #endif
894 }
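/*
 * Note the unwind above: on eventsel_fail only the i event-select MSRs
 * reserved so far are released, then i is reset to num_events so the
 * perfctr_fail loop releases every counter MSR (all of them were reserved
 * successfully before the event-select loop ran).
 */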
895
896 static void release_pmc_hardware(void)
897 {
898 #ifdef CONFIG_X86_LOCAL_APIC
899         int i;
900
901         for (i = 0; i < x86_pmu.num_events; i++) {
902                 release_perfctr_nmi(x86_pmu.perfctr + i);
903                 release_evntsel_nmi(x86_pmu.eventsel + i);
904         }
905
906         if (nmi_watchdog == NMI_LOCAL_APIC)
907                 enable_lapic_nmi_watchdog();
908 #endif
909 }
910
911 static inline bool bts_available(void)
912 {
913         return x86_pmu.enable_bts != NULL;
914 }
915
916 static inline void init_debug_store_on_cpu(int cpu)
917 {
918         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
919
920         if (!ds)
921                 return;
922
923         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
924                      (u32)((u64)(unsigned long)ds),
925                      (u32)((u64)(unsigned long)ds >> 32));
926 }
927
928 static inline void fini_debug_store_on_cpu(int cpu)
929 {
930         if (!per_cpu(cpu_hw_events, cpu).ds)
931                 return;
932
933         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
934 }
935
936 static void release_bts_hardware(void)
937 {
938         int cpu;
939
940         if (!bts_available())
941                 return;
942
943         get_online_cpus();
944
945         for_each_online_cpu(cpu)
946                 fini_debug_store_on_cpu(cpu);
947
948         for_each_possible_cpu(cpu) {
949                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
950
951                 if (!ds)
952                         continue;
953
954                 per_cpu(cpu_hw_events, cpu).ds = NULL;
955
956                 kfree((void *)(unsigned long)ds->bts_buffer_base);
957                 kfree(ds);
958         }
959
960         put_online_cpus();
961 }
962
963 static int reserve_bts_hardware(void)
964 {
965         int cpu, err = 0;
966
967         if (!bts_available())
968                 return 0;
969
970         get_online_cpus();
971
972         for_each_possible_cpu(cpu) {
973                 struct debug_store *ds;
974                 void *buffer;
975
976                 err = -ENOMEM;
977                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
978                 if (unlikely(!buffer))
979                         break;
980
981                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
982                 if (unlikely(!ds)) {
983                         kfree(buffer);
984                         break;
985                 }
986
987                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
988                 ds->bts_index = ds->bts_buffer_base;
989                 ds->bts_absolute_maximum =
990                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
991                 ds->bts_interrupt_threshold =
992                         ds->bts_absolute_maximum - BTS_OVFL_TH;
993
994                 per_cpu(cpu_hw_events, cpu).ds = ds;
995                 err = 0;
996         }
997
998         if (err)
999                 release_bts_hardware();
1000         else {
1001                 for_each_online_cpu(cpu)
1002                         init_debug_store_on_cpu(cpu);
1003         }
1004
1005         put_online_cpus();
1006
1007         return err;
1008 }
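/*
 * In short: reserve_bts_hardware() allocates a BTS_BUFFER_SIZE buffer plus a
 * struct debug_store for every possible cpu and, on success, points the
 * DS-area MSR of each online cpu at it; release_bts_hardware() undoes both
 * steps.
 */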
1009
1010 static void hw_perf_event_destroy(struct perf_event *event)
1011 {
1012         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
1013                 release_pmc_hardware();
1014                 release_bts_hardware();
1015                 mutex_unlock(&pmc_reserve_mutex);
1016         }
1017 }
1018
1019 static inline int x86_pmu_initialized(void)
1020 {
1021         return x86_pmu.handle_irq != NULL;
1022 }
1023
1024 static inline int
1025 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
1026 {
1027         unsigned int cache_type, cache_op, cache_result;
1028         u64 config, val;
1029
1030         config = attr->config;
1031
1032         cache_type = (config >>  0) & 0xff;
1033         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
1034                 return -EINVAL;
1035
1036         cache_op = (config >>  8) & 0xff;
1037         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
1038                 return -EINVAL;
1039
1040         cache_result = (config >> 16) & 0xff;
1041         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1042                 return -EINVAL;
1043
1044         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
1045
1046         if (val == 0)
1047                 return -ENOENT;
1048
1049         if (val == -1)
1050                 return -EINVAL;
1051
1052         hwc->config |= val;
1053
1054         return 0;
1055 }
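/*
 * set_ext_hw_attr() above decodes a PERF_TYPE_HW_CACHE config, which packs
 * (cache, op, result) into the low three bytes.  Illustrative example of the
 * user-space side (not part of this file): an L1D read-miss event would use
 *
 *   attr.type   = PERF_TYPE_HW_CACHE;
 *   attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */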
1056
1057 static void intel_pmu_enable_bts(u64 config)
1058 {
1059         unsigned long debugctlmsr;
1060
1061         debugctlmsr = get_debugctlmsr();
1062
1063         debugctlmsr |= X86_DEBUGCTL_TR;
1064         debugctlmsr |= X86_DEBUGCTL_BTS;
1065         debugctlmsr |= X86_DEBUGCTL_BTINT;
1066
1067         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
1068                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
1069
1070         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
1071                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
1072
1073         update_debugctlmsr(debugctlmsr);
1074 }
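/*
 * The bits set above (see the X86_DEBUGCTL_* definitions): TR enables branch
 * trace messages, BTS redirects them into the debug-store buffer, BTINT asks
 * for an interrupt at the buffer threshold, and BTS_OFF_OS/BTS_OFF_USR
 * suppress ring-0 / ring-3 recording when the event excludes kernel or user
 * mode respectively.
 */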
1075
1076 static void intel_pmu_disable_bts(void)
1077 {
1078         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1079         unsigned long debugctlmsr;
1080
1081         if (!cpuc->ds)
1082                 return;
1083
1084         debugctlmsr = get_debugctlmsr();
1085
1086         debugctlmsr &=
1087                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
1088                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
1089
1090         update_debugctlmsr(debugctlmsr);
1091 }
1092
1093 /*
1094  * Setup the hardware configuration for a given attr_type
1095  */
1096 static int __hw_perf_event_init(struct perf_event *event)
1097 {
1098         struct perf_event_attr *attr = &event->attr;
1099         struct hw_perf_event *hwc = &event->hw;
1100         u64 config;
1101         int err;
1102
1103         if (!x86_pmu_initialized())
1104                 return -ENODEV;
1105
1106         err = 0;
1107         if (!atomic_inc_not_zero(&active_events)) {
1108                 mutex_lock(&pmc_reserve_mutex);
1109                 if (atomic_read(&active_events) == 0) {
1110                         if (!reserve_pmc_hardware())
1111                                 err = -EBUSY;
1112                         else
1113                                 err = reserve_bts_hardware();
1114                 }
1115                 if (!err)
1116                         atomic_inc(&active_events);
1117                 mutex_unlock(&pmc_reserve_mutex);
1118         }
1119         if (err)
1120                 return err;
1121
1122         event->destroy = hw_perf_event_destroy;
1123
1124         /*
1125          * Generate PMC IRQs:
1126          * (keep 'enabled' bit clear for now)
1127          */
1128         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1129
1130         hwc->idx = -1;
1131
1132         /*
1133          * Count user and OS events unless requested not to.
1134          */
1135         if (!attr->exclude_user)
1136                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1137         if (!attr->exclude_kernel)
1138                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1139
1140         if (!hwc->sample_period) {
1141                 hwc->sample_period = x86_pmu.max_period;
1142                 hwc->last_period = hwc->sample_period;
1143                 atomic64_set(&hwc->period_left, hwc->sample_period);
1144         } else {
1145                 /*
1146                  * If we have a PMU initialized but no APIC
1147                  * interrupts, we cannot sample hardware
1148                  * events (user-space has to fall back and
1149                  * sample via a hrtimer based software event):
1150                  */
1151                 if (!x86_pmu.apic)
1152                         return -EOPNOTSUPP;
1153         }
1154
1155         /*
1156          * Raw hw_event type provides the config in the hw_event structure
1157          */
1158         if (attr->type == PERF_TYPE_RAW) {
1159                 hwc->config |= x86_pmu.raw_event(attr->config);
1160                 return 0;
1161         }
1162
1163         if (attr->type == PERF_TYPE_HW_CACHE)
1164                 return set_ext_hw_attr(hwc, attr);
1165
1166         if (attr->config >= x86_pmu.max_events)
1167                 return -EINVAL;
1168
1169         /*
1170          * The generic map:
1171          */
1172         config = x86_pmu.event_map(attr->config);
1173
1174         if (config == 0)
1175                 return -ENOENT;
1176
1177         if (config == -1LL)
1178                 return -EINVAL;
1179
1180         /*
1181          * Branch tracing:
1182          */
1183         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1184             (hwc->sample_period == 1)) {
1185                 /* BTS is not supported by this architecture. */
1186                 if (!bts_available())
1187                         return -EOPNOTSUPP;
1188
1189                 /* BTS is currently only allowed for user-mode. */
1190                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1191                         return -EOPNOTSUPP;
1192         }
1193
1194         hwc->config |= config;
1195
1196         return 0;
1197 }
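/*
 * Summary of the config selection above: PERF_TYPE_RAW configs are only
 * sanitized via ->raw_event(), PERF_TYPE_HW_CACHE goes through the cache
 * table, everything else through ->event_map(); a branch-instruction event
 * with sample_period == 1 is additionally treated as a BTS request, which
 * requires BTS hardware and is restricted to user mode here.
 */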
1198
1199 static void p6_pmu_disable_all(void)
1200 {
1201         u64 val;
1202
1203         /* p6 only has one enable register */
1204         rdmsrl(MSR_P6_EVNTSEL0, val);
1205         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1206         wrmsrl(MSR_P6_EVNTSEL0, val);
1207 }
1208
1209 static void intel_pmu_disable_all(void)
1210 {
1211         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1212
1213         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1214
1215         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1216                 intel_pmu_disable_bts();
1217 }
1218
1219 static void amd_pmu_disable_all(void)
1220 {
1221         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1222         int idx;
1223
1224         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1225                 u64 val;
1226
1227                 if (!test_bit(idx, cpuc->active_mask))
1228                         continue;
1229                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1230                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1231                         continue;
1232                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1233                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1234         }
1235 }
1236
1237 void hw_perf_disable(void)
1238 {
1239         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1240
1241         if (!x86_pmu_initialized())
1242                 return;
1243
1244         if (!cpuc->enabled)
1245                 return;
1246
1247         cpuc->n_added = 0;
1248         cpuc->enabled = 0;
1249         barrier();
1250
1251         x86_pmu.disable_all();
1252 }
1253
1254 static void p6_pmu_enable_all(void)
1255 {
1256         unsigned long val;
1257
1258         /* p6 only has one enable register */
1259         rdmsrl(MSR_P6_EVNTSEL0, val);
1260         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1261         wrmsrl(MSR_P6_EVNTSEL0, val);
1262 }
1263
1264 static void intel_pmu_enable_all(void)
1265 {
1266         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1267
1268         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1269
1270         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1271                 struct perf_event *event =
1272                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1273
1274                 if (WARN_ON_ONCE(!event))
1275                         return;
1276
1277                 intel_pmu_enable_bts(event->hw.config);
1278         }
1279 }
1280
1281 static void amd_pmu_enable_all(void)
1282 {
1283         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1284         int idx;
1285
1286         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1287                 struct perf_event *event = cpuc->events[idx];
1288                 u64 val;
1289
1290                 if (!test_bit(idx, cpuc->active_mask))
1291                         continue;
1292
1293                 val = event->hw.config;
1294                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1295                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1296         }
1297 }
1298
1299 static const struct pmu pmu;
1300
1301 static inline int is_x86_event(struct perf_event *event)
1302 {
1303         return event->pmu == &pmu;
1304 }
1305
1306 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1307 {
1308         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1309         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1310         int i, j, w, wmax, num = 0;
1311         struct hw_perf_event *hwc;
1312
1313         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1314
1315         for (i = 0; i < n; i++) {
1316                 constraints[i] =
1317                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1318         }
1319
1320         /*
1321          * fastpath, try to reuse previous register
1322          */
1323         for (i = 0; i < n; i++) {
1324                 hwc = &cpuc->event_list[i]->hw;
1325                 c = constraints[i];
1326
1327                 /* never assigned */
1328                 if (hwc->idx == -1)
1329                         break;
1330
1331                 /* constraint still honored */
1332                 if (!test_bit(hwc->idx, c->idxmsk))
1333                         break;
1334
1335                 /* not already used */
1336                 if (test_bit(hwc->idx, used_mask))
1337                         break;
1338
1339                 set_bit(hwc->idx, used_mask);
1340                 if (assign)
1341                         assign[i] = hwc->idx;
1342         }
1343         if (i == n)
1344                 goto done;
1345
1346         /*
1347          * begin slow path
1348          */
1349
1350         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1351
1352         /*
1353          * weight = number of possible counters
1354          *
1355          * 1    = most constrained, only works on one counter
1356          * wmax = least constrained, works on any counter
1357          *
1358          * assign events to counters starting with most
1359          * constrained events.
1360          */
1361         wmax = x86_pmu.num_events;
1362
1363         /*
1364          * when fixed event counters are present,
1365          * wmax is incremented by 1 to account
1366          * for one more choice
1367          */
1368         if (x86_pmu.num_events_fixed)
1369                 wmax++;
1370
1371         for (w = 1, num = n; num && w <= wmax; w++) {
1372                 /* for each event */
1373                 for (i = 0; num && i < n; i++) {
1374                         c = constraints[i];
1375                         hwc = &cpuc->event_list[i]->hw;
1376
1377                         if (c->weight != w)
1378                                 continue;
1379
1380                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1381                                 if (!test_bit(j, used_mask))
1382                                         break;
1383                         }
1384
1385                         if (j == X86_PMC_IDX_MAX)
1386                                 break;
1387
1388                         set_bit(j, used_mask);
1389
1390                         if (assign)
1391                                 assign[i] = j;
1392                         num--;
1393                 }
1394         }
1395 done:
1396         /*
1397          * scheduling failed or is just a simulation,
1398          * free resources if necessary
1399          */
1400         if (!assign || num) {
1401                 for (i = 0; i < n; i++) {
1402                         if (x86_pmu.put_event_constraints)
1403                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1404                 }
1405         }
1406         return num ? -ENOSPC : 0;
1407 }
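/*
 * Scheduling strategy used above: the fastpath keeps every event on the
 * counter it already had, provided that counter is still allowed by the
 * event's constraint and not yet taken.  If any event fails that test, the
 * slow path reschedules everything from scratch, handing out counters in
 * order of increasing constraint weight so the most constrained events get
 * first pick.
 */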
1408
1409 /*
1410  * dogrp: true if we must collect sibling events (group)
1411  * returns total number of events and error code
1412  */
1413 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1414 {
1415         struct perf_event *event;
1416         int n, max_count;
1417
1418         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1419
1420         /* current number of events already accepted */
1421         n = cpuc->n_events;
1422
1423         if (is_x86_event(leader)) {
1424                 if (n >= max_count)
1425                         return -ENOSPC;
1426                 cpuc->event_list[n] = leader;
1427                 n++;
1428         }
1429         if (!dogrp)
1430                 return n;
1431
1432         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1433                 if (!is_x86_event(event) ||
1434                     event->state <= PERF_EVENT_STATE_OFF)
1435                         continue;
1436
1437                 if (n >= max_count)
1438                         return -ENOSPC;
1439
1440                 cpuc->event_list[n] = event;
1441                 n++;
1442         }
1443         return n;
1444 }
1445
1446
1447 static inline void x86_assign_hw_event(struct perf_event *event,
1448                                 struct hw_perf_event *hwc, int idx)
1449 {
1450         hwc->idx = idx;
1451
1452         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1453                 hwc->config_base = 0;
1454                 hwc->event_base = 0;
1455         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1456                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1457                 /*
1458                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1459                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1460                  */
1461                 hwc->event_base =
1462                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1463         } else {
1464                 hwc->config_base = x86_pmu.eventsel;
1465                 hwc->event_base  = x86_pmu.perfctr;
1466         }
1467 }
1468
1469 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
1470
1471 void hw_perf_enable(void)
1472 {
1473         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1474         struct perf_event *event;
1475         struct hw_perf_event *hwc;
1476         int i;
1477
1478         if (!x86_pmu_initialized())
1479                 return;
1480
1481         if (cpuc->enabled)
1482                 return;
1483
1484         if (cpuc->n_added) {
1485                 /*
1486                  * apply assignment obtained either from
1487                  * hw_perf_group_sched_in() or x86_pmu_enable()
1488                  *
1489                  * step1: save events moving to new counters
1490                  * step2: reprogram moved events into new counters
1491                  */
1492                 for (i = 0; i < cpuc->n_events; i++) {
1493
1494                         event = cpuc->event_list[i];
1495                         hwc = &event->hw;
1496
1497                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1498                                 continue;
1499
1500                         __x86_pmu_disable(event, cpuc);
1501
1502                         hwc->idx = -1;
1503                 }
1504
1505                 for (i = 0; i < cpuc->n_events; i++) {
1506
1507                         event = cpuc->event_list[i];
1508                         hwc = &event->hw;
1509
1510                         if (hwc->idx == -1) {
1511                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1512                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1513                         }
1514                         /*
1515                          * need to mark as active because x86_pmu_disable()
1516                          * clears active_mask and events[] yet preserves
1517                          * idx
1518                          */
1519                         set_bit(hwc->idx, cpuc->active_mask);
1520                         cpuc->events[hwc->idx] = event;
1521
1522                         x86_pmu.enable(hwc, hwc->idx);
1523                         perf_event_update_userpage(event);
1524                 }
1525                 cpuc->n_added = 0;
1526                 perf_events_lapic_init();
1527         }
1528
1529         cpuc->enabled = 1;
1530         barrier();
1531
1532         x86_pmu.enable_all();
1533 }
1534
1535 static inline u64 intel_pmu_get_status(void)
1536 {
1537         u64 status;
1538
1539         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1540
1541         return status;
1542 }
1543
1544 static inline void intel_pmu_ack_status(u64 ack)
1545 {
1546         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1547 }
1548
1549 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1550 {
1551         (void)checking_wrmsrl(hwc->config_base + idx,
1552                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1553 }
1554
1555 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1556 {
1557         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1558 }
1559
1560 static inline void
1561 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1562 {
1563         int idx = __idx - X86_PMC_IDX_FIXED;
1564         u64 ctrl_val, mask;
1565
1566         mask = 0xfULL << (idx * 4);
1567
1568         rdmsrl(hwc->config_base, ctrl_val);
1569         ctrl_val &= ~mask;
1570         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1571 }
1572
1573 static inline void
1574 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1575 {
1576         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1577         u64 val = P6_NOP_EVENT;
1578
1579         if (cpuc->enabled)
1580                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1581
1582         (void)checking_wrmsrl(hwc->config_base + idx, val);
1583 }
1584
1585 static inline void
1586 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1587 {
1588         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1589                 intel_pmu_disable_bts();
1590                 return;
1591         }
1592
1593         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1594                 intel_pmu_disable_fixed(hwc, idx);
1595                 return;
1596         }
1597
1598         x86_pmu_disable_event(hwc, idx);
1599 }
1600
1601 static inline void
1602 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1603 {
1604         x86_pmu_disable_event(hwc, idx);
1605 }
1606
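/*
 * The most recently programmed period remainder ("left") per counter,
 * kept around for perf_event_print_debug().
 */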
1607 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1608
1609 /*
1610  * Set the next IRQ period, based on the hwc->period_left value.
1611  * To be called with the event disabled in hw:
1612  */
1613 static int
1614 x86_perf_event_set_period(struct perf_event *event,
1615                              struct hw_perf_event *hwc, int idx)
1616 {
1617         s64 left = atomic64_read(&hwc->period_left);
1618         s64 period = hwc->sample_period;
1619         int err, ret = 0;
1620
1621         if (idx == X86_PMC_IDX_FIXED_BTS)
1622                 return 0;
1623
1624         /*
1625          * If we are way outside a reasonable range then just skip forward:
1626          */
1627         if (unlikely(left <= -period)) {
1628                 left = period;
1629                 atomic64_set(&hwc->period_left, left);
1630                 hwc->last_period = period;
1631                 ret = 1;
1632         }
1633
1634         if (unlikely(left <= 0)) {
1635                 left += period;
1636                 atomic64_set(&hwc->period_left, left);
1637                 hwc->last_period = period;
1638                 ret = 1;
1639         }
1640         /*
1641          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1642          */
1643         if (unlikely(left < 2))
1644                 left = 2;
1645
1646         if (left > x86_pmu.max_period)
1647                 left = x86_pmu.max_period;
1648
1649         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1650
1651         /*
1652          * The hw event starts counting from this event offset,
1653          * mark it to be able to extract future deltas:
1654          */
1655         atomic64_set(&hwc->prev_count, (u64)-left);
1656
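        /*
         * Program the counter to -left so that it overflows (and raises
         * a PMI) after 'left' more increments; the value is truncated
         * to the counter width via event_mask.
         */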
1657         err = checking_wrmsrl(hwc->event_base + idx,
1658                              (u64)(-left) & x86_pmu.event_mask);
1659
1660         perf_event_update_userpage(event);
1661
1662         return ret;
1663 }
1664
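/*
 * Enable a fixed-purpose counter by rewriting its 4-bit field in the
 * shared fixed-counter control MSR. For example, OS + USR counting with
 * PMI enabled on fixed counter 1 sets bits 4-7 to 0xb.
 */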
1665 static inline void
1666 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1667 {
1668         int idx = __idx - X86_PMC_IDX_FIXED;
1669         u64 ctrl_val, bits, mask;
1670         int err;
1671
1672         /*
1673          * Enable IRQ generation (0x8),
1674          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1675          * if requested:
1676          */
1677         bits = 0x8ULL;
1678         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1679                 bits |= 0x2;
1680         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1681                 bits |= 0x1;
1682
1683         /*
1684          * ANY bit is supported in v3 and up
1685          */
1686         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1687                 bits |= 0x4;
1688
1689         bits <<= (idx * 4);
1690         mask = 0xfULL << (idx * 4);
1691
1692         rdmsrl(hwc->config_base, ctrl_val);
1693         ctrl_val &= ~mask;
1694         ctrl_val |= bits;
1695         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1696 }
1697
1698 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1699 {
1700         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1701         u64 val;
1702
1703         val = hwc->config;
1704         if (cpuc->enabled)
1705                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1706
1707         (void)checking_wrmsrl(hwc->config_base + idx, val);
1708 }
1709
1710
1711 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1712 {
1713         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1714                 if (!__get_cpu_var(cpu_hw_events).enabled)
1715                         return;
1716
1717                 intel_pmu_enable_bts(hwc->config);
1718                 return;
1719         }
1720
1721         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1722                 intel_pmu_enable_fixed(hwc, idx);
1723                 return;
1724         }
1725
1726         x86_pmu_enable_event(hwc, idx);
1727 }
1728
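/*
 * AMD PMUs have no global enable MSR, so only arm the counter here when
 * the PMU is logically enabled; otherwise hw_perf_enable() will program
 * it once the PMU is turned back on.
 */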
1729 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1730 {
1731         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1732
1733         if (cpuc->enabled)
1734                 x86_pmu_enable_event(hwc, idx);
1735 }
1736
1737 /*
1738  * activate a single event
1739  *
1740  * The event is added to the group of enabled events
1741  * but only if it can be scheduled with existing events.
1742  *
1743  * Called with the PMU disabled. On success, the caller is then
1744  * guaranteed to call perf_enable() and hw_perf_enable().
1745  */
1746 static int x86_pmu_enable(struct perf_event *event)
1747 {
1748         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1749         struct hw_perf_event *hwc;
1750         int assign[X86_PMC_IDX_MAX];
1751         int n, n0, ret;
1752
1753         hwc = &event->hw;
1754
1755         n0 = cpuc->n_events;
1756         n = collect_events(cpuc, event, false);
1757         if (n < 0)
1758                 return n;
1759
1760         ret = x86_schedule_events(cpuc, n, assign);
1761         if (ret)
1762                 return ret;
1763         /*
1764          * copy the new assignment now that we know it is possible;
1765          * it will be used by hw_perf_enable()
1766          */
1767         memcpy(cpuc->assign, assign, n*sizeof(int));
1768
1769         cpuc->n_events = n;
1770         cpuc->n_added  = n - n0;
1771
1772         return 0;
1773 }
1774
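/*
 * Re-arm an event that the interrupt handler stopped because of
 * throttling.
 */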
1775 static void x86_pmu_unthrottle(struct perf_event *event)
1776 {
1777         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1778         struct hw_perf_event *hwc = &event->hw;
1779
1780         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1781                                 cpuc->events[hwc->idx] != event))
1782                 return;
1783
1784         x86_pmu.enable(hwc, hwc->idx);
1785 }
1786
1787 void perf_event_print_debug(void)
1788 {
1789         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1790         struct cpu_hw_events *cpuc;
1791         unsigned long flags;
1792         int cpu, idx;
1793
1794         if (!x86_pmu.num_events)
1795                 return;
1796
1797         local_irq_save(flags);
1798
1799         cpu = smp_processor_id();
1800         cpuc = &per_cpu(cpu_hw_events, cpu);
1801
1802         if (x86_pmu.version >= 2) {
1803                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1804                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1805                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1806                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1807
1808                 pr_info("\n");
1809                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1810                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1811                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1812                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1813         }
1814         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1815
1816         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1817                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1818                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1819
1820                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1821
1822                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1823                         cpu, idx, pmc_ctrl);
1824                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1825                         cpu, idx, pmc_count);
1826                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1827                         cpu, idx, prev_left);
1828         }
1829         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1830                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1831
1832                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1833                         cpu, idx, pmc_count);
1834         }
1835         local_irq_restore(flags);
1836 }
1837
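/*
 * Flush the per-cpu BTS buffer: turn every raw from/to branch record
 * accumulated in the debug store area into a perf sample and push it
 * into the event's output buffer.
 */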
1838 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1839 {
1840         struct debug_store *ds = cpuc->ds;
1841         struct bts_record {
1842                 u64     from;
1843                 u64     to;
1844                 u64     flags;
1845         };
1846         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1847         struct bts_record *at, *top;
1848         struct perf_output_handle handle;
1849         struct perf_event_header header;
1850         struct perf_sample_data data;
1851         struct pt_regs regs;
1852
1853         if (!event)
1854                 return;
1855
1856         if (!ds)
1857                 return;
1858
1859         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1860         top = (struct bts_record *)(unsigned long)ds->bts_index;
1861
1862         if (top <= at)
1863                 return;
1864
1865         ds->bts_index = ds->bts_buffer_base;
1866
1867
1868         data.period     = event->hw.last_period;
1869         data.addr       = 0;
1870         data.raw        = NULL;
1871         regs.ip         = 0;
1872
1873         /*
1874          * Prepare a generic sample, i.e. fill in the invariant fields.
1875          * We will overwrite the from and to address before we output
1876          * the sample.
1877          */
1878         perf_prepare_sample(&header, &data, event, &regs);
1879
1880         if (perf_output_begin(&handle, event,
1881                               header.size * (top - at), 1, 1))
1882                 return;
1883
1884         for (; at < top; at++) {
1885                 data.ip         = at->from;
1886                 data.addr       = at->to;
1887
1888                 perf_output_sample(&handle, &header, &data, event);
1889         }
1890
1891         perf_output_end(&handle);
1892
1893         /* There's new data available. */
1894         event->hw.interrupts++;
1895         event->pending_kill = POLL_IN;
1896 }
1897
1898 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
1899 {
1900         struct hw_perf_event *hwc = &event->hw;
1901         int idx = hwc->idx;
1902
1903         /*
1904          * Must be done before we disable, otherwise the NMI handler
1905          * could re-enable it again:
1906          */
1907         clear_bit(idx, cpuc->active_mask);
1908         x86_pmu.disable(hwc, idx);
1909
1910         /*
1911          * Drain the remaining delta count out of an event
1912          * that we are disabling:
1913          */
1914         x86_perf_event_update(event, hwc, idx);
1915
1916         /* Drain the remaining BTS records. */
1917         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1918                 intel_pmu_drain_bts_buffer(cpuc);
1919
1920         cpuc->events[idx] = NULL;
1921 }
1922
1923 static void x86_pmu_disable(struct perf_event *event)
1924 {
1925         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1926         int i;
1927
1928         __x86_pmu_disable(event, cpuc);
1929
1930         for (i = 0; i < cpuc->n_events; i++) {
1931                 if (event == cpuc->event_list[i]) {
1932
1933                         if (x86_pmu.put_event_constraints)
1934                                 x86_pmu.put_event_constraints(cpuc, event);
1935
1936                         while (++i < cpuc->n_events)
1937                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1938
1939                         --cpuc->n_events;
1940                         break;
1941                 }
1942         }
1943         perf_event_update_userpage(event);
1944 }
1945
1946 /*
1947  * Save and restart an expired event. Called by NMI contexts,
1948  * so it has to be careful about preempting normal event ops:
1949  */
1950 static int intel_pmu_save_and_restart(struct perf_event *event)
1951 {
1952         struct hw_perf_event *hwc = &event->hw;
1953         int idx = hwc->idx;
1954         int ret;
1955
1956         x86_perf_event_update(event, hwc, idx);
1957         ret = x86_perf_event_set_period(event, hwc, idx);
1958
1959         if (event->state == PERF_EVENT_STATE_ACTIVE)
1960                 intel_pmu_enable_event(hwc, idx);
1961
1962         return ret;
1963 }
1964
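/*
 * Last-resort recovery: zero all generic and fixed counters and reset
 * the BTS buffer. Used when the PMI handler appears to be stuck.
 */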
1965 static void intel_pmu_reset(void)
1966 {
1967         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1968         unsigned long flags;
1969         int idx;
1970
1971         if (!x86_pmu.num_events)
1972                 return;
1973
1974         local_irq_save(flags);
1975
1976         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1977
1978         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1979                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1980                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1981         }
1982         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1983                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1984         }
1985         if (ds)
1986                 ds->bts_index = ds->bts_buffer_base;
1987
1988         local_irq_restore(flags);
1989 }
1990
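/*
 * P6 (and the AMD handler further down) have no global overflow status
 * register, so the PMI handler polls every active counter and treats a
 * cleared sign bit as an overflow.
 */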
1991 static int p6_pmu_handle_irq(struct pt_regs *regs)
1992 {
1993         struct perf_sample_data data;
1994         struct cpu_hw_events *cpuc;
1995         struct perf_event *event;
1996         struct hw_perf_event *hwc;
1997         int idx, handled = 0;
1998         u64 val;
1999
2000         data.addr = 0;
2001         data.raw = NULL;
2002
2003         cpuc = &__get_cpu_var(cpu_hw_events);
2004
2005         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2006                 if (!test_bit(idx, cpuc->active_mask))
2007                         continue;
2008
2009                 event = cpuc->events[idx];
2010                 hwc = &event->hw;
2011
2012                 val = x86_perf_event_update(event, hwc, idx);
2013                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2014                         continue;
2015
2016                 /*
2017                  * event overflow
2018                  */
2019                 handled         = 1;
2020                 data.period     = event->hw.last_period;
2021
2022                 if (!x86_perf_event_set_period(event, hwc, idx))
2023                         continue;
2024
2025                 if (perf_event_overflow(event, 1, &data, regs))
2026                         p6_pmu_disable_event(hwc, idx);
2027         }
2028
2029         if (handled)
2030                 inc_irq_stat(apic_perf_irqs);
2031
2032         return handled;
2033 }
2034
2035 /*
2036  * This handler is triggered by the local APIC, so the APIC IRQ handling
2037  * rules apply:
2038  */
2039 static int intel_pmu_handle_irq(struct pt_regs *regs)
2040 {
2041         struct perf_sample_data data;
2042         struct cpu_hw_events *cpuc;
2043         int bit, loops;
2044         u64 ack, status;
2045
2046         data.addr = 0;
2047         data.raw = NULL;
2048
2049         cpuc = &__get_cpu_var(cpu_hw_events);
2050
2051         perf_disable();
2052         intel_pmu_drain_bts_buffer(cpuc);
2053         status = intel_pmu_get_status();
2054         if (!status) {
2055                 perf_enable();
2056                 return 0;
2057         }
2058
2059         loops = 0;
2060 again:
2061         if (++loops > 100) {
2062                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2063                 perf_event_print_debug();
2064                 intel_pmu_reset();
2065                 perf_enable();
2066                 return 1;
2067         }
2068
2069         inc_irq_stat(apic_perf_irqs);
2070         ack = status;
2071         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2072                 struct perf_event *event = cpuc->events[bit];
2073
2074                 clear_bit(bit, (unsigned long *) &status);
2075                 if (!test_bit(bit, cpuc->active_mask))
2076                         continue;
2077
2078                 if (!intel_pmu_save_and_restart(event))
2079                         continue;
2080
2081                 data.period = event->hw.last_period;
2082
2083                 if (perf_event_overflow(event, 1, &data, regs))
2084                         intel_pmu_disable_event(&event->hw, bit);
2085         }
2086
2087         intel_pmu_ack_status(ack);
2088
2089         /*
2090          * Repeat if there is more work to be done:
2091          */
2092         status = intel_pmu_get_status();
2093         if (status)
2094                 goto again;
2095
2096         perf_enable();
2097
2098         return 1;
2099 }
2100
2101 static int amd_pmu_handle_irq(struct pt_regs *regs)
2102 {
2103         struct perf_sample_data data;
2104         struct cpu_hw_events *cpuc;
2105         struct perf_event *event;
2106         struct hw_perf_event *hwc;
2107         int idx, handled = 0;
2108         u64 val;
2109
2110         data.addr = 0;
2111         data.raw = NULL;
2112
2113         cpuc = &__get_cpu_var(cpu_hw_events);
2114
2115         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2116                 if (!test_bit(idx, cpuc->active_mask))
2117                         continue;
2118
2119                 event = cpuc->events[idx];
2120                 hwc = &event->hw;
2121
2122                 val = x86_perf_event_update(event, hwc, idx);
2123                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2124                         continue;
2125
2126                 /*
2127                  * event overflow
2128                  */
2129                 handled         = 1;
2130                 data.period     = event->hw.last_period;
2131
2132                 if (!x86_perf_event_set_period(event, hwc, idx))
2133                         continue;
2134
2135                 if (perf_event_overflow(event, 1, &data, regs))
2136                         amd_pmu_disable_event(hwc, idx);
2137         }
2138
2139         if (handled)
2140                 inc_irq_stat(apic_perf_irqs);
2141
2142         return handled;
2143 }
2144
2145 void smp_perf_pending_interrupt(struct pt_regs *regs)
2146 {
2147         irq_enter();
2148         ack_APIC_irq();
2149         inc_irq_stat(apic_pending_irqs);
2150         perf_event_do_pending();
2151         irq_exit();
2152 }
2153
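/*
 * Kick the pending-work vector with a self-IPI so that deferred perf
 * work runs in regular interrupt context rather than in the NMI handler.
 */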
2154 void set_perf_event_pending(void)
2155 {
2156 #ifdef CONFIG_X86_LOCAL_APIC
2157         if (!x86_pmu.apic || !x86_pmu_initialized())
2158                 return;
2159
2160         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2161 #endif
2162 }
2163
2164 void perf_events_lapic_init(void)
2165 {
2166 #ifdef CONFIG_X86_LOCAL_APIC
2167         if (!x86_pmu.apic || !x86_pmu_initialized())
2168                 return;
2169
2170         /*
2171          * Always use NMI for PMU
2172          */
2173         apic_write(APIC_LVTPC, APIC_DM_NMI);
2174 #endif
2175 }
2176
2177 static int __kprobes
2178 perf_event_nmi_handler(struct notifier_block *self,
2179                          unsigned long cmd, void *__args)
2180 {
2181         struct die_args *args = __args;
2182         struct pt_regs *regs;
2183
2184         if (!atomic_read(&active_events))
2185                 return NOTIFY_DONE;
2186
2187         switch (cmd) {
2188         case DIE_NMI:
2189         case DIE_NMI_IPI:
2190                 break;
2191
2192         default:
2193                 return NOTIFY_DONE;
2194         }
2195
2196         regs = args->regs;
2197
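        /*
         * Rewrite the LVT entry on every PMI: some CPUs mask the
         * performance-counter LVT entry when the interrupt is delivered,
         * so it has to be unmasked again for the next one.
         */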
2198 #ifdef CONFIG_X86_LOCAL_APIC
2199         apic_write(APIC_LVTPC, APIC_DM_NMI);
2200 #endif
2201         /*
2202          * Can't rely on the handled return value to say it was our NMI, two
2203          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2204          *
2205          * If the first NMI handles both, the latter will be empty and daze
2206          * the CPU.
2207          */
2208         x86_pmu.handle_irq(regs);
2209
2210         return NOTIFY_STOP;
2211 }
2212
2213 static struct event_constraint unconstrained;
2214
2215 static struct event_constraint bts_constraint =
2216         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2217
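/*
 * BTS handles precise branch tracing only: a branch-instructions event
 * with a sample period of 1 is steered onto the pseudo BTS counter via
 * this constraint.
 */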
2218 static struct event_constraint *
2219 intel_special_constraints(struct perf_event *event)
2220 {
2221         unsigned int hw_event;
2222
2223         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2224
2225         if (unlikely((hw_event ==
2226                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2227                      (event->hw.sample_period == 1))) {
2228
2229                 return &bts_constraint;
2230         }
2231         return NULL;
2232 }
2233
2234 static struct event_constraint *
2235 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2236 {
2237         struct event_constraint *c;
2238
2239         c = intel_special_constraints(event);
2240         if (c)
2241                 return c;
2242
2243         if (x86_pmu.event_constraints) {
2244                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2245                         if ((event->hw.config & c->cmask) == c->code)
2246                                 return c;
2247                 }
2248         }
2249
2250         return &unconstrained;
2251 }
2252
2253 static struct event_constraint *
2254 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2255 {
2256         return &unconstrained;
2257 }
2258
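/*
 * The two helpers below keep the software bookkeeping (state, oncpu,
 * timestamps, cpuctx accounting) for one event of a group in sync; for
 * x86 events the hardware programming itself is still deferred to
 * hw_perf_enable(), while software events go through their own pmu.
 */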
2259 static int x86_event_sched_in(struct perf_event *event,
2260                           struct perf_cpu_context *cpuctx, int cpu)
2261 {
2262         int ret = 0;
2263
2264         event->state = PERF_EVENT_STATE_ACTIVE;
2265         event->oncpu = cpu;
2266         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2267
2268         if (!is_x86_event(event))
2269                 ret = event->pmu->enable(event);
2270
2271         if (!ret && !is_software_event(event))
2272                 cpuctx->active_oncpu++;
2273
2274         if (!ret && event->attr.exclusive)
2275                 cpuctx->exclusive = 1;
2276
2277         return ret;
2278 }
2279
2280 static void x86_event_sched_out(struct perf_event *event,
2281                             struct perf_cpu_context *cpuctx, int cpu)
2282 {
2283         event->state = PERF_EVENT_STATE_INACTIVE;
2284         event->oncpu = -1;
2285
2286         if (!is_x86_event(event))
2287                 event->pmu->disable(event);
2288
2289         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2290
2291         if (!is_software_event(event))
2292                 cpuctx->active_oncpu--;
2293
2294         if (event->attr.exclusive || !cpuctx->active_oncpu)
2295                 cpuctx->exclusive = 0;
2296 }
2297
2298 /*
2299  * Called to enable a whole group of events.
2300  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2301  * Assumes the caller has disabled interrupts and has
2302  * frozen the PMU with hw_perf_save_disable.
2303  *
2304  * Called with the PMU disabled. If successful (return value 1), the
2305  * caller is then guaranteed to call perf_enable() and hw_perf_enable().
2306  */
2307 int hw_perf_group_sched_in(struct perf_event *leader,
2308                struct perf_cpu_context *cpuctx,
2309                struct perf_event_context *ctx, int cpu)
2310 {
2311         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2312         struct perf_event *sub;
2313         int assign[X86_PMC_IDX_MAX];
2314         int n0, n1, ret;
2315
2316         /* n0 = total number of events */
2317         n0 = collect_events(cpuc, leader, true);
2318         if (n0 < 0)
2319                 return n0;
2320
2321         ret = x86_schedule_events(cpuc, n0, assign);
2322         if (ret)
2323                 return ret;
2324
2325         ret = x86_event_sched_in(leader, cpuctx, cpu);
2326         if (ret)
2327                 return ret;
2328
2329         n1 = 1;
2330         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2331                 if (sub->state > PERF_EVENT_STATE_OFF) {
2332                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2333                         if (ret)
2334                                 goto undo;
2335                         ++n1;
2336                 }
2337         }
2338         /*
2339          * copy the new assignment now that we know it is possible;
2340          * it will be used by hw_perf_enable()
2341          */
2342         memcpy(cpuc->assign, assign, n0*sizeof(int));
2343
2344         cpuc->n_events  = n0;
2345         cpuc->n_added   = n1;
2346         ctx->nr_active += n1;
2347
2348         /*
2349          * A return value of 1 means success, with the events active.
2350          * This is not quite true because we defer
2351          * actual activation until hw_perf_enable(), but
2352          * this way we ensure the caller won't try to enable
2353          * individual events
2354          */
2355         return 1;
2356 undo:
2357         x86_event_sched_out(leader, cpuctx, cpu);
2358         n0  = 1;
2359         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2360                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2361                         x86_event_sched_out(sub, cpuctx, cpu);
2362                         if (++n0 == n1)
2363                                 break;
2364                 }
2365         }
2366         return ret;
2367 }
2368
2369 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2370         .notifier_call          = perf_event_nmi_handler,
2371         .next                   = NULL,
2372         .priority               = 1
2373 };
2374
2375 static __initconst struct x86_pmu p6_pmu = {
2376         .name                   = "p6",
2377         .handle_irq             = p6_pmu_handle_irq,
2378         .disable_all            = p6_pmu_disable_all,
2379         .enable_all             = p6_pmu_enable_all,
2380         .enable                 = p6_pmu_enable_event,
2381         .disable                = p6_pmu_disable_event,
2382         .eventsel               = MSR_P6_EVNTSEL0,
2383         .perfctr                = MSR_P6_PERFCTR0,
2384         .event_map              = p6_pmu_event_map,
2385         .raw_event              = p6_pmu_raw_event,
2386         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2387         .apic                   = 1,
2388         .max_period             = (1ULL << 31) - 1,
2389         .version                = 0,
2390         .num_events             = 2,
2391         /*
2392          * Events have 40 bits implemented. However they are designed such
2393          * that bits [32-39] are sign extensions of bit 31. As such the
2394          * effective width of an event on a P6-like PMU is 32 bits only.
2395          *
2396          * See the IA-32 Intel Architecture Software Developer's Manual Vol 3B
2397          */
2398         .event_bits             = 32,
2399         .event_mask             = (1ULL << 32) - 1,
2400         .get_event_constraints  = intel_get_event_constraints,
2401         .event_constraints      = intel_p6_event_constraints
2402 };
2403
2404 static __initconst struct x86_pmu intel_pmu = {
2405         .name                   = "Intel",
2406         .handle_irq             = intel_pmu_handle_irq,
2407         .disable_all            = intel_pmu_disable_all,
2408         .enable_all             = intel_pmu_enable_all,
2409         .enable                 = intel_pmu_enable_event,
2410         .disable                = intel_pmu_disable_event,
2411         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2412         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2413         .event_map              = intel_pmu_event_map,
2414         .raw_event              = intel_pmu_raw_event,
2415         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2416         .apic                   = 1,
2417         /*
2418          * Intel PMCs cannot be accessed sanely above 32-bit width,
2419          * so we install an artificial 1<<31 period regardless of
2420          * the generic event period:
2421          */
2422         .max_period             = (1ULL << 31) - 1,
2423         .enable_bts             = intel_pmu_enable_bts,
2424         .disable_bts            = intel_pmu_disable_bts,
2425         .get_event_constraints  = intel_get_event_constraints
2426 };
2427
2428 static __initconst struct x86_pmu amd_pmu = {
2429         .name                   = "AMD",
2430         .handle_irq             = amd_pmu_handle_irq,
2431         .disable_all            = amd_pmu_disable_all,
2432         .enable_all             = amd_pmu_enable_all,
2433         .enable                 = amd_pmu_enable_event,
2434         .disable                = amd_pmu_disable_event,
2435         .eventsel               = MSR_K7_EVNTSEL0,
2436         .perfctr                = MSR_K7_PERFCTR0,
2437         .event_map              = amd_pmu_event_map,
2438         .raw_event              = amd_pmu_raw_event,
2439         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2440         .num_events             = 4,
2441         .event_bits             = 48,
2442         .event_mask             = (1ULL << 48) - 1,
2443         .apic                   = 1,
2444         /* use highest bit to detect overflow */
2445         .max_period             = (1ULL << 47) - 1,
2446         .get_event_constraints  = amd_get_event_constraints
2447 };
2448
2449 static __init int p6_pmu_init(void)
2450 {
2451         switch (boot_cpu_data.x86_model) {
2452         case 1:
2453         case 3:  /* Pentium Pro */
2454         case 5:
2455         case 6:  /* Pentium II */
2456         case 7:
2457         case 8:
2458         case 11: /* Pentium III */
2459         case 9:
2460         case 13:
2461                 /* Pentium M */
2462                 break;
2463         default:
2464                 pr_cont("unsupported p6 CPU model %d ",
2465                         boot_cpu_data.x86_model);
2466                 return -ENODEV;
2467         }
2468
2469         x86_pmu = p6_pmu;
2470
2471         return 0;
2472 }
2473
2474 static __init int intel_pmu_init(void)
2475 {
2476         union cpuid10_edx edx;
2477         union cpuid10_eax eax;
2478         unsigned int unused;
2479         unsigned int ebx;
2480         int version;
2481
2482         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2483                 /* check for P6 processor family */
2484                 if (boot_cpu_data.x86 == 6) {
2485                         return p6_pmu_init();
2486                 } else {
2487                         return -ENODEV;
2488                 }
2489         }
2490
2491         /*
2492          * Check whether the Architectural PerfMon supports
2493          * Branch Misses Retired hw_event or not.
2494          */
2495         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2496         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2497                 return -ENODEV;
2498
2499         version = eax.split.version_id;
2500         if (version < 2)
2501                 return -ENODEV;
2502
2503         x86_pmu                         = intel_pmu;
2504         x86_pmu.version                 = version;
2505         x86_pmu.num_events              = eax.split.num_events;
2506         x86_pmu.event_bits              = eax.split.bit_width;
2507         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2508
2509         /*
2510          * Quirk: v2 perfmon does not report fixed-purpose events, so
2511          * assume at least 3 events:
2512          */
2513         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2514
2515         /*
2516          * Install the hw-cache-events table:
2517          */
2518         switch (boot_cpu_data.x86_model) {
2519         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2520         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2521         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2522         case 29: /* six-core 45 nm xeon "Dunnington" */
2523                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2524                        sizeof(hw_cache_event_ids));
2525
2526                 x86_pmu.event_constraints = intel_core_event_constraints;
2527                 pr_cont("Core2 events, ");
2528                 break;
2529
2530         case 26: /* 45 nm nehalem, "Bloomfield" */
2531         case 30: /* 45 nm nehalem, "Lynnfield" */
2532                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2533                        sizeof(hw_cache_event_ids));
2534
2535                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2536                 pr_cont("Nehalem/Corei7 events, ");
2537                 break;
2538         case 28:
2539                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2540                        sizeof(hw_cache_event_ids));
2541
2542                 x86_pmu.event_constraints = intel_gen_event_constraints;
2543                 pr_cont("Atom events, ");
2544                 break;
2545
2546         case 37: /* 32 nm nehalem, "Clarkdale" */
2547         case 44: /* 32 nm nehalem, "Gulftown" */
2548                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2549                        sizeof(hw_cache_event_ids));
2550
2551                 x86_pmu.event_constraints = intel_westmere_event_constraints;
2552                 pr_cont("Westmere events, ");
2553                 break;
2554         default:
2555                 /*
2556                  * default constraints for v2 and up
2557                  */
2558                 x86_pmu.event_constraints = intel_gen_event_constraints;
2559                 pr_cont("generic architected perfmon, ");
2560         }
2561         return 0;
2562 }
2563
2564 static __init int amd_pmu_init(void)
2565 {
2566         /* Performance-monitoring supported from K7 and later: */
2567         if (boot_cpu_data.x86 < 6)
2568                 return -ENODEV;
2569
2570         x86_pmu = amd_pmu;
2571
2572         /* Events are common for all AMD CPUs */
2573         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2574                sizeof(hw_cache_event_ids));
2575
2576         return 0;
2577 }
2578
2579 static void __init pmu_check_apic(void)
2580 {
2581         if (cpu_has_apic)
2582                 return;
2583
2584         x86_pmu.apic = 0;
2585         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2586         pr_info("no hardware sampling interrupt available.\n");
2587 }
2588
2589 void __init init_hw_perf_events(void)
2590 {
2591         int err;
2592
2593         pr_info("Performance Events: ");
2594
2595         switch (boot_cpu_data.x86_vendor) {
2596         case X86_VENDOR_INTEL:
2597                 err = intel_pmu_init();
2598                 break;
2599         case X86_VENDOR_AMD:
2600                 err = amd_pmu_init();
2601                 break;
2602         default:
2603                 return;
2604         }
2605         if (err != 0) {
2606                 pr_cont("no PMU driver, software events only.\n");
2607                 return;
2608         }
2609
2610         pmu_check_apic();
2611
2612         pr_cont("%s PMU driver.\n", x86_pmu.name);
2613
2614         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2615                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2616                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2617                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2618         }
2619         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2620         perf_max_events = x86_pmu.num_events;
2621
2622         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2623                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2624                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2625                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2626         }
2627
2628         perf_event_mask |=
2629                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2630         x86_pmu.intel_ctrl = perf_event_mask;
2631
2632         perf_events_lapic_init();
2633         register_die_notifier(&perf_event_nmi_notifier);
2634
2635         unconstrained = (struct event_constraint)
2636                 EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
2637
2638         pr_info("... version:                %d\n",     x86_pmu.version);
2639         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2640         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2641         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2642         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2643         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2644         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2645 }
2646
2647 static inline void x86_pmu_read(struct perf_event *event)
2648 {
2649         x86_perf_event_update(event, &event->hw, event->hw.idx);
2650 }
2651
2652 static const struct pmu pmu = {
2653         .enable         = x86_pmu_enable,
2654         .disable        = x86_pmu_disable,
2655         .read           = x86_pmu_read,
2656         .unthrottle     = x86_pmu_unthrottle,
2657 };
2658
2659 /*
2660  * validate a single event group
2661  *
2662  * validation includes:
2663  *      - check events are compatible with each other
2664  *      - events do not compete for the same counter
2665  *      - number of events <= number of counters
2666  *
2667  * validation ensures the group can be loaded onto the
2668  * PMU if it was the only group available.
2669  */
2670 static int validate_group(struct perf_event *event)
2671 {
2672         struct perf_event *leader = event->group_leader;
2673         struct cpu_hw_events *fake_cpuc;
2674         int ret, n;
2675
2676         ret = -ENOMEM;
2677         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2678         if (!fake_cpuc)
2679                 goto out;
2680
2681         /*
2682          * the event is not yet connected with its
2683          * siblings, therefore we must first collect the
2684          * existing siblings, then add the new event
2685          * before we can simulate the scheduling
2686          */
2687         ret = -ENOSPC;
2688         n = collect_events(fake_cpuc, leader, true);
2689         if (n < 0)
2690                 goto out_free;
2691
2692         fake_cpuc->n_events = n;
2693         n = collect_events(fake_cpuc, event, false);
2694         if (n < 0)
2695                 goto out_free;
2696
2697         fake_cpuc->n_events = n;
2698
2699         ret = x86_schedule_events(fake_cpuc, n, NULL);
2700
2701 out_free:
2702         kfree(fake_cpuc);
2703 out:
2704         return ret;
2705 }
2706
2707 const struct pmu *hw_perf_event_init(struct perf_event *event)
2708 {
2709         const struct pmu *tmp;
2710         int err;
2711
2712         err = __hw_perf_event_init(event);
2713         if (!err) {
2714                 /*
2715                  * we temporarily connect the event to its pmu
2716                  * such that validate_group() can classify
2717                  * it as an x86 event using is_x86_event()
2718                  */
2719                 tmp = event->pmu;
2720                 event->pmu = &pmu;
2721
2722                 if (event->group_leader != event)
2723                         err = validate_group(event);
2724
2725                 event->pmu = tmp;
2726         }
2727         if (err) {
2728                 if (event->destroy)
2729                         event->destroy(event);
2730                 return ERR_PTR(err);
2731         }
2732
2733         return &pmu;
2734 }
2735
2736 /*
2737  * callchain support
2738  */
2739
2740 static inline
2741 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2742 {
2743         if (entry->nr < PERF_MAX_STACK_DEPTH)
2744                 entry->ip[entry->nr++] = ip;
2745 }
2746
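/*
 * One callchain scratch buffer per context level: an NMI can interrupt
 * IRQ-level sampling, so each needs its own per-cpu entry.
 */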
2747 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2748 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2749
2750
2751 static void
2752 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2753 {
2754         /* Ignore warnings */
2755 }
2756
2757 static void backtrace_warning(void *data, char *msg)
2758 {
2759         /* Ignore warnings */
2760 }
2761
2762 static int backtrace_stack(void *data, char *name)
2763 {
2764         return 0;
2765 }
2766
2767 static void backtrace_address(void *data, unsigned long addr, int reliable)
2768 {
2769         struct perf_callchain_entry *entry = data;
2770
2771         if (reliable)
2772                 callchain_store(entry, addr);
2773 }
2774
2775 static const struct stacktrace_ops backtrace_ops = {
2776         .warning                = backtrace_warning,
2777         .warning_symbol         = backtrace_warning_symbol,
2778         .stack                  = backtrace_stack,
2779         .address                = backtrace_address,
2780         .walk_stack             = print_context_stack_bp,
2781 };
2782
2783 #include "../dumpstack.h"
2784
2785 static void
2786 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2787 {
2788         callchain_store(entry, PERF_CONTEXT_KERNEL);
2789         callchain_store(entry, regs->ip);
2790
2791         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2792 }
2793
2794 /*
2795  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2796  */
2797 static unsigned long
2798 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2799 {
2800         unsigned long offset, addr = (unsigned long)from;
2801         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2802         unsigned long size, len = 0;
2803         struct page *page;
2804         void *map;
2805         int ret;
2806
2807         do {
2808                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2809                 if (!ret)
2810                         break;
2811
2812                 offset = addr & (PAGE_SIZE - 1);
2813                 size = min(PAGE_SIZE - offset, n - len);
2814
2815                 map = kmap_atomic(page, type);
2816                 memcpy(to, map+offset, size);
2817                 kunmap_atomic(map, type);
2818                 put_page(page);
2819
2820                 len  += size;
2821                 to   += size;
2822                 addr += size;
2823
2824         } while (len < n);
2825
2826         return len;
2827 }
2828
2829 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2830 {
2831         unsigned long bytes;
2832
2833         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2834
2835         return bytes == sizeof(*frame);
2836 }
2837
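/*
 * Walk the user-space stack by following saved frame pointers, copying
 * each frame with the NMI-safe helper above.
 */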
2838 static void
2839 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2840 {
2841         struct stack_frame frame;
2842         const void __user *fp;
2843
2844         if (!user_mode(regs))
2845                 regs = task_pt_regs(current);
2846
2847         fp = (void __user *)regs->bp;
2848
2849         callchain_store(entry, PERF_CONTEXT_USER);
2850         callchain_store(entry, regs->ip);
2851
2852         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2853                 frame.next_frame     = NULL;
2854                 frame.return_address = 0;
2855
2856                 if (!copy_stack_frame(fp, &frame))
2857                         break;
2858
2859                 if ((unsigned long)fp < regs->sp)
2860                         break;
2861
2862                 callchain_store(entry, frame.return_address);
2863                 fp = frame.next_frame;
2864         }
2865 }
2866
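/*
 * Build the combined callchain: the kernel side when the sample was
 * taken in kernel mode, then the user side if the task has an mm.
 */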
2867 static void
2868 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2869 {
2870         int is_user;
2871
2872         if (!regs)
2873                 return;
2874
2875         is_user = user_mode(regs);
2876
2877         if (is_user && current->state != TASK_RUNNING)
2878                 return;
2879
2880         if (!is_user)
2881                 perf_callchain_kernel(regs, entry);
2882
2883         if (current->mm)
2884                 perf_callchain_user(regs, entry);
2885 }
2886
2887 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2888 {
2889         struct perf_callchain_entry *entry;
2890
2891         if (in_nmi())
2892                 entry = &__get_cpu_var(pmc_nmi_entry);
2893         else
2894                 entry = &__get_cpu_var(pmc_irq_entry);
2895
2896         entry->nr = 0;
2897
2898         perf_do_callchain(regs, entry);
2899
2900         return entry;
2901 }
2902
2903 void hw_perf_event_setup_online(int cpu)
2904 {
2905         init_debug_store_on_cpu(cpu);
2906 }