arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
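/*
 * For reference (derived from the constants above): the per-cpu BTS buffer
 * holds 2048 records of 24 bytes each, i.e. 49152 bytes, and the overflow
 * threshold sits 128 records (3072 bytes) before the end of the buffer.
 */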
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
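/*
 * Illustrative sketch (mirrors intel_pmu_enable_bts() below): to trace
 * user-space branches only, TR, BTS and BTINT are set and kernel-mode
 * tracing is suppressed via BTS_OFF_OS:
 *
 *      debugctlmsr |= X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS |
 *                     X86_DEBUGCTL_BTINT | X86_DEBUGCTL_BTS_OFF_OS;
 */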
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 };
95
96 #define __EVENT_CONSTRAINT(c, n, m, w) {\
97         { .idxmsk64[0] = (n) },         \
98         .code = (c),                    \
99         .cmask = (m),                   \
100         .weight = (w),                  \
101 }
102
103 #define EVENT_CONSTRAINT(c, n, m)       \
104         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
105
106 #define INTEL_EVENT_CONSTRAINT(c, n)    \
107         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
108
109 #define FIXED_EVENT_CONSTRAINT(c, n)    \
110         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
111
112 #define EVENT_CONSTRAINT_END            \
113         EVENT_CONSTRAINT(0, 0, 0)
114
115 #define for_each_event_constraint(e, c) \
116         for ((e) = (c); (e)->cmask; (e)++)
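/*
 * Example (a sketch of how the macros above expand): the MUL constraint
 *
 *      INTEL_EVENT_CONSTRAINT(0x12, 0x2)
 *
 * describes event code 0x12, matched through INTEL_ARCH_EVTSEL_MASK,
 * with an idxmsk of 0x2 (only generic counter 1 may be used) and a
 * weight of HWEIGHT(0x2) == 1, evaluated at compile time.
 */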
117
118 /*
119  * struct x86_pmu - generic x86 pmu
120  */
121 struct x86_pmu {
122         const char      *name;
123         int             version;
124         int             (*handle_irq)(struct pt_regs *);
125         void            (*disable_all)(void);
126         void            (*enable_all)(void);
127         void            (*enable)(struct hw_perf_event *, int);
128         void            (*disable)(struct hw_perf_event *, int);
129         unsigned        eventsel;
130         unsigned        perfctr;
131         u64             (*event_map)(int);
132         u64             (*raw_event)(u64);
133         int             max_events;
134         int             num_events;
135         int             num_events_fixed;
136         int             event_bits;
137         u64             event_mask;
138         int             apic;
139         u64             max_period;
140         u64             intel_ctrl;
141         void            (*enable_bts)(u64 config);
142         void            (*disable_bts)(void);
143
144         struct event_constraint *
145                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
146                                                  struct perf_event *event);
147
148         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
149                                                  struct perf_event *event);
150         struct event_constraint *event_constraints;
151 };
152
153 static struct x86_pmu x86_pmu __read_mostly;
154
155 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
156         .enabled = 1,
157 };
158
159 static int x86_perf_event_set_period(struct perf_event *event,
160                              struct hw_perf_event *hwc, int idx);
161
162 /*
163  * Not sure about some of these
164  */
165 static const u64 p6_perfmon_event_map[] =
166 {
167   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
168   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
169   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
170   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
171   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
172   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
173   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
174 };
175
176 static u64 p6_pmu_event_map(int hw_event)
177 {
178         return p6_perfmon_event_map[hw_event];
179 }
180
181 /*
182  * Event setting that is specified not to count anything.
183  * We use this to effectively disable a counter.
184  *
185  * L2_RQSTS with 0 MESI unit mask.
186  */
187 #define P6_NOP_EVENT                    0x0000002EULL
188
189 static u64 p6_pmu_raw_event(u64 hw_event)
190 {
191 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
192 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
193 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
194 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
195 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
196
197 #define P6_EVNTSEL_MASK                 \
198         (P6_EVNTSEL_EVENT_MASK |        \
199          P6_EVNTSEL_UNIT_MASK  |        \
200          P6_EVNTSEL_EDGE_MASK  |        \
201          P6_EVNTSEL_INV_MASK   |        \
202          P6_EVNTSEL_REG_MASK)
203
204         return hw_event & P6_EVNTSEL_MASK;
205 }
206
207 static struct event_constraint intel_p6_event_constraints[] =
208 {
209         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
210         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
211         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
212         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
213         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
214         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
215         EVENT_CONSTRAINT_END
216 };
217
218 /*
219  * Intel PerfMon v3. Used on Core2 and later.
220  */
221 static const u64 intel_perfmon_event_map[] =
222 {
223   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
224   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
225   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
226   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
227   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
228   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
229   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
230 };
231
232 static struct event_constraint intel_core_event_constraints[] =
233 {
234         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
235         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
236         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
237         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
238         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
239         INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
240         EVENT_CONSTRAINT_END
241 };
242
243 static struct event_constraint intel_core2_event_constraints[] =
244 {
245         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
246         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
247         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
248         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
249         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
250         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
251         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
252         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
253         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
254         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
255         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
256         EVENT_CONSTRAINT_END
257 };
258
259 static struct event_constraint intel_nehalem_event_constraints[] =
260 {
261         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
262         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
263         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
264         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
265         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
266         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
267         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
268         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
269         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
270         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
271         EVENT_CONSTRAINT_END
272 };
273
274 static struct event_constraint intel_westmere_event_constraints[] =
275 {
276         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
277         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
278         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
279         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
280         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
281         EVENT_CONSTRAINT_END
282 };
283
284 static struct event_constraint intel_gen_event_constraints[] =
285 {
286         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
287         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
288         EVENT_CONSTRAINT_END
289 };
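/*
 * Note on the fixed constraint masks above (an informal sketch): in
 * (0x3|(1ULL<<32)) the low bits allow generic counters 0-1, while bits
 * 32 and up select the fixed-purpose counters (X86_PMC_IDX_FIXED is the
 * index of the first one).  INSTRUCTIONS_RETIRED may therefore run on a
 * generic counter or on fixed counter 0, and UNHALTED_CORE_CYCLES on a
 * generic counter or on fixed counter 1.
 */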
290
291 static u64 intel_pmu_event_map(int hw_event)
292 {
293         return intel_perfmon_event_map[hw_event];
294 }
295
296 /*
297  * Generalized hw caching related hw_event table, filled
298  * in on a per model basis. A value of 0 means
299  * 'not supported', -1 means 'hw_event makes no sense on
300  * this CPU', any other value means the raw hw_event
301  * ID.
302  */
303
304 #define C(x) PERF_COUNT_HW_CACHE_##x
305
306 static u64 __read_mostly hw_cache_event_ids
307                                 [PERF_COUNT_HW_CACHE_MAX]
308                                 [PERF_COUNT_HW_CACHE_OP_MAX]
309                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
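/*
 * Example lookup (a sketch of how set_ext_hw_attr() below decodes
 * attr->config): the encoding is
 *
 *      config = type | (op << 8) | (result << 16)
 *
 * so an L1D read-miss event resolves to
 * hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)], which the
 * Westmere table below fills in as 0x0151 (L1D.REPL).
 */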
310
311 static __initconst u64 westmere_hw_cache_event_ids
312                                 [PERF_COUNT_HW_CACHE_MAX]
313                                 [PERF_COUNT_HW_CACHE_OP_MAX]
314                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
315 {
316  [ C(L1D) ] = {
317         [ C(OP_READ) ] = {
318                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
319                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
320         },
321         [ C(OP_WRITE) ] = {
322                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
323                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
324         },
325         [ C(OP_PREFETCH) ] = {
326                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
327                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
328         },
329  },
330  [ C(L1I ) ] = {
331         [ C(OP_READ) ] = {
332                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
333                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
334         },
335         [ C(OP_WRITE) ] = {
336                 [ C(RESULT_ACCESS) ] = -1,
337                 [ C(RESULT_MISS)   ] = -1,
338         },
339         [ C(OP_PREFETCH) ] = {
340                 [ C(RESULT_ACCESS) ] = 0x0,
341                 [ C(RESULT_MISS)   ] = 0x0,
342         },
343  },
344  [ C(LL  ) ] = {
345         [ C(OP_READ) ] = {
346                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
347                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
348         },
349         [ C(OP_WRITE) ] = {
350                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
351                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
352         },
353         [ C(OP_PREFETCH) ] = {
354                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
355                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
356         },
357  },
358  [ C(DTLB) ] = {
359         [ C(OP_READ) ] = {
360                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
361                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
362         },
363         [ C(OP_WRITE) ] = {
364                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
365                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
366         },
367         [ C(OP_PREFETCH) ] = {
368                 [ C(RESULT_ACCESS) ] = 0x0,
369                 [ C(RESULT_MISS)   ] = 0x0,
370         },
371  },
372  [ C(ITLB) ] = {
373         [ C(OP_READ) ] = {
374                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
375                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
376         },
377         [ C(OP_WRITE) ] = {
378                 [ C(RESULT_ACCESS) ] = -1,
379                 [ C(RESULT_MISS)   ] = -1,
380         },
381         [ C(OP_PREFETCH) ] = {
382                 [ C(RESULT_ACCESS) ] = -1,
383                 [ C(RESULT_MISS)   ] = -1,
384         },
385  },
386  [ C(BPU ) ] = {
387         [ C(OP_READ) ] = {
388                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
389                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
390         },
391         [ C(OP_WRITE) ] = {
392                 [ C(RESULT_ACCESS) ] = -1,
393                 [ C(RESULT_MISS)   ] = -1,
394         },
395         [ C(OP_PREFETCH) ] = {
396                 [ C(RESULT_ACCESS) ] = -1,
397                 [ C(RESULT_MISS)   ] = -1,
398         },
399  },
400 };
401
402 static __initconst u64 nehalem_hw_cache_event_ids
403                                 [PERF_COUNT_HW_CACHE_MAX]
404                                 [PERF_COUNT_HW_CACHE_OP_MAX]
405                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
406 {
407  [ C(L1D) ] = {
408         [ C(OP_READ) ] = {
409                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
410                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
411         },
412         [ C(OP_WRITE) ] = {
413                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
414                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
415         },
416         [ C(OP_PREFETCH) ] = {
417                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
418                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
419         },
420  },
421  [ C(L1I ) ] = {
422         [ C(OP_READ) ] = {
423                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
424                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
425         },
426         [ C(OP_WRITE) ] = {
427                 [ C(RESULT_ACCESS) ] = -1,
428                 [ C(RESULT_MISS)   ] = -1,
429         },
430         [ C(OP_PREFETCH) ] = {
431                 [ C(RESULT_ACCESS) ] = 0x0,
432                 [ C(RESULT_MISS)   ] = 0x0,
433         },
434  },
435  [ C(LL  ) ] = {
436         [ C(OP_READ) ] = {
437                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
438                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
439         },
440         [ C(OP_WRITE) ] = {
441                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
442                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
443         },
444         [ C(OP_PREFETCH) ] = {
445                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
446                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
447         },
448  },
449  [ C(DTLB) ] = {
450         [ C(OP_READ) ] = {
451                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
452                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
453         },
454         [ C(OP_WRITE) ] = {
455                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
456                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
457         },
458         [ C(OP_PREFETCH) ] = {
459                 [ C(RESULT_ACCESS) ] = 0x0,
460                 [ C(RESULT_MISS)   ] = 0x0,
461         },
462  },
463  [ C(ITLB) ] = {
464         [ C(OP_READ) ] = {
465                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
466                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
467         },
468         [ C(OP_WRITE) ] = {
469                 [ C(RESULT_ACCESS) ] = -1,
470                 [ C(RESULT_MISS)   ] = -1,
471         },
472         [ C(OP_PREFETCH) ] = {
473                 [ C(RESULT_ACCESS) ] = -1,
474                 [ C(RESULT_MISS)   ] = -1,
475         },
476  },
477  [ C(BPU ) ] = {
478         [ C(OP_READ) ] = {
479                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
480                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
481         },
482         [ C(OP_WRITE) ] = {
483                 [ C(RESULT_ACCESS) ] = -1,
484                 [ C(RESULT_MISS)   ] = -1,
485         },
486         [ C(OP_PREFETCH) ] = {
487                 [ C(RESULT_ACCESS) ] = -1,
488                 [ C(RESULT_MISS)   ] = -1,
489         },
490  },
491 };
492
493 static __initconst u64 core2_hw_cache_event_ids
494                                 [PERF_COUNT_HW_CACHE_MAX]
495                                 [PERF_COUNT_HW_CACHE_OP_MAX]
496                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
497 {
498  [ C(L1D) ] = {
499         [ C(OP_READ) ] = {
500                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
501                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
502         },
503         [ C(OP_WRITE) ] = {
504                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
505                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
506         },
507         [ C(OP_PREFETCH) ] = {
508                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
509                 [ C(RESULT_MISS)   ] = 0,
510         },
511  },
512  [ C(L1I ) ] = {
513         [ C(OP_READ) ] = {
514                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
515                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
516         },
517         [ C(OP_WRITE) ] = {
518                 [ C(RESULT_ACCESS) ] = -1,
519                 [ C(RESULT_MISS)   ] = -1,
520         },
521         [ C(OP_PREFETCH) ] = {
522                 [ C(RESULT_ACCESS) ] = 0,
523                 [ C(RESULT_MISS)   ] = 0,
524         },
525  },
526  [ C(LL  ) ] = {
527         [ C(OP_READ) ] = {
528                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
529                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
530         },
531         [ C(OP_WRITE) ] = {
532                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
533                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
534         },
535         [ C(OP_PREFETCH) ] = {
536                 [ C(RESULT_ACCESS) ] = 0,
537                 [ C(RESULT_MISS)   ] = 0,
538         },
539  },
540  [ C(DTLB) ] = {
541         [ C(OP_READ) ] = {
542                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
543                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
544         },
545         [ C(OP_WRITE) ] = {
546                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
547                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
548         },
549         [ C(OP_PREFETCH) ] = {
550                 [ C(RESULT_ACCESS) ] = 0,
551                 [ C(RESULT_MISS)   ] = 0,
552         },
553  },
554  [ C(ITLB) ] = {
555         [ C(OP_READ) ] = {
556                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
557                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
558         },
559         [ C(OP_WRITE) ] = {
560                 [ C(RESULT_ACCESS) ] = -1,
561                 [ C(RESULT_MISS)   ] = -1,
562         },
563         [ C(OP_PREFETCH) ] = {
564                 [ C(RESULT_ACCESS) ] = -1,
565                 [ C(RESULT_MISS)   ] = -1,
566         },
567  },
568  [ C(BPU ) ] = {
569         [ C(OP_READ) ] = {
570                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
571                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
572         },
573         [ C(OP_WRITE) ] = {
574                 [ C(RESULT_ACCESS) ] = -1,
575                 [ C(RESULT_MISS)   ] = -1,
576         },
577         [ C(OP_PREFETCH) ] = {
578                 [ C(RESULT_ACCESS) ] = -1,
579                 [ C(RESULT_MISS)   ] = -1,
580         },
581  },
582 };
583
584 static __initconst u64 atom_hw_cache_event_ids
585                                 [PERF_COUNT_HW_CACHE_MAX]
586                                 [PERF_COUNT_HW_CACHE_OP_MAX]
587                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
588 {
589  [ C(L1D) ] = {
590         [ C(OP_READ) ] = {
591                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
592                 [ C(RESULT_MISS)   ] = 0,
593         },
594         [ C(OP_WRITE) ] = {
595                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
596                 [ C(RESULT_MISS)   ] = 0,
597         },
598         [ C(OP_PREFETCH) ] = {
599                 [ C(RESULT_ACCESS) ] = 0x0,
600                 [ C(RESULT_MISS)   ] = 0,
601         },
602  },
603  [ C(L1I ) ] = {
604         [ C(OP_READ) ] = {
605                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
606                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
607         },
608         [ C(OP_WRITE) ] = {
609                 [ C(RESULT_ACCESS) ] = -1,
610                 [ C(RESULT_MISS)   ] = -1,
611         },
612         [ C(OP_PREFETCH) ] = {
613                 [ C(RESULT_ACCESS) ] = 0,
614                 [ C(RESULT_MISS)   ] = 0,
615         },
616  },
617  [ C(LL  ) ] = {
618         [ C(OP_READ) ] = {
619                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
620                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
621         },
622         [ C(OP_WRITE) ] = {
623                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
624                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
625         },
626         [ C(OP_PREFETCH) ] = {
627                 [ C(RESULT_ACCESS) ] = 0,
628                 [ C(RESULT_MISS)   ] = 0,
629         },
630  },
631  [ C(DTLB) ] = {
632         [ C(OP_READ) ] = {
633                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
634                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
635         },
636         [ C(OP_WRITE) ] = {
637                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
638                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
639         },
640         [ C(OP_PREFETCH) ] = {
641                 [ C(RESULT_ACCESS) ] = 0,
642                 [ C(RESULT_MISS)   ] = 0,
643         },
644  },
645  [ C(ITLB) ] = {
646         [ C(OP_READ) ] = {
647                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
648                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
649         },
650         [ C(OP_WRITE) ] = {
651                 [ C(RESULT_ACCESS) ] = -1,
652                 [ C(RESULT_MISS)   ] = -1,
653         },
654         [ C(OP_PREFETCH) ] = {
655                 [ C(RESULT_ACCESS) ] = -1,
656                 [ C(RESULT_MISS)   ] = -1,
657         },
658  },
659  [ C(BPU ) ] = {
660         [ C(OP_READ) ] = {
661                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
662                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
663         },
664         [ C(OP_WRITE) ] = {
665                 [ C(RESULT_ACCESS) ] = -1,
666                 [ C(RESULT_MISS)   ] = -1,
667         },
668         [ C(OP_PREFETCH) ] = {
669                 [ C(RESULT_ACCESS) ] = -1,
670                 [ C(RESULT_MISS)   ] = -1,
671         },
672  },
673 };
674
675 static u64 intel_pmu_raw_event(u64 hw_event)
676 {
677 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
678 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
679 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
680 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
681 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
682
683 #define CORE_EVNTSEL_MASK               \
684         (INTEL_ARCH_EVTSEL_MASK |       \
685          INTEL_ARCH_UNIT_MASK   |       \
686          INTEL_ARCH_EDGE_MASK   |       \
687          INTEL_ARCH_INV_MASK    |       \
688          INTEL_ARCH_CNT_MASK)
689
690         return hw_event & CORE_EVNTSEL_MASK;
691 }
692
693 static __initconst u64 amd_hw_cache_event_ids
694                                 [PERF_COUNT_HW_CACHE_MAX]
695                                 [PERF_COUNT_HW_CACHE_OP_MAX]
696                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
697 {
698  [ C(L1D) ] = {
699         [ C(OP_READ) ] = {
700                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
701                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
702         },
703         [ C(OP_WRITE) ] = {
704                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
705                 [ C(RESULT_MISS)   ] = 0,
706         },
707         [ C(OP_PREFETCH) ] = {
708                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
709                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
710         },
711  },
712  [ C(L1I ) ] = {
713         [ C(OP_READ) ] = {
714                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
715                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
716         },
717         [ C(OP_WRITE) ] = {
718                 [ C(RESULT_ACCESS) ] = -1,
719                 [ C(RESULT_MISS)   ] = -1,
720         },
721         [ C(OP_PREFETCH) ] = {
722                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
723                 [ C(RESULT_MISS)   ] = 0,
724         },
725  },
726  [ C(LL  ) ] = {
727         [ C(OP_READ) ] = {
728                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
729                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
730         },
731         [ C(OP_WRITE) ] = {
732                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
733                 [ C(RESULT_MISS)   ] = 0,
734         },
735         [ C(OP_PREFETCH) ] = {
736                 [ C(RESULT_ACCESS) ] = 0,
737                 [ C(RESULT_MISS)   ] = 0,
738         },
739  },
740  [ C(DTLB) ] = {
741         [ C(OP_READ) ] = {
742                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
743                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
744         },
745         [ C(OP_WRITE) ] = {
746                 [ C(RESULT_ACCESS) ] = 0,
747                 [ C(RESULT_MISS)   ] = 0,
748         },
749         [ C(OP_PREFETCH) ] = {
750                 [ C(RESULT_ACCESS) ] = 0,
751                 [ C(RESULT_MISS)   ] = 0,
752         },
753  },
754  [ C(ITLB) ] = {
755         [ C(OP_READ) ] = {
756                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
757                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
758         },
759         [ C(OP_WRITE) ] = {
760                 [ C(RESULT_ACCESS) ] = -1,
761                 [ C(RESULT_MISS)   ] = -1,
762         },
763         [ C(OP_PREFETCH) ] = {
764                 [ C(RESULT_ACCESS) ] = -1,
765                 [ C(RESULT_MISS)   ] = -1,
766         },
767  },
768  [ C(BPU ) ] = {
769         [ C(OP_READ) ] = {
770                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
771                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
772         },
773         [ C(OP_WRITE) ] = {
774                 [ C(RESULT_ACCESS) ] = -1,
775                 [ C(RESULT_MISS)   ] = -1,
776         },
777         [ C(OP_PREFETCH) ] = {
778                 [ C(RESULT_ACCESS) ] = -1,
779                 [ C(RESULT_MISS)   ] = -1,
780         },
781  },
782 };
783
784 /*
785  * AMD Performance Monitor K7 and later.
786  */
787 static const u64 amd_perfmon_event_map[] =
788 {
789   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
790   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
791   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
792   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
793   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
794   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
795 };
796
797 static u64 amd_pmu_event_map(int hw_event)
798 {
799         return amd_perfmon_event_map[hw_event];
800 }
801
802 static u64 amd_pmu_raw_event(u64 hw_event)
803 {
804 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
805 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
806 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
807 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
808 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
809
810 #define K7_EVNTSEL_MASK                 \
811         (K7_EVNTSEL_EVENT_MASK |        \
812          K7_EVNTSEL_UNIT_MASK  |        \
813          K7_EVNTSEL_EDGE_MASK  |        \
814          K7_EVNTSEL_INV_MASK   |        \
815          K7_EVNTSEL_REG_MASK)
816
817         return hw_event & K7_EVNTSEL_MASK;
818 }
819
820 /*
821  * Propagate event elapsed time into the generic event.
822  * Can only be executed on the CPU where the event is active.
823  * Returns the delta events processed.
824  */
825 static u64
826 x86_perf_event_update(struct perf_event *event,
827                         struct hw_perf_event *hwc, int idx)
828 {
829         int shift = 64 - x86_pmu.event_bits;
830         u64 prev_raw_count, new_raw_count;
831         s64 delta;
832
833         if (idx == X86_PMC_IDX_FIXED_BTS)
834                 return 0;
835
836         /*
837          * Careful: an NMI might modify the previous event value.
838          *
839          * Our tactic to handle this is to first atomically read and
840          * exchange a new raw count - then add that new-prev delta
841          * count to the generic event atomically:
842          */
843 again:
844         prev_raw_count = atomic64_read(&hwc->prev_count);
845         rdmsrl(hwc->event_base + idx, new_raw_count);
846
847         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
848                                         new_raw_count) != prev_raw_count)
849                 goto again;
850
851         /*
852          * Now we have the new raw value and have updated the prev
853          * timestamp already. We can now calculate the elapsed delta
854          * (event-)time and add that to the generic event.
855          *
856          * Careful, not all hw sign-extends above the physical width
857          * of the count.
858          */
859         delta = (new_raw_count << shift) - (prev_raw_count << shift);
860         delta >>= shift;
861
862         atomic64_add(delta, &event->count);
863         atomic64_sub(delta, &hwc->period_left);
864
865         return new_raw_count;
866 }
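/*
 * Worked example for the shift arithmetic above (illustrative): with
 * 48-bit counters, shift is 16.  Shifting both counts left by 16 and
 * arithmetic-shifting the difference back down truncates everything
 * above the low 48 bits and sign-extends the result, so stale or
 * hardware-dependent bits above the physical counter width cannot
 * corrupt the computed delta.
 */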
867
868 static atomic_t active_events;
869 static DEFINE_MUTEX(pmc_reserve_mutex);
870
871 static bool reserve_pmc_hardware(void)
872 {
873 #ifdef CONFIG_X86_LOCAL_APIC
874         int i;
875
876         if (nmi_watchdog == NMI_LOCAL_APIC)
877                 disable_lapic_nmi_watchdog();
878
879         for (i = 0; i < x86_pmu.num_events; i++) {
880                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
881                         goto perfctr_fail;
882         }
883
884         for (i = 0; i < x86_pmu.num_events; i++) {
885                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
886                         goto eventsel_fail;
887         }
888 #endif
889
890         return true;
891
892 #ifdef CONFIG_X86_LOCAL_APIC
893 eventsel_fail:
894         for (i--; i >= 0; i--)
895                 release_evntsel_nmi(x86_pmu.eventsel + i);
896
897         i = x86_pmu.num_events;
898
899 perfctr_fail:
900         for (i--; i >= 0; i--)
901                 release_perfctr_nmi(x86_pmu.perfctr + i);
902
903         if (nmi_watchdog == NMI_LOCAL_APIC)
904                 enable_lapic_nmi_watchdog();
905
906         return false;
907 #endif
908 }
909
910 static void release_pmc_hardware(void)
911 {
912 #ifdef CONFIG_X86_LOCAL_APIC
913         int i;
914
915         for (i = 0; i < x86_pmu.num_events; i++) {
916                 release_perfctr_nmi(x86_pmu.perfctr + i);
917                 release_evntsel_nmi(x86_pmu.eventsel + i);
918         }
919
920         if (nmi_watchdog == NMI_LOCAL_APIC)
921                 enable_lapic_nmi_watchdog();
922 #endif
923 }
924
925 static inline bool bts_available(void)
926 {
927         return x86_pmu.enable_bts != NULL;
928 }
929
930 static inline void init_debug_store_on_cpu(int cpu)
931 {
932         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
933
934         if (!ds)
935                 return;
936
937         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
938                      (u32)((u64)(unsigned long)ds),
939                      (u32)((u64)(unsigned long)ds >> 32));
940 }
941
942 static inline void fini_debug_store_on_cpu(int cpu)
943 {
944         if (!per_cpu(cpu_hw_events, cpu).ds)
945                 return;
946
947         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
948 }
949
950 static void release_bts_hardware(void)
951 {
952         int cpu;
953
954         if (!bts_available())
955                 return;
956
957         get_online_cpus();
958
959         for_each_online_cpu(cpu)
960                 fini_debug_store_on_cpu(cpu);
961
962         for_each_possible_cpu(cpu) {
963                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
964
965                 if (!ds)
966                         continue;
967
968                 per_cpu(cpu_hw_events, cpu).ds = NULL;
969
970                 kfree((void *)(unsigned long)ds->bts_buffer_base);
971                 kfree(ds);
972         }
973
974         put_online_cpus();
975 }
976
977 static int reserve_bts_hardware(void)
978 {
979         int cpu, err = 0;
980
981         if (!bts_available())
982                 return 0;
983
984         get_online_cpus();
985
986         for_each_possible_cpu(cpu) {
987                 struct debug_store *ds;
988                 void *buffer;
989
990                 err = -ENOMEM;
991                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
992                 if (unlikely(!buffer))
993                         break;
994
995                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
996                 if (unlikely(!ds)) {
997                         kfree(buffer);
998                         break;
999                 }
1000
1001                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
1002                 ds->bts_index = ds->bts_buffer_base;
1003                 ds->bts_absolute_maximum =
1004                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
1005                 ds->bts_interrupt_threshold =
1006                         ds->bts_absolute_maximum - BTS_OVFL_TH;
1007
1008                 per_cpu(cpu_hw_events, cpu).ds = ds;
1009                 err = 0;
1010         }
1011
1012         if (err)
1013                 release_bts_hardware();
1014         else {
1015                 for_each_online_cpu(cpu)
1016                         init_debug_store_on_cpu(cpu);
1017         }
1018
1019         put_online_cpus();
1020
1021         return err;
1022 }
1023
1024 static void hw_perf_event_destroy(struct perf_event *event)
1025 {
1026         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
1027                 release_pmc_hardware();
1028                 release_bts_hardware();
1029                 mutex_unlock(&pmc_reserve_mutex);
1030         }
1031 }
1032
1033 static inline int x86_pmu_initialized(void)
1034 {
1035         return x86_pmu.handle_irq != NULL;
1036 }
1037
1038 static inline int
1039 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
1040 {
1041         unsigned int cache_type, cache_op, cache_result;
1042         u64 config, val;
1043
1044         config = attr->config;
1045
1046         cache_type = (config >>  0) & 0xff;
1047         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
1048                 return -EINVAL;
1049
1050         cache_op = (config >>  8) & 0xff;
1051         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
1052                 return -EINVAL;
1053
1054         cache_result = (config >> 16) & 0xff;
1055         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1056                 return -EINVAL;
1057
1058         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
1059
1060         if (val == 0)
1061                 return -ENOENT;
1062
1063         if (val == -1)
1064                 return -EINVAL;
1065
1066         hwc->config |= val;
1067
1068         return 0;
1069 }
1070
1071 static void intel_pmu_enable_bts(u64 config)
1072 {
1073         unsigned long debugctlmsr;
1074
1075         debugctlmsr = get_debugctlmsr();
1076
1077         debugctlmsr |= X86_DEBUGCTL_TR;
1078         debugctlmsr |= X86_DEBUGCTL_BTS;
1079         debugctlmsr |= X86_DEBUGCTL_BTINT;
1080
1081         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
1082                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
1083
1084         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
1085                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
1086
1087         update_debugctlmsr(debugctlmsr);
1088 }
1089
1090 static void intel_pmu_disable_bts(void)
1091 {
1092         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1093         unsigned long debugctlmsr;
1094
1095         if (!cpuc->ds)
1096                 return;
1097
1098         debugctlmsr = get_debugctlmsr();
1099
1100         debugctlmsr &=
1101                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
1102                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
1103
1104         update_debugctlmsr(debugctlmsr);
1105 }
1106
1107 /*
1108  * Set up the hardware configuration for a given attr_type
1109  */
1110 static int __hw_perf_event_init(struct perf_event *event)
1111 {
1112         struct perf_event_attr *attr = &event->attr;
1113         struct hw_perf_event *hwc = &event->hw;
1114         u64 config;
1115         int err;
1116
1117         if (!x86_pmu_initialized())
1118                 return -ENODEV;
1119
1120         err = 0;
1121         if (!atomic_inc_not_zero(&active_events)) {
1122                 mutex_lock(&pmc_reserve_mutex);
1123                 if (atomic_read(&active_events) == 0) {
1124                         if (!reserve_pmc_hardware())
1125                                 err = -EBUSY;
1126                         else
1127                                 err = reserve_bts_hardware();
1128                 }
1129                 if (!err)
1130                         atomic_inc(&active_events);
1131                 mutex_unlock(&pmc_reserve_mutex);
1132         }
1133         if (err)
1134                 return err;
1135
1136         event->destroy = hw_perf_event_destroy;
1137
1138         /*
1139          * Generate PMC IRQs:
1140          * (keep 'enabled' bit clear for now)
1141          */
1142         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1143
1144         hwc->idx = -1;
1145
1146         /*
1147          * Count user and OS events unless requested not to.
1148          */
1149         if (!attr->exclude_user)
1150                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1151         if (!attr->exclude_kernel)
1152                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1153
1154         if (!hwc->sample_period) {
1155                 hwc->sample_period = x86_pmu.max_period;
1156                 hwc->last_period = hwc->sample_period;
1157                 atomic64_set(&hwc->period_left, hwc->sample_period);
1158         } else {
1159                 /*
1160                  * If we have a PMU initialized but no APIC
1161                  * interrupts, we cannot sample hardware
1162                  * events (user-space has to fall back and
1163                  * sample via a hrtimer based software event):
1164                  */
1165                 if (!x86_pmu.apic)
1166                         return -EOPNOTSUPP;
1167         }
1168
1169         /*
1170          * The raw hw_event type provides the config in the hw_event structure
1171          */
1172         if (attr->type == PERF_TYPE_RAW) {
1173                 hwc->config |= x86_pmu.raw_event(attr->config);
1174                 return 0;
1175         }
1176
1177         if (attr->type == PERF_TYPE_HW_CACHE)
1178                 return set_ext_hw_attr(hwc, attr);
1179
1180         if (attr->config >= x86_pmu.max_events)
1181                 return -EINVAL;
1182
1183         /*
1184          * The generic map:
1185          */
1186         config = x86_pmu.event_map(attr->config);
1187
1188         if (config == 0)
1189                 return -ENOENT;
1190
1191         if (config == -1LL)
1192                 return -EINVAL;
1193
1194         /*
1195          * Branch tracing:
1196          */
1197         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1198             (hwc->sample_period == 1)) {
1199                 /* BTS is not supported by this architecture. */
1200                 if (!bts_available())
1201                         return -EOPNOTSUPP;
1202
1203                 /* BTS is currently only allowed for user-mode. */
1204                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1205                         return -EOPNOTSUPP;
1206         }
1207
1208         hwc->config |= config;
1209
1210         return 0;
1211 }
1212
1213 static void p6_pmu_disable_all(void)
1214 {
1215         u64 val;
1216
1217         /* p6 only has one enable register */
1218         rdmsrl(MSR_P6_EVNTSEL0, val);
1219         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1220         wrmsrl(MSR_P6_EVNTSEL0, val);
1221 }
1222
1223 static void intel_pmu_disable_all(void)
1224 {
1225         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1226
1227         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1228
1229         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1230                 intel_pmu_disable_bts();
1231 }
1232
1233 static void x86_pmu_disable_all(void)
1234 {
1235         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1236         int idx;
1237
1238         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1239                 u64 val;
1240
1241                 if (!test_bit(idx, cpuc->active_mask))
1242                         continue;
1243                 rdmsrl(x86_pmu.eventsel + idx, val);
1244                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1245                         continue;
1246                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1247                 wrmsrl(x86_pmu.eventsel + idx, val);
1248         }
1249 }
1250
1251 void hw_perf_disable(void)
1252 {
1253         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1254
1255         if (!x86_pmu_initialized())
1256                 return;
1257
1258         if (!cpuc->enabled)
1259                 return;
1260
1261         cpuc->n_added = 0;
1262         cpuc->enabled = 0;
1263         barrier();
1264
1265         x86_pmu.disable_all();
1266 }
1267
1268 static void p6_pmu_enable_all(void)
1269 {
1270         unsigned long val;
1271
1272         /* p6 only has one enable register */
1273         rdmsrl(MSR_P6_EVNTSEL0, val);
1274         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1275         wrmsrl(MSR_P6_EVNTSEL0, val);
1276 }
1277
1278 static void intel_pmu_enable_all(void)
1279 {
1280         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1281
1282         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1283
1284         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1285                 struct perf_event *event =
1286                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1287
1288                 if (WARN_ON_ONCE(!event))
1289                         return;
1290
1291                 intel_pmu_enable_bts(event->hw.config);
1292         }
1293 }
1294
1295 static void x86_pmu_enable_all(void)
1296 {
1297         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1298         int idx;
1299
1300         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1301                 struct perf_event *event = cpuc->events[idx];
1302                 u64 val;
1303
1304                 if (!test_bit(idx, cpuc->active_mask))
1305                         continue;
1306
1307                 val = event->hw.config;
1308                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1309                 wrmsrl(x86_pmu.eventsel + idx, val);
1310         }
1311 }
1312
1313 static const struct pmu pmu;
1314
1315 static inline int is_x86_event(struct perf_event *event)
1316 {
1317         return event->pmu == &pmu;
1318 }
1319
1320 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1321 {
1322         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1323         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1324         int i, j, w, wmax, num = 0;
1325         struct hw_perf_event *hwc;
1326
1327         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1328
1329         for (i = 0; i < n; i++) {
1330                 constraints[i] =
1331                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1332         }
1333
1334         /*
1335          * fast path: try to reuse previous register assignments
1336          */
1337         for (i = 0; i < n; i++) {
1338                 hwc = &cpuc->event_list[i]->hw;
1339                 c = constraints[i];
1340
1341                 /* never assigned */
1342                 if (hwc->idx == -1)
1343                         break;
1344
1345                 /* constraint still honored */
1346                 if (!test_bit(hwc->idx, c->idxmsk))
1347                         break;
1348
1349                 /* not already used */
1350                 if (test_bit(hwc->idx, used_mask))
1351                         break;
1352
1353                 set_bit(hwc->idx, used_mask);
1354                 if (assign)
1355                         assign[i] = hwc->idx;
1356         }
1357         if (i == n)
1358                 goto done;
1359
1360         /*
1361          * begin slow path
1362          */
1363
1364         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1365
1366         /*
1367          * weight = number of possible counters
1368          *
1369          * 1    = most constrained, only works on one counter
1370          * wmax = least constrained, works on any counter
1371          *
1372          * assign events to counters starting with most
1373          * constrained events.
1374          */
1375         wmax = x86_pmu.num_events;
1376
1377         /*
1378          * when fixed event counters are present,
1379          * wmax is incremented by 1 to account
1380          * for one more choice
1381          */
1382         if (x86_pmu.num_events_fixed)
1383                 wmax++;
1384
1385         for (w = 1, num = n; num && w <= wmax; w++) {
1386                 /* for each event */
1387                 for (i = 0; num && i < n; i++) {
1388                         c = constraints[i];
1389                         hwc = &cpuc->event_list[i]->hw;
1390
1391                         if (c->weight != w)
1392                                 continue;
1393
1394                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1395                                 if (!test_bit(j, used_mask))
1396                                         break;
1397                         }
1398
1399                         if (j == X86_PMC_IDX_MAX)
1400                                 break;
1401
1402                         set_bit(j, used_mask);
1403
1404                         if (assign)
1405                                 assign[i] = j;
1406                         num--;
1407                 }
1408         }
1409 done:
1410         /*
1411          * scheduling failed or is just a simulation,
1412          * free resources if necessary
1413          */
1414         if (!assign || num) {
1415                 for (i = 0; i < n; i++) {
1416                         if (x86_pmu.put_event_constraints)
1417                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1418                 }
1419         }
1420         return num ? -ENOSPC : 0;
1421 }
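/*
 * Scheduling example (informal): given two collected events, one
 * constrained to counter 0 only (weight 1) and one unconstrained
 * (weight == x86_pmu.num_events), the slow path above places the
 * weight-1 event on counter 0 first and only then lets the
 * unconstrained event take any remaining counter; assigning from the
 * most constrained weight upwards is what makes this work.
 */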
1422
1423 /*
1424  * dogrp: true if we must collect sibling events (group)
1425  * returns total number of events and error code
1426  */
1427 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1428 {
1429         struct perf_event *event;
1430         int n, max_count;
1431
1432         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1433
1434         /* current number of events already accepted */
1435         n = cpuc->n_events;
1436
1437         if (is_x86_event(leader)) {
1438                 if (n >= max_count)
1439                         return -ENOSPC;
1440                 cpuc->event_list[n] = leader;
1441                 n++;
1442         }
1443         if (!dogrp)
1444                 return n;
1445
1446         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1447                 if (!is_x86_event(event) ||
1448                     event->state <= PERF_EVENT_STATE_OFF)
1449                         continue;
1450
1451                 if (n >= max_count)
1452                         return -ENOSPC;
1453
1454                 cpuc->event_list[n] = event;
1455                 n++;
1456         }
1457         return n;
1458 }
1459
1460
1461 static inline void x86_assign_hw_event(struct perf_event *event,
1462                                 struct hw_perf_event *hwc, int idx)
1463 {
1464         hwc->idx = idx;
1465
1466         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1467                 hwc->config_base = 0;
1468                 hwc->event_base = 0;
1469         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1470                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1471                 /*
1472                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1473                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1474                  */
1475                 hwc->event_base =
1476                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1477         } else {
1478                 hwc->config_base = x86_pmu.eventsel;
1479                 hwc->event_base  = x86_pmu.perfctr;
1480         }
1481 }
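/*
 * Example (sketch): for the second fixed-purpose counter, idx is
 * X86_PMC_IDX_FIXED + 1, so event_base + idx evaluates to
 * MSR_ARCH_PERFMON_FIXED_CTR0 + 1, i.e. MSR_ARCH_PERFMON_FIXED_CTR1,
 * which is what the rdmsrl()/wrmsrl() users of event_base rely on.
 */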
1482
1483 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
1484
1485 void hw_perf_enable(void)
1486 {
1487         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1488         struct perf_event *event;
1489         struct hw_perf_event *hwc;
1490         int i;
1491
1492         if (!x86_pmu_initialized())
1493                 return;
1494
1495         if (cpuc->enabled)
1496                 return;
1497
1498         if (cpuc->n_added) {
1499                 /*
1500                  * apply assignment obtained either from
1501                  * hw_perf_group_sched_in() or x86_pmu_enable()
1502                  *
1503                  * step1: save events moving to new counters
1504                  * step2: reprogram moved events into new counters
1505                  */
1506                 for (i = 0; i < cpuc->n_events; i++) {
1507
1508                         event = cpuc->event_list[i];
1509                         hwc = &event->hw;
1510
1511                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1512                                 continue;
1513
1514                         __x86_pmu_disable(event, cpuc);
1515
1516                         hwc->idx = -1;
1517                 }
1518
1519                 for (i = 0; i < cpuc->n_events; i++) {
1520
1521                         event = cpuc->event_list[i];
1522                         hwc = &event->hw;
1523
1524                         if (hwc->idx == -1) {
1525                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1526                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1527                         }
1528                         /*
1529                          * need to mark it active because x86_pmu_disable()
1530                          * clears active_mask and events[] yet it preserves
1531                          * idx
1532                          */
1533                         set_bit(hwc->idx, cpuc->active_mask);
1534                         cpuc->events[hwc->idx] = event;
1535
1536                         x86_pmu.enable(hwc, hwc->idx);
1537                         perf_event_update_userpage(event);
1538                 }
1539                 cpuc->n_added = 0;
1540                 perf_events_lapic_init();
1541         }
1542
1543         cpuc->enabled = 1;
1544         barrier();
1545
1546         x86_pmu.enable_all();
1547 }
1548
1549 static inline u64 intel_pmu_get_status(void)
1550 {
1551         u64 status;
1552
1553         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1554
1555         return status;
1556 }
1557
1558 static inline void intel_pmu_ack_status(u64 ack)
1559 {
1560         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1561 }
1562
1563 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1564 {
1565         (void)checking_wrmsrl(hwc->config_base + idx,
1566                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1567 }
1568
1569 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1570 {
1571         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1572 }
1573
1574 static inline void
1575 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1576 {
1577         int idx = __idx - X86_PMC_IDX_FIXED;
1578         u64 ctrl_val, mask;
1579
1580         mask = 0xfULL << (idx * 4);
1581
1582         rdmsrl(hwc->config_base, ctrl_val);
1583         ctrl_val &= ~mask;
1584         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1585 }
1586
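     /*
      * P6 has a single global enable bit (in EVNTSEL0) that gates both
      * counters, so an individual event is "disabled" by programming a
      * NOP event into its selector rather than by clearing the enable bit.
      */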
1587 static inline void
1588 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1589 {
1590         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1591         u64 val = P6_NOP_EVENT;
1592
1593         if (cpuc->enabled)
1594                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1595
1596         (void)checking_wrmsrl(hwc->config_base + idx, val);
1597 }
1598
1599 static inline void
1600 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1601 {
1602         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1603                 intel_pmu_disable_bts();
1604                 return;
1605         }
1606
1607         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1608                 intel_pmu_disable_fixed(hwc, idx);
1609                 return;
1610         }
1611
1612         x86_pmu_disable_event(hwc, idx);
1613 }
1614
1615 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1616
1617 /*
1618  * Set the next IRQ period, based on the hwc->period_left value.
1619  * To be called with the event disabled in hw:
1620  */
1621 static int
1622 x86_perf_event_set_period(struct perf_event *event,
1623                              struct hw_perf_event *hwc, int idx)
1624 {
1625         s64 left = atomic64_read(&hwc->period_left);
1626         s64 period = hwc->sample_period;
1627         int err, ret = 0;
1628
1629         if (idx == X86_PMC_IDX_FIXED_BTS)
1630                 return 0;
1631
1632         /*
1633          * If we are way outside a reasonable range then just skip forward:
1634          */
1635         if (unlikely(left <= -period)) {
1636                 left = period;
1637                 atomic64_set(&hwc->period_left, left);
1638                 hwc->last_period = period;
1639                 ret = 1;
1640         }
1641
1642         if (unlikely(left <= 0)) {
1643                 left += period;
1644                 atomic64_set(&hwc->period_left, left);
1645                 hwc->last_period = period;
1646                 ret = 1;
1647         }
1648         /*
1649          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1650          */
1651         if (unlikely(left < 2))
1652                 left = 2;
1653
1654         if (left > x86_pmu.max_period)
1655                 left = x86_pmu.max_period;
1656
1657         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1658
1659         /*
1660          * The hw event starts counting from this event offset,
1661          * mark it to be able to extract future deltas:
1662          */
1663         atomic64_set(&hwc->prev_count, (u64)-left);
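             /*
              * e.g. with left == 200000 the counter is loaded with -200000
              * (truncated to the counter width), so it overflows, and for
              * sampling events raises a PMI, after exactly 200000 increments.
              */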
1664
1665         err = checking_wrmsrl(hwc->event_base + idx,
1666                              (u64)(-left) & x86_pmu.event_mask);
1667
1668         perf_event_update_userpage(event);
1669
1670         return ret;
1671 }
1672
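     /*
      * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
      * (field i controls FIXED_CTR(i)): bit 0 enables ring-0 counting, bit 1
      * enables ring-3 counting, bit 2 is the AnyThread bit (perfmon v3 and up)
      * and bit 3 enables PMI generation on overflow.
      */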
1673 static inline void
1674 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1675 {
1676         int idx = __idx - X86_PMC_IDX_FIXED;
1677         u64 ctrl_val, bits, mask;
1678         int err;
1679
1680         /*
1681          * Enable IRQ generation (0x8),
1682          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1683          * if requested:
1684          */
1685         bits = 0x8ULL;
1686         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1687                 bits |= 0x2;
1688         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1689                 bits |= 0x1;
1690
1691         /*
1692          * ANY bit is supported in v3 and up
1693          */
1694         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1695                 bits |= 0x4;
1696
1697         bits <<= (idx * 4);
1698         mask = 0xfULL << (idx * 4);
1699
1700         rdmsrl(hwc->config_base, ctrl_val);
1701         ctrl_val &= ~mask;
1702         ctrl_val |= bits;
1703         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1704 }
1705
1706 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1707 {
1708         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1709         u64 val;
1710
1711         val = hwc->config;
1712         if (cpuc->enabled)
1713                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1714
1715         (void)checking_wrmsrl(hwc->config_base + idx, val);
1716 }
1717
1718
1719 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1720 {
1721         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1722                 if (!__get_cpu_var(cpu_hw_events).enabled)
1723                         return;
1724
1725                 intel_pmu_enable_bts(hwc->config);
1726                 return;
1727         }
1728
1729         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1730                 intel_pmu_enable_fixed(hwc, idx);
1731                 return;
1732         }
1733
1734         __x86_pmu_enable_event(hwc, idx);
1735 }
1736
1737 static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1738 {
1739         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1740         if (cpuc->enabled)
1741                 __x86_pmu_enable_event(hwc, idx);
1742 }
1743
1744 /*
1745  * activate a single event
1746  *
1747  * The event is added to the group of enabled events
1748  * but only if it can be scheduled with existing events.
1749  *
1750  * Called with PMU disabled. If successful (return value 0), the caller
1751  * is then guaranteed to call perf_enable() and hw_perf_enable().
1752  */
1753 static int x86_pmu_enable(struct perf_event *event)
1754 {
1755         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1756         struct hw_perf_event *hwc;
1757         int assign[X86_PMC_IDX_MAX];
1758         int n, n0, ret;
1759
1760         hwc = &event->hw;
1761
1762         n0 = cpuc->n_events;
1763         n = collect_events(cpuc, event, false);
1764         if (n < 0)
1765                 return n;
1766
1767         ret = x86_schedule_events(cpuc, n, assign);
1768         if (ret)
1769                 return ret;
1770         /*
1771          * copy the new assignment now that we know it is possible;
1772          * it will be used by hw_perf_enable()
1773          */
1774         memcpy(cpuc->assign, assign, n*sizeof(int));
1775
1776         cpuc->n_events = n;
1777         cpuc->n_added  = n - n0;
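             /*
              * the new events are only programmed onto their counters by
              * the next hw_perf_enable()
              */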
1778
1779         return 0;
1780 }
1781
1782 static void x86_pmu_unthrottle(struct perf_event *event)
1783 {
1784         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1785         struct hw_perf_event *hwc = &event->hw;
1786
1787         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1788                                 cpuc->events[hwc->idx] != event))
1789                 return;
1790
1791         x86_pmu.enable(hwc, hwc->idx);
1792 }
1793
1794 void perf_event_print_debug(void)
1795 {
1796         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1797         struct cpu_hw_events *cpuc;
1798         unsigned long flags;
1799         int cpu, idx;
1800
1801         if (!x86_pmu.num_events)
1802                 return;
1803
1804         local_irq_save(flags);
1805
1806         cpu = smp_processor_id();
1807         cpuc = &per_cpu(cpu_hw_events, cpu);
1808
1809         if (x86_pmu.version >= 2) {
1810                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1811                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1812                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1813                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1814
1815                 pr_info("\n");
1816                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1817                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1818                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1819                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1820         }
1821         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1822
1823         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1824                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1825                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1826
1827                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1828
1829                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1830                         cpu, idx, pmc_ctrl);
1831                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1832                         cpu, idx, pmc_count);
1833                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1834                         cpu, idx, prev_left);
1835         }
1836         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1837                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1838
1839                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1840                         cpu, idx, pmc_count);
1841         }
1842         local_irq_restore(flags);
1843 }
1844
1845 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1846 {
1847         struct debug_store *ds = cpuc->ds;
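             /* in-memory layout of one hardware-written BTS entry: */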
1848         struct bts_record {
1849                 u64     from;
1850                 u64     to;
1851                 u64     flags;
1852         };
1853         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1854         struct bts_record *at, *top;
1855         struct perf_output_handle handle;
1856         struct perf_event_header header;
1857         struct perf_sample_data data;
1858         struct pt_regs regs;
1859
1860         if (!event)
1861                 return;
1862
1863         if (!ds)
1864                 return;
1865
1866         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1867         top = (struct bts_record *)(unsigned long)ds->bts_index;
1868
1869         if (top <= at)
1870                 return;
1871
1872         ds->bts_index = ds->bts_buffer_base;
1873
1874
1875         data.period     = event->hw.last_period;
1876         data.addr       = 0;
1877         data.raw        = NULL;
1878         regs.ip         = 0;
1879
1880         /*
1881          * Prepare a generic sample, i.e. fill in the invariant fields.
1882          * We will overwrite the from and to address before we output
1883          * the sample.
1884          */
1885         perf_prepare_sample(&header, &data, event, &regs);
1886
1887         if (perf_output_begin(&handle, event,
1888                               header.size * (top - at), 1, 1))
1889                 return;
1890
1891         for (; at < top; at++) {
1892                 data.ip         = at->from;
1893                 data.addr       = at->to;
1894
1895                 perf_output_sample(&handle, &header, &data, event);
1896         }
1897
1898         perf_output_end(&handle);
1899
1900         /* There's new data available. */
1901         event->hw.interrupts++;
1902         event->pending_kill = POLL_IN;
1903 }
1904
1905 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
1906 {
1907         struct hw_perf_event *hwc = &event->hw;
1908         int idx = hwc->idx;
1909
1910         /*
1911          * Must be done before we disable, otherwise the NMI handler
1912          * could re-enable it again:
1913          */
1914         clear_bit(idx, cpuc->active_mask);
1915         x86_pmu.disable(hwc, idx);
1916
1917         /*
1918          * Drain the remaining delta count out of an event
1919          * that we are disabling:
1920          */
1921         x86_perf_event_update(event, hwc, idx);
1922
1923         /* Drain the remaining BTS records. */
1924         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1925                 intel_pmu_drain_bts_buffer(cpuc);
1926
1927         cpuc->events[idx] = NULL;
1928 }
1929
1930 static void x86_pmu_disable(struct perf_event *event)
1931 {
1932         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1933         int i;
1934
1935         __x86_pmu_disable(event, cpuc);
1936
1937         for (i = 0; i < cpuc->n_events; i++) {
1938                 if (event == cpuc->event_list[i]) {
1939
1940                         if (x86_pmu.put_event_constraints)
1941                                 x86_pmu.put_event_constraints(cpuc, event);
1942
1943                         while (++i < cpuc->n_events)
1944                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1945
1946                         --cpuc->n_events;
1947                         break;
1948                 }
1949         }
1950         perf_event_update_userpage(event);
1951 }
1952
1953 /*
1954  * Save and restart an expired event. Called by NMI contexts,
1955  * so it has to be careful about preempting normal event ops:
1956  */
1957 static int intel_pmu_save_and_restart(struct perf_event *event)
1958 {
1959         struct hw_perf_event *hwc = &event->hw;
1960         int idx = hwc->idx;
1961         int ret;
1962
1963         x86_perf_event_update(event, hwc, idx);
1964         ret = x86_perf_event_set_period(event, hwc, idx);
1965
1966         if (event->state == PERF_EVENT_STATE_ACTIVE)
1967                 intel_pmu_enable_event(hwc, idx);
1968
1969         return ret;
1970 }
1971
1972 static void intel_pmu_reset(void)
1973 {
1974         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1975         unsigned long flags;
1976         int idx;
1977
1978         if (!x86_pmu.num_events)
1979                 return;
1980
1981         local_irq_save(flags);
1982
1983         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1984
1985         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1986                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1987                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1988         }
1989         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1990                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1991         }
1992         if (ds)
1993                 ds->bts_index = ds->bts_buffer_base;
1994
1995         local_irq_restore(flags);
1996 }
1997
1998 /*
1999  * This handler is triggered by the local APIC, so the APIC IRQ handling
2000  * rules apply:
2001  */
2002 static int intel_pmu_handle_irq(struct pt_regs *regs)
2003 {
2004         struct perf_sample_data data;
2005         struct cpu_hw_events *cpuc;
2006         int bit, loops;
2007         u64 ack, status;
2008
2009         data.addr = 0;
2010         data.raw = NULL;
2011
2012         cpuc = &__get_cpu_var(cpu_hw_events);
2013
2014         perf_disable();
2015         intel_pmu_drain_bts_buffer(cpuc);
2016         status = intel_pmu_get_status();
2017         if (!status) {
2018                 perf_enable();
2019                 return 0;
2020         }
2021
2022         loops = 0;
2023 again:
2024         if (++loops > 100) {
2025                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2026                 perf_event_print_debug();
2027                 intel_pmu_reset();
2028                 perf_enable();
2029                 return 1;
2030         }
2031
2032         inc_irq_stat(apic_perf_irqs);
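             /*
              * Remember the overflow bits we saw; writing them back through
              * GLOBAL_OVF_CTRL (intel_pmu_ack_status() below) clears them in
              * GLOBAL_STATUS.
              */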
2033         ack = status;
2034         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2035                 struct perf_event *event = cpuc->events[bit];
2036
2037                 clear_bit(bit, (unsigned long *) &status);
2038                 if (!test_bit(bit, cpuc->active_mask))
2039                         continue;
2040
2041                 if (!intel_pmu_save_and_restart(event))
2042                         continue;
2043
2044                 data.period = event->hw.last_period;
2045
2046                 if (perf_event_overflow(event, 1, &data, regs))
2047                         intel_pmu_disable_event(&event->hw, bit);
2048         }
2049
2050         intel_pmu_ack_status(ack);
2051
2052         /*
2053          * Repeat if there is more work to be done:
2054          */
2055         status = intel_pmu_get_status();
2056         if (status)
2057                 goto again;
2058
2059         perf_enable();
2060
2061         return 1;
2062 }
2063
2064 static int x86_pmu_handle_irq(struct pt_regs *regs)
2065 {
2066         struct perf_sample_data data;
2067         struct cpu_hw_events *cpuc;
2068         struct perf_event *event;
2069         struct hw_perf_event *hwc;
2070         int idx, handled = 0;
2071         u64 val;
2072
2073         data.addr = 0;
2074         data.raw = NULL;
2075
2076         cpuc = &__get_cpu_var(cpu_hw_events);
2077
2078         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2079                 if (!test_bit(idx, cpuc->active_mask))
2080                         continue;
2081
2082                 event = cpuc->events[idx];
2083                 hwc = &event->hw;
2084
2085                 val = x86_perf_event_update(event, hwc, idx);
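                     /*
                      * The counter was programmed with a negative value; while
                      * its top bit is still set it has not yet wrapped past
                      * zero, i.e. no overflow has occurred.
                      */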
2086                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2087                         continue;
2088
2089                 /*
2090                  * event overflow
2091                  */
2092                 handled         = 1;
2093                 data.period     = event->hw.last_period;
2094
2095                 if (!x86_perf_event_set_period(event, hwc, idx))
2096                         continue;
2097
2098                 if (perf_event_overflow(event, 1, &data, regs))
2099                         x86_pmu.disable(hwc, idx);
2100         }
2101
2102         if (handled)
2103                 inc_irq_stat(apic_perf_irqs);
2104
2105         return handled;
2106 }
2107
2108 void smp_perf_pending_interrupt(struct pt_regs *regs)
2109 {
2110         irq_enter();
2111         ack_APIC_irq();
2112         inc_irq_stat(apic_pending_irqs);
2113         perf_event_do_pending();
2114         irq_exit();
2115 }
2116
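     /*
      * Overflow work that must not run from NMI context (wakeups etc.) is
      * deferred: set_perf_event_pending() sends a self-IPI on
      * LOCAL_PENDING_VECTOR and the handler above then runs
      * perf_event_do_pending() at normal interrupt level.
      */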
2117 void set_perf_event_pending(void)
2118 {
2119 #ifdef CONFIG_X86_LOCAL_APIC
2120         if (!x86_pmu.apic || !x86_pmu_initialized())
2121                 return;
2122
2123         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2124 #endif
2125 }
2126
2127 void perf_events_lapic_init(void)
2128 {
2129 #ifdef CONFIG_X86_LOCAL_APIC
2130         if (!x86_pmu.apic || !x86_pmu_initialized())
2131                 return;
2132
2133         /*
2134          * Always use NMI for PMU
2135          */
2136         apic_write(APIC_LVTPC, APIC_DM_NMI);
2137 #endif
2138 }
2139
2140 static int __kprobes
2141 perf_event_nmi_handler(struct notifier_block *self,
2142                          unsigned long cmd, void *__args)
2143 {
2144         struct die_args *args = __args;
2145         struct pt_regs *regs;
2146
2147         if (!atomic_read(&active_events))
2148                 return NOTIFY_DONE;
2149
2150         switch (cmd) {
2151         case DIE_NMI:
2152         case DIE_NMI_IPI:
2153                 break;
2154
2155         default:
2156                 return NOTIFY_DONE;
2157         }
2158
2159         regs = args->regs;
2160
2161 #ifdef CONFIG_X86_LOCAL_APIC
2162         apic_write(APIC_LVTPC, APIC_DM_NMI);
2163 #endif
2164         /*
2165          * Can't rely on the handled return value to say it was our NMI, two
2166          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2167          *
2168          * If the first NMI handles both, the latter will be empty and daze
2169          * the CPU.
2170          */
2171         x86_pmu.handle_irq(regs);
2172
2173         return NOTIFY_STOP;
2174 }
2175
2176 static struct event_constraint unconstrained;
2177
2178 static struct event_constraint bts_constraint =
2179         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
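     /*
      * A branch-instructions event with a sample period of 1 is effectively
      * a branch trace request; intel_special_constraints() below steers it
      * onto the BTS pseudo counter via this constraint.
      */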
2180
2181 static struct event_constraint *
2182 intel_special_constraints(struct perf_event *event)
2183 {
2184         unsigned int hw_event;
2185
2186         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2187
2188         if (unlikely((hw_event ==
2189                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2190                      (event->hw.sample_period == 1))) {
2191
2192                 return &bts_constraint;
2193         }
2194         return NULL;
2195 }
2196
2197 static struct event_constraint *
2198 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2199 {
2200         struct event_constraint *c;
2201
2202         c = intel_special_constraints(event);
2203         if (c)
2204                 return c;
2205
2206         if (x86_pmu.event_constraints) {
2207                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2208                         if ((event->hw.config & c->cmask) == c->code)
2209                                 return c;
2210                 }
2211         }
2212
2213         return &unconstrained;
2214 }
2215
2216 static struct event_constraint *
2217 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2218 {
2219         return &unconstrained;
2220 }
2221
2222 static int x86_event_sched_in(struct perf_event *event,
2223                           struct perf_cpu_context *cpuctx, int cpu)
2224 {
2225         int ret = 0;
2226
2227         event->state = PERF_EVENT_STATE_ACTIVE;
2228         event->oncpu = cpu;
2229         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2230
2231         if (!is_x86_event(event))
2232                 ret = event->pmu->enable(event);
2233
2234         if (!ret && !is_software_event(event))
2235                 cpuctx->active_oncpu++;
2236
2237         if (!ret && event->attr.exclusive)
2238                 cpuctx->exclusive = 1;
2239
2240         return ret;
2241 }
2242
2243 static void x86_event_sched_out(struct perf_event *event,
2244                             struct perf_cpu_context *cpuctx, int cpu)
2245 {
2246         event->state = PERF_EVENT_STATE_INACTIVE;
2247         event->oncpu = -1;
2248
2249         if (!is_x86_event(event))
2250                 event->pmu->disable(event);
2251
2252         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2253
2254         if (!is_software_event(event))
2255                 cpuctx->active_oncpu--;
2256
2257         if (event->attr.exclusive || !cpuctx->active_oncpu)
2258                 cpuctx->exclusive = 0;
2259 }
2260
2261 /*
2262  * Called to enable a whole group of events.
2263  * Returns 1 if the group was scheduled in, or a negative error code
2264  * if it could not be. Assumes the caller has disabled interrupts and
2265  * has frozen the PMU with hw_perf_disable().
2266  *
2267  * Called with PMU disabled. If successful (return value 1), the caller
2268  * is then guaranteed to call perf_enable() and hw_perf_enable().
2269  */
2270 int hw_perf_group_sched_in(struct perf_event *leader,
2271                struct perf_cpu_context *cpuctx,
2272                struct perf_event_context *ctx, int cpu)
2273 {
2274         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2275         struct perf_event *sub;
2276         int assign[X86_PMC_IDX_MAX];
2277         int n0, n1, ret;
2278
2279         /* n0 = total number of events */
2280         n0 = collect_events(cpuc, leader, true);
2281         if (n0 < 0)
2282                 return n0;
2283
2284         ret = x86_schedule_events(cpuc, n0, assign);
2285         if (ret)
2286                 return ret;
2287
2288         ret = x86_event_sched_in(leader, cpuctx, cpu);
2289         if (ret)
2290                 return ret;
2291
2292         n1 = 1;
2293         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2294                 if (sub->state > PERF_EVENT_STATE_OFF) {
2295                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2296                         if (ret)
2297                                 goto undo;
2298                         ++n1;
2299                 }
2300         }
2301         /*
2302          * copy the new assignment now that we know it is possible;
2303          * it will be used by hw_perf_enable()
2304          */
2305         memcpy(cpuc->assign, assign, n0*sizeof(int));
2306
2307         cpuc->n_events  = n0;
2308         cpuc->n_added   = n1;
2309         ctx->nr_active += n1;
2310
2311         /*
2312          * A return value of 1 means success and that the events are
2313          * active. This is not quite true because we defer the
2314          * actual activation until hw_perf_enable(), but this way
2315          * we ensure the caller won't try to enable the
2316          * individual events.
2317          */
2318         return 1;
2319 undo:
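             /* schedule the leader back out, then the siblings that made it in */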
2320         x86_event_sched_out(leader, cpuctx, cpu);
2321         n0  = 1;
2322         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2323                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2324                         x86_event_sched_out(sub, cpuctx, cpu);
2325                         if (++n0 == n1)
2326                                 break;
2327                 }
2328         }
2329         return ret;
2330 }
2331
2332 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2333         .notifier_call          = perf_event_nmi_handler,
2334         .next                   = NULL,
2335         .priority               = 1
2336 };
2337
2338 static __initconst struct x86_pmu p6_pmu = {
2339         .name                   = "p6",
2340         .handle_irq             = x86_pmu_handle_irq,
2341         .disable_all            = p6_pmu_disable_all,
2342         .enable_all             = p6_pmu_enable_all,
2343         .enable                 = p6_pmu_enable_event,
2344         .disable                = p6_pmu_disable_event,
2345         .eventsel               = MSR_P6_EVNTSEL0,
2346         .perfctr                = MSR_P6_PERFCTR0,
2347         .event_map              = p6_pmu_event_map,
2348         .raw_event              = p6_pmu_raw_event,
2349         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2350         .apic                   = 1,
2351         .max_period             = (1ULL << 31) - 1,
2352         .version                = 0,
2353         .num_events             = 2,
2354         /*
2355          * Events have 40 bits implemented. However they are designed such
2356          * that bits [32-39] are sign extensions of bit 31. As such the
2357          * effective width of an event for a P6-like PMU is 32 bits only.
2358          *
2359          * See IA-32 Intel Architecture Software developer manual Vol 3B
2360          */
2361         .event_bits             = 32,
2362         .event_mask             = (1ULL << 32) - 1,
2363         .get_event_constraints  = intel_get_event_constraints,
2364         .event_constraints      = intel_p6_event_constraints
2365 };
2366
2367 static __initconst struct x86_pmu core_pmu = {
2368         .name                   = "core",
2369         .handle_irq             = x86_pmu_handle_irq,
2370         .disable_all            = x86_pmu_disable_all,
2371         .enable_all             = x86_pmu_enable_all,
2372         .enable                 = x86_pmu_enable_event,
2373         .disable                = x86_pmu_disable_event,
2374         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2375         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2376         .event_map              = intel_pmu_event_map,
2377         .raw_event              = intel_pmu_raw_event,
2378         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2379         .apic                   = 1,
2380         /*
2381          * Intel PMCs cannot be accessed sanely above 32 bit width,
2382          * so we install an artificial 1<<31 period regardless of
2383          * the generic event period:
2384          */
2385         .max_period             = (1ULL << 31) - 1,
2386         .get_event_constraints  = intel_get_event_constraints,
2387         .event_constraints      = intel_core_event_constraints,
2388 };
2389
2390 static __initconst struct x86_pmu intel_pmu = {
2391         .name                   = "Intel",
2392         .handle_irq             = intel_pmu_handle_irq,
2393         .disable_all            = intel_pmu_disable_all,
2394         .enable_all             = intel_pmu_enable_all,
2395         .enable                 = intel_pmu_enable_event,
2396         .disable                = intel_pmu_disable_event,
2397         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2398         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2399         .event_map              = intel_pmu_event_map,
2400         .raw_event              = intel_pmu_raw_event,
2401         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2402         .apic                   = 1,
2403         /*
2404          * Intel PMCs cannot be accessed sanely above 32 bit width,
2405          * so we install an artificial 1<<31 period regardless of
2406          * the generic event period:
2407          */
2408         .max_period             = (1ULL << 31) - 1,
2409         .enable_bts             = intel_pmu_enable_bts,
2410         .disable_bts            = intel_pmu_disable_bts,
2411         .get_event_constraints  = intel_get_event_constraints
2412 };
2413
2414 static __initconst struct x86_pmu amd_pmu = {
2415         .name                   = "AMD",
2416         .handle_irq             = x86_pmu_handle_irq,
2417         .disable_all            = x86_pmu_disable_all,
2418         .enable_all             = x86_pmu_enable_all,
2419         .enable                 = x86_pmu_enable_event,
2420         .disable                = x86_pmu_disable_event,
2421         .eventsel               = MSR_K7_EVNTSEL0,
2422         .perfctr                = MSR_K7_PERFCTR0,
2423         .event_map              = amd_pmu_event_map,
2424         .raw_event              = amd_pmu_raw_event,
2425         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2426         .num_events             = 4,
2427         .event_bits             = 48,
2428         .event_mask             = (1ULL << 48) - 1,
2429         .apic                   = 1,
2430         /* use highest bit to detect overflow */
2431         .max_period             = (1ULL << 47) - 1,
2432         .get_event_constraints  = amd_get_event_constraints
2433 };
2434
2435 static __init int p6_pmu_init(void)
2436 {
2437         switch (boot_cpu_data.x86_model) {
2438         case 1:
2439         case 3:  /* Pentium Pro */
2440         case 5:
2441         case 6:  /* Pentium II */
2442         case 7:
2443         case 8:
2444         case 11: /* Pentium III */
2445         case 9:
2446         case 13:
2447                 /* Pentium M */
2448                 break;
2449         default:
2450                 pr_cont("unsupported p6 CPU model %d ",
2451                         boot_cpu_data.x86_model);
2452                 return -ENODEV;
2453         }
2454
2455         x86_pmu = p6_pmu;
2456
2457         return 0;
2458 }
2459
2460 static __init int intel_pmu_init(void)
2461 {
2462         union cpuid10_edx edx;
2463         union cpuid10_eax eax;
2464         unsigned int unused;
2465         unsigned int ebx;
2466         int version;
2467
2468         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2469                 /* check for P6 processor family */
2470                 if (boot_cpu_data.x86 == 6) {
2471                         return p6_pmu_init();
2472                 } else {
2473                         return -ENODEV;
2474                 }
2475         }
2476
2477         /*
2478          * Check whether the Architectural PerfMon supports
2479          * Branch Misses Retired hw_event or not.
2480          */
2481         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2482         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2483                 return -ENODEV;
2484
2485         version = eax.split.version_id;
2486         if (version < 2)
2487                 x86_pmu = core_pmu;
2488         else
2489                 x86_pmu = intel_pmu;
2490
2491         x86_pmu.version                 = version;
2492         x86_pmu.num_events              = eax.split.num_events;
2493         x86_pmu.event_bits              = eax.split.bit_width;
2494         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2495
2496         /*
2497          * Quirk: v2 perfmon does not report fixed-purpose events, so
2498          * assume at least 3 events:
2499          */
2500         if (version > 1)
2501                 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2502
2503         /*
2504          * Install the hw-cache-events table:
2505          */
2506         switch (boot_cpu_data.x86_model) {
2507         case 14: /* 65 nm core solo/duo, "Yonah" */
2508                 pr_cont("Core events, ");
2509                 break;
2510
2511         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2512         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2513         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2514         case 29: /* six-core 45 nm xeon "Dunnington" */
2515                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2516                        sizeof(hw_cache_event_ids));
2517
2518                 x86_pmu.event_constraints = intel_core2_event_constraints;
2519                 pr_cont("Core2 events, ");
2520                 break;
2521
2522         case 26: /* 45 nm nehalem, "Bloomfield" */
2523         case 30: /* 45 nm nehalem, "Lynnfield" */
2524                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2525                        sizeof(hw_cache_event_ids));
2526
2527                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2528                 pr_cont("Nehalem/Corei7 events, ");
2529                 break;
2530         case 28:
2531                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2532                        sizeof(hw_cache_event_ids));
2533
2534                 x86_pmu.event_constraints = intel_gen_event_constraints;
2535                 pr_cont("Atom events, ");
2536                 break;
2537
2538         case 37: /* 32 nm nehalem, "Clarkdale" */
2539         case 44: /* 32 nm nehalem, "Gulftown" */
2540                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2541                        sizeof(hw_cache_event_ids));
2542
2543                 x86_pmu.event_constraints = intel_westmere_event_constraints;
2544                 pr_cont("Westmere events, ");
2545                 break;
2546         default:
2547                 /*
2548                  * default constraints for v2 and up
2549                  */
2550                 x86_pmu.event_constraints = intel_gen_event_constraints;
2551                 pr_cont("generic architected perfmon, ");
2552         }
2553         return 0;
2554 }
2555
2556 static __init int amd_pmu_init(void)
2557 {
2558         /* Performance-monitoring supported from K7 and later: */
2559         if (boot_cpu_data.x86 < 6)
2560                 return -ENODEV;
2561
2562         x86_pmu = amd_pmu;
2563
2564         /* Events are common for all AMDs */
2565         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2566                sizeof(hw_cache_event_ids));
2567
2568         return 0;
2569 }
2570
2571 static void __init pmu_check_apic(void)
2572 {
2573         if (cpu_has_apic)
2574                 return;
2575
2576         x86_pmu.apic = 0;
2577         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2578         pr_info("no hardware sampling interrupt available.\n");
2579 }
2580
2581 void __init init_hw_perf_events(void)
2582 {
2583         int err;
2584
2585         pr_info("Performance Events: ");
2586
2587         switch (boot_cpu_data.x86_vendor) {
2588         case X86_VENDOR_INTEL:
2589                 err = intel_pmu_init();
2590                 break;
2591         case X86_VENDOR_AMD:
2592                 err = amd_pmu_init();
2593                 break;
2594         default:
2595                 return;
2596         }
2597         if (err != 0) {
2598                 pr_cont("no PMU driver, software events only.\n");
2599                 return;
2600         }
2601
2602         pmu_check_apic();
2603
2604         pr_cont("%s PMU driver.\n", x86_pmu.name);
2605
2606         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2607                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2608                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2609                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2610         }
2611         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2612         perf_max_events = x86_pmu.num_events;
2613
2614         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2615                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2616                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2617                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2618         }
2619
2620         perf_event_mask |=
2621                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2622         x86_pmu.intel_ctrl = perf_event_mask;
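             /*
              * perf_event_mask now has one bit per usable counter: generic
              * counters in the low bits and fixed counters starting at bit
              * X86_PMC_IDX_FIXED, matching the GLOBAL_CTRL/GLOBAL_STATUS layout.
              */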
2623
2624         perf_events_lapic_init();
2625         register_die_notifier(&perf_event_nmi_notifier);
2626
2627         unconstrained = (struct event_constraint)
2628                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
2629                                    0, x86_pmu.num_events);
2630
2631         pr_info("... version:                %d\n",     x86_pmu.version);
2632         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2633         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2634         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2635         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2636         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2637         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2638 }
2639
2640 static inline void x86_pmu_read(struct perf_event *event)
2641 {
2642         x86_perf_event_update(event, &event->hw, event->hw.idx);
2643 }
2644
2645 static const struct pmu pmu = {
2646         .enable         = x86_pmu_enable,
2647         .disable        = x86_pmu_disable,
2648         .read           = x86_pmu_read,
2649         .unthrottle     = x86_pmu_unthrottle,
2650 };
2651
2652 /*
2653  * validate a single event group
2654  *
2655  * validation includes:
2656  *      - events are compatible with each other
2657  *      - events do not compete for the same counter
2658  *      - number of events <= number of counters
2659  *
2660  * validation ensures the group can be loaded onto the
2661  * PMU if it was the only group available.
2662  */
2663 static int validate_group(struct perf_event *event)
2664 {
2665         struct perf_event *leader = event->group_leader;
2666         struct cpu_hw_events *fake_cpuc;
2667         int ret, n;
2668
2669         ret = -ENOMEM;
2670         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2671         if (!fake_cpuc)
2672                 goto out;
2673
2674         /*
2675          * the event is not yet connected with its
2676          * siblings, therefore we must first collect
2677          * the existing siblings, then add the new event
2678          * before we can simulate the scheduling
2679          */
2680         ret = -ENOSPC;
2681         n = collect_events(fake_cpuc, leader, true);
2682         if (n < 0)
2683                 goto out_free;
2684
2685         fake_cpuc->n_events = n;
2686         n = collect_events(fake_cpuc, event, false);
2687         if (n < 0)
2688                 goto out_free;
2689
2690         fake_cpuc->n_events = n;
2691
2692         ret = x86_schedule_events(fake_cpuc, n, NULL);
2693
2694 out_free:
2695         kfree(fake_cpuc);
2696 out:
2697         return ret;
2698 }
2699
2700 const struct pmu *hw_perf_event_init(struct perf_event *event)
2701 {
2702         const struct pmu *tmp;
2703         int err;
2704
2705         err = __hw_perf_event_init(event);
2706         if (!err) {
2707                 /*
2708                  * we temporarily connect the event to its pmu
2709                  * such that validate_group() can classify
2710                  * it as an x86 event using is_x86_event()
2711                  */
2712                 tmp = event->pmu;
2713                 event->pmu = &pmu;
2714
2715                 if (event->group_leader != event)
2716                         err = validate_group(event);
2717
2718                 event->pmu = tmp;
2719         }
2720         if (err) {
2721                 if (event->destroy)
2722                         event->destroy(event);
2723                 return ERR_PTR(err);
2724         }
2725
2726         return &pmu;
2727 }
2728
2729 /*
2730  * callchain support
2731  */
2732
2733 static inline
2734 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2735 {
2736         if (entry->nr < PERF_MAX_STACK_DEPTH)
2737                 entry->ip[entry->nr++] = ip;
2738 }
2739
2740 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2741 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2742
2743
2744 static void
2745 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2746 {
2747         /* Ignore warnings */
2748 }
2749
2750 static void backtrace_warning(void *data, char *msg)
2751 {
2752         /* Ignore warnings */
2753 }
2754
2755 static int backtrace_stack(void *data, char *name)
2756 {
2757         return 0;
2758 }
2759
2760 static void backtrace_address(void *data, unsigned long addr, int reliable)
2761 {
2762         struct perf_callchain_entry *entry = data;
2763
2764         if (reliable)
2765                 callchain_store(entry, addr);
2766 }
2767
2768 static const struct stacktrace_ops backtrace_ops = {
2769         .warning                = backtrace_warning,
2770         .warning_symbol         = backtrace_warning_symbol,
2771         .stack                  = backtrace_stack,
2772         .address                = backtrace_address,
2773         .walk_stack             = print_context_stack_bp,
2774 };
2775
2776 #include "../dumpstack.h"
2777
2778 static void
2779 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2780 {
2781         callchain_store(entry, PERF_CONTEXT_KERNEL);
2782         callchain_store(entry, regs->ip);
2783
2784         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2785 }
2786
2787 /*
2788  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2789  */
2790 static unsigned long
2791 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2792 {
2793         unsigned long offset, addr = (unsigned long)from;
2794         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2795         unsigned long size, len = 0;
2796         struct page *page;
2797         void *map;
2798         int ret;
2799
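             /*
              * A regular copy_from_user() could fault and sleep, which is not
              * allowed here, so pin each page with __get_user_pages_fast() and
              * copy through a short-lived atomic kmap instead.
              */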
2800         do {
2801                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2802                 if (!ret)
2803                         break;
2804
2805                 offset = addr & (PAGE_SIZE - 1);
2806                 size = min(PAGE_SIZE - offset, n - len);
2807
2808                 map = kmap_atomic(page, type);
2809                 memcpy(to, map+offset, size);
2810                 kunmap_atomic(map, type);
2811                 put_page(page);
2812
2813                 len  += size;
2814                 to   += size;
2815                 addr += size;
2816
2817         } while (len < n);
2818
2819         return len;
2820 }
2821
2822 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2823 {
2824         unsigned long bytes;
2825
2826         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2827
2828         return bytes == sizeof(*frame);
2829 }
2830
2831 static void
2832 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2833 {
2834         struct stack_frame frame;
2835         const void __user *fp;
2836
2837         if (!user_mode(regs))
2838                 regs = task_pt_regs(current);
2839
2840         fp = (void __user *)regs->bp;
2841
2842         callchain_store(entry, PERF_CONTEXT_USER);
2843         callchain_store(entry, regs->ip);
2844
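             /*
              * Walk the user stack by following saved frame pointers: each
              * stack_frame mirrors the (previous frame pointer, return address)
              * pair pushed by a frame-pointer preserving function prologue.
              */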
2845         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2846                 frame.next_frame     = NULL;
2847                 frame.return_address = 0;
2848
2849                 if (!copy_stack_frame(fp, &frame))
2850                         break;
2851
2852                 if ((unsigned long)fp < regs->sp)
2853                         break;
2854
2855                 callchain_store(entry, frame.return_address);
2856                 fp = frame.next_frame;
2857         }
2858 }
2859
2860 static void
2861 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2862 {
2863         int is_user;
2864
2865         if (!regs)
2866                 return;
2867
2868         is_user = user_mode(regs);
2869
2870         if (is_user && current->state != TASK_RUNNING)
2871                 return;
2872
2873         if (!is_user)
2874                 perf_callchain_kernel(regs, entry);
2875
2876         if (current->mm)
2877                 perf_callchain_user(regs, entry);
2878 }
2879
2880 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2881 {
2882         struct perf_callchain_entry *entry;
2883
2884         if (in_nmi())
2885                 entry = &__get_cpu_var(pmc_nmi_entry);
2886         else
2887                 entry = &__get_cpu_var(pmc_irq_entry);
2888
2889         entry->nr = 0;
2890
2891         perf_do_callchain(regs, entry);
2892
2893         return entry;
2894 }
2895
2896 void hw_perf_event_setup_online(int cpu)
2897 {
2898         init_debug_store_on_cpu(cpu);
2899 }