perf_event: x86: Optimize x86_pmu_disable()
linux-2.6.git: arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
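/*
 * With the values above, each per-cpu BTS buffer holds 2048 records
 * (24 * 2048 = 48 KiB) and the interrupt threshold sits 128 records
 * (3 KiB) before the absolute end of the buffer.
 */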
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 };
95
96 #define EVENT_CONSTRAINT(c, n, m) {     \
97         { .idxmsk64[0] = (n) },         \
98         .code = (c),                    \
99         .cmask = (m),                   \
100         .weight = HWEIGHT64((u64)(n)),  \
101 }
102
103 #define INTEL_EVENT_CONSTRAINT(c, n)    \
104         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
105
106 #define FIXED_EVENT_CONSTRAINT(c, n)    \
107         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
108
109 #define EVENT_CONSTRAINT_END \
110         EVENT_CONSTRAINT(0, 0, 0)
111
112 #define for_each_event_constraint(e, c) \
113         for ((e) = (c); (e)->cmask; (e)++)
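/*
 * Illustrative example: INTEL_EVENT_CONSTRAINT(0x12, 0x2) describes an
 * event with architectural event code 0x12 that may only be scheduled
 * on the counter(s) set in the index bitmask 0x2, i.e. generic counter 1;
 * its weight (number of usable counters) is HWEIGHT64(0x2) == 1.
 */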
114
115 /*
116  * struct x86_pmu - generic x86 pmu
117  */
118 struct x86_pmu {
119         const char      *name;
120         int             version;
121         int             (*handle_irq)(struct pt_regs *);
122         void            (*disable_all)(void);
123         void            (*enable_all)(void);
124         void            (*enable)(struct hw_perf_event *, int);
125         void            (*disable)(struct hw_perf_event *, int);
126         unsigned        eventsel;
127         unsigned        perfctr;
128         u64             (*event_map)(int);
129         u64             (*raw_event)(u64);
130         int             max_events;
131         int             num_events;
132         int             num_events_fixed;
133         int             event_bits;
134         u64             event_mask;
135         int             apic;
136         u64             max_period;
137         u64             intel_ctrl;
138         void            (*enable_bts)(u64 config);
139         void            (*disable_bts)(void);
140
141         struct event_constraint *
142                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
143                                                  struct perf_event *event);
144
145         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
146                                                  struct perf_event *event);
147         struct event_constraint *event_constraints;
148 };
149
150 static struct x86_pmu x86_pmu __read_mostly;
151
152 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
153         .enabled = 1,
154 };
155
156 static int x86_perf_event_set_period(struct perf_event *event,
157                              struct hw_perf_event *hwc, int idx);
158
159 /*
160  * Not sure about some of these
161  */
162 static const u64 p6_perfmon_event_map[] =
163 {
164   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
165   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
166   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
167   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
168   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
169   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
170   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
171 };
172
173 static u64 p6_pmu_event_map(int hw_event)
174 {
175         return p6_perfmon_event_map[hw_event];
176 }
177
178 /*
179  * Event setting that is specified not to count anything.
180  * We use this to effectively disable a counter.
181  *
182  * L2_RQSTS with 0 MESI unit mask.
183  */
184 #define P6_NOP_EVENT                    0x0000002EULL
185
186 static u64 p6_pmu_raw_event(u64 hw_event)
187 {
188 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
189 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
190 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
191 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
192 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
193
194 #define P6_EVNTSEL_MASK                 \
195         (P6_EVNTSEL_EVENT_MASK |        \
196          P6_EVNTSEL_UNIT_MASK  |        \
197          P6_EVNTSEL_EDGE_MASK  |        \
198          P6_EVNTSEL_INV_MASK   |        \
199          P6_EVNTSEL_REG_MASK)
200
201         return hw_event & P6_EVNTSEL_MASK;
202 }
203
204 static struct event_constraint intel_p6_event_constraints[] =
205 {
206         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
207         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
208         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
209         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
210         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
211         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
212         EVENT_CONSTRAINT_END
213 };
214
215 /*
216  * Intel PerfMon v3. Used on Core2 and later.
217  */
218 static const u64 intel_perfmon_event_map[] =
219 {
220   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
221   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
222   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
223   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
224   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
225   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
226   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
227 };
228
229 static struct event_constraint intel_core_event_constraints[] =
230 {
231         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
232         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
233         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
234         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
235         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
236         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
237         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
238         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
239         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
240         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
241         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
242         EVENT_CONSTRAINT_END
243 };
244
245 static struct event_constraint intel_nehalem_event_constraints[] =
246 {
247         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
248         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
249         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
250         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
251         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
252         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
253         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
254         INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
255         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
256         INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
257         INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
258         INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
259         EVENT_CONSTRAINT_END
260 };
261
262 static struct event_constraint intel_gen_event_constraints[] =
263 {
264         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
265         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
266         EVENT_CONSTRAINT_END
267 };
268
269 static u64 intel_pmu_event_map(int hw_event)
270 {
271         return intel_perfmon_event_map[hw_event];
272 }
273
274 /*
275  * Generalized hw caching related hw_event table, filled
276  * in on a per-model basis. A value of 0 means
277  * 'not supported', -1 means 'hw_event makes no sense on
278  * this CPU', any other value means the raw hw_event
279  * ID.
280  */
281
282 #define C(x) PERF_COUNT_HW_CACHE_##x
283
284 static u64 __read_mostly hw_cache_event_ids
285                                 [PERF_COUNT_HW_CACHE_MAX]
286                                 [PERF_COUNT_HW_CACHE_OP_MAX]
287                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
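/*
 * Example lookup (illustrative): the model-specific id for L1 data-cache
 * read misses is hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)].
 */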
288
289 static __initconst u64 nehalem_hw_cache_event_ids
290                                 [PERF_COUNT_HW_CACHE_MAX]
291                                 [PERF_COUNT_HW_CACHE_OP_MAX]
292                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
293 {
294  [ C(L1D) ] = {
295         [ C(OP_READ) ] = {
296                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
297                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
298         },
299         [ C(OP_WRITE) ] = {
300                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
301                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
302         },
303         [ C(OP_PREFETCH) ] = {
304                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
305                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
306         },
307  },
308  [ C(L1I ) ] = {
309         [ C(OP_READ) ] = {
310                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
311                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
312         },
313         [ C(OP_WRITE) ] = {
314                 [ C(RESULT_ACCESS) ] = -1,
315                 [ C(RESULT_MISS)   ] = -1,
316         },
317         [ C(OP_PREFETCH) ] = {
318                 [ C(RESULT_ACCESS) ] = 0x0,
319                 [ C(RESULT_MISS)   ] = 0x0,
320         },
321  },
322  [ C(LL  ) ] = {
323         [ C(OP_READ) ] = {
324                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
325                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
326         },
327         [ C(OP_WRITE) ] = {
328                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
329                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
330         },
331         [ C(OP_PREFETCH) ] = {
332                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
333                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
334         },
335  },
336  [ C(DTLB) ] = {
337         [ C(OP_READ) ] = {
338                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
339                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
340         },
341         [ C(OP_WRITE) ] = {
342                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
343                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
344         },
345         [ C(OP_PREFETCH) ] = {
346                 [ C(RESULT_ACCESS) ] = 0x0,
347                 [ C(RESULT_MISS)   ] = 0x0,
348         },
349  },
350  [ C(ITLB) ] = {
351         [ C(OP_READ) ] = {
352                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
353                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
354         },
355         [ C(OP_WRITE) ] = {
356                 [ C(RESULT_ACCESS) ] = -1,
357                 [ C(RESULT_MISS)   ] = -1,
358         },
359         [ C(OP_PREFETCH) ] = {
360                 [ C(RESULT_ACCESS) ] = -1,
361                 [ C(RESULT_MISS)   ] = -1,
362         },
363  },
364  [ C(BPU ) ] = {
365         [ C(OP_READ) ] = {
366                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
367                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
368         },
369         [ C(OP_WRITE) ] = {
370                 [ C(RESULT_ACCESS) ] = -1,
371                 [ C(RESULT_MISS)   ] = -1,
372         },
373         [ C(OP_PREFETCH) ] = {
374                 [ C(RESULT_ACCESS) ] = -1,
375                 [ C(RESULT_MISS)   ] = -1,
376         },
377  },
378 };
379
380 static __initconst u64 core2_hw_cache_event_ids
381                                 [PERF_COUNT_HW_CACHE_MAX]
382                                 [PERF_COUNT_HW_CACHE_OP_MAX]
383                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
384 {
385  [ C(L1D) ] = {
386         [ C(OP_READ) ] = {
387                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
388                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
389         },
390         [ C(OP_WRITE) ] = {
391                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
392                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
393         },
394         [ C(OP_PREFETCH) ] = {
395                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
396                 [ C(RESULT_MISS)   ] = 0,
397         },
398  },
399  [ C(L1I ) ] = {
400         [ C(OP_READ) ] = {
401                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
402                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
403         },
404         [ C(OP_WRITE) ] = {
405                 [ C(RESULT_ACCESS) ] = -1,
406                 [ C(RESULT_MISS)   ] = -1,
407         },
408         [ C(OP_PREFETCH) ] = {
409                 [ C(RESULT_ACCESS) ] = 0,
410                 [ C(RESULT_MISS)   ] = 0,
411         },
412  },
413  [ C(LL  ) ] = {
414         [ C(OP_READ) ] = {
415                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
416                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
417         },
418         [ C(OP_WRITE) ] = {
419                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
420                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
421         },
422         [ C(OP_PREFETCH) ] = {
423                 [ C(RESULT_ACCESS) ] = 0,
424                 [ C(RESULT_MISS)   ] = 0,
425         },
426  },
427  [ C(DTLB) ] = {
428         [ C(OP_READ) ] = {
429                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
430                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
431         },
432         [ C(OP_WRITE) ] = {
433                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
434                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
435         },
436         [ C(OP_PREFETCH) ] = {
437                 [ C(RESULT_ACCESS) ] = 0,
438                 [ C(RESULT_MISS)   ] = 0,
439         },
440  },
441  [ C(ITLB) ] = {
442         [ C(OP_READ) ] = {
443                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
444                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
445         },
446         [ C(OP_WRITE) ] = {
447                 [ C(RESULT_ACCESS) ] = -1,
448                 [ C(RESULT_MISS)   ] = -1,
449         },
450         [ C(OP_PREFETCH) ] = {
451                 [ C(RESULT_ACCESS) ] = -1,
452                 [ C(RESULT_MISS)   ] = -1,
453         },
454  },
455  [ C(BPU ) ] = {
456         [ C(OP_READ) ] = {
457                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
458                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
459         },
460         [ C(OP_WRITE) ] = {
461                 [ C(RESULT_ACCESS) ] = -1,
462                 [ C(RESULT_MISS)   ] = -1,
463         },
464         [ C(OP_PREFETCH) ] = {
465                 [ C(RESULT_ACCESS) ] = -1,
466                 [ C(RESULT_MISS)   ] = -1,
467         },
468  },
469 };
470
471 static __initconst u64 atom_hw_cache_event_ids
472                                 [PERF_COUNT_HW_CACHE_MAX]
473                                 [PERF_COUNT_HW_CACHE_OP_MAX]
474                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
475 {
476  [ C(L1D) ] = {
477         [ C(OP_READ) ] = {
478                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
479                 [ C(RESULT_MISS)   ] = 0,
480         },
481         [ C(OP_WRITE) ] = {
482                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
483                 [ C(RESULT_MISS)   ] = 0,
484         },
485         [ C(OP_PREFETCH) ] = {
486                 [ C(RESULT_ACCESS) ] = 0x0,
487                 [ C(RESULT_MISS)   ] = 0,
488         },
489  },
490  [ C(L1I ) ] = {
491         [ C(OP_READ) ] = {
492                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
493                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
494         },
495         [ C(OP_WRITE) ] = {
496                 [ C(RESULT_ACCESS) ] = -1,
497                 [ C(RESULT_MISS)   ] = -1,
498         },
499         [ C(OP_PREFETCH) ] = {
500                 [ C(RESULT_ACCESS) ] = 0,
501                 [ C(RESULT_MISS)   ] = 0,
502         },
503  },
504  [ C(LL  ) ] = {
505         [ C(OP_READ) ] = {
506                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
507                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
508         },
509         [ C(OP_WRITE) ] = {
510                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
511                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
512         },
513         [ C(OP_PREFETCH) ] = {
514                 [ C(RESULT_ACCESS) ] = 0,
515                 [ C(RESULT_MISS)   ] = 0,
516         },
517  },
518  [ C(DTLB) ] = {
519         [ C(OP_READ) ] = {
520                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
521                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
522         },
523         [ C(OP_WRITE) ] = {
524                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
525                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
526         },
527         [ C(OP_PREFETCH) ] = {
528                 [ C(RESULT_ACCESS) ] = 0,
529                 [ C(RESULT_MISS)   ] = 0,
530         },
531  },
532  [ C(ITLB) ] = {
533         [ C(OP_READ) ] = {
534                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
535                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
536         },
537         [ C(OP_WRITE) ] = {
538                 [ C(RESULT_ACCESS) ] = -1,
539                 [ C(RESULT_MISS)   ] = -1,
540         },
541         [ C(OP_PREFETCH) ] = {
542                 [ C(RESULT_ACCESS) ] = -1,
543                 [ C(RESULT_MISS)   ] = -1,
544         },
545  },
546  [ C(BPU ) ] = {
547         [ C(OP_READ) ] = {
548                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
549                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
550         },
551         [ C(OP_WRITE) ] = {
552                 [ C(RESULT_ACCESS) ] = -1,
553                 [ C(RESULT_MISS)   ] = -1,
554         },
555         [ C(OP_PREFETCH) ] = {
556                 [ C(RESULT_ACCESS) ] = -1,
557                 [ C(RESULT_MISS)   ] = -1,
558         },
559  },
560 };
561
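/*
 * Raw events are sanitized against this mask: only the architectural
 * event-select, unit-mask, edge, invert and counter-mask fields survive.
 * Control bits such as the enable and interrupt bits are supplied by the
 * kernel itself when the event is programmed.
 */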
562 static u64 intel_pmu_raw_event(u64 hw_event)
563 {
564 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
565 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
566 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
567 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
568 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
569
570 #define CORE_EVNTSEL_MASK               \
571         (INTEL_ARCH_EVTSEL_MASK |       \
572          INTEL_ARCH_UNIT_MASK   |       \
573          INTEL_ARCH_EDGE_MASK   |       \
574          INTEL_ARCH_INV_MASK    |       \
575          INTEL_ARCH_CNT_MASK)
576
577         return hw_event & CORE_EVNTSEL_MASK;
578 }
579
580 static __initconst u64 amd_hw_cache_event_ids
581                                 [PERF_COUNT_HW_CACHE_MAX]
582                                 [PERF_COUNT_HW_CACHE_OP_MAX]
583                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
584 {
585  [ C(L1D) ] = {
586         [ C(OP_READ) ] = {
587                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
588                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
589         },
590         [ C(OP_WRITE) ] = {
591                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
592                 [ C(RESULT_MISS)   ] = 0,
593         },
594         [ C(OP_PREFETCH) ] = {
595                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
596                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
597         },
598  },
599  [ C(L1I ) ] = {
600         [ C(OP_READ) ] = {
601                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
602                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
603         },
604         [ C(OP_WRITE) ] = {
605                 [ C(RESULT_ACCESS) ] = -1,
606                 [ C(RESULT_MISS)   ] = -1,
607         },
608         [ C(OP_PREFETCH) ] = {
609                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
610                 [ C(RESULT_MISS)   ] = 0,
611         },
612  },
613  [ C(LL  ) ] = {
614         [ C(OP_READ) ] = {
615                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
616                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
617         },
618         [ C(OP_WRITE) ] = {
619                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
620                 [ C(RESULT_MISS)   ] = 0,
621         },
622         [ C(OP_PREFETCH) ] = {
623                 [ C(RESULT_ACCESS) ] = 0,
624                 [ C(RESULT_MISS)   ] = 0,
625         },
626  },
627  [ C(DTLB) ] = {
628         [ C(OP_READ) ] = {
629                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
630                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
631         },
632         [ C(OP_WRITE) ] = {
633                 [ C(RESULT_ACCESS) ] = 0,
634                 [ C(RESULT_MISS)   ] = 0,
635         },
636         [ C(OP_PREFETCH) ] = {
637                 [ C(RESULT_ACCESS) ] = 0,
638                 [ C(RESULT_MISS)   ] = 0,
639         },
640  },
641  [ C(ITLB) ] = {
642         [ C(OP_READ) ] = {
643                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
644                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
645         },
646         [ C(OP_WRITE) ] = {
647                 [ C(RESULT_ACCESS) ] = -1,
648                 [ C(RESULT_MISS)   ] = -1,
649         },
650         [ C(OP_PREFETCH) ] = {
651                 [ C(RESULT_ACCESS) ] = -1,
652                 [ C(RESULT_MISS)   ] = -1,
653         },
654  },
655  [ C(BPU ) ] = {
656         [ C(OP_READ) ] = {
657                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
658                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
659         },
660         [ C(OP_WRITE) ] = {
661                 [ C(RESULT_ACCESS) ] = -1,
662                 [ C(RESULT_MISS)   ] = -1,
663         },
664         [ C(OP_PREFETCH) ] = {
665                 [ C(RESULT_ACCESS) ] = -1,
666                 [ C(RESULT_MISS)   ] = -1,
667         },
668  },
669 };
670
671 /*
672  * AMD Performance Monitor K7 and later.
673  */
674 static const u64 amd_perfmon_event_map[] =
675 {
676   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
677   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
678   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
679   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
680   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
681   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
682 };
683
684 static u64 amd_pmu_event_map(int hw_event)
685 {
686         return amd_perfmon_event_map[hw_event];
687 }
688
689 static u64 amd_pmu_raw_event(u64 hw_event)
690 {
691 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
692 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
693 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
694 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
695 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
696
697 #define K7_EVNTSEL_MASK                 \
698         (K7_EVNTSEL_EVENT_MASK |        \
699          K7_EVNTSEL_UNIT_MASK  |        \
700          K7_EVNTSEL_EDGE_MASK  |        \
701          K7_EVNTSEL_INV_MASK   |        \
702          K7_EVNTSEL_REG_MASK)
703
704         return hw_event & K7_EVNTSEL_MASK;
705 }
706
707 /*
708  * Propagate event elapsed time into the generic event.
709  * Can only be executed on the CPU where the event is active.
710  * Returns the new raw count.
711  */
712 static u64
713 x86_perf_event_update(struct perf_event *event,
714                         struct hw_perf_event *hwc, int idx)
715 {
716         int shift = 64 - x86_pmu.event_bits;
717         u64 prev_raw_count, new_raw_count;
718         s64 delta;
719
720         if (idx == X86_PMC_IDX_FIXED_BTS)
721                 return 0;
722
723         /*
724          * Careful: an NMI might modify the previous event value.
725          *
726          * Our tactic to handle this is to first atomically read and
727          * exchange a new raw count - then add that new-prev delta
728          * count to the generic event atomically:
729          */
730 again:
731         prev_raw_count = atomic64_read(&hwc->prev_count);
732         rdmsrl(hwc->event_base + idx, new_raw_count);
733
734         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
735                                         new_raw_count) != prev_raw_count)
736                 goto again;
737
738         /*
739          * Now we have the new raw value and have updated the prev
740          * timestamp already. We can now calculate the elapsed delta
741          * (event-)time and add that to the generic event.
742          *
743          * Careful, not all hw sign-extends above the physical width
744          * of the count.
745          */
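        /*
         * Illustrative example (assuming 48-bit counters, so shift == 16):
         * if the counter wrapped from prev == 0x0000ffffffffff00 to
         * new == 0x0000000000000100, then
         * (new << 16) - (prev << 16) == 0x0000000002000000 and the
         * arithmetic shift right by 16 yields delta == 0x200, i.e. the
         * 0x100 ticks up to the wrap plus the 0x100 ticks after it.
         */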
746         delta = (new_raw_count << shift) - (prev_raw_count << shift);
747         delta >>= shift;
748
749         atomic64_add(delta, &event->count);
750         atomic64_sub(delta, &hwc->period_left);
751
752         return new_raw_count;
753 }
754
755 static atomic_t active_events;
756 static DEFINE_MUTEX(pmc_reserve_mutex);
757
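/*
 * Claim the performance counter and event-select MSRs (from the LAPIC NMI
 * watchdog, among others). On partial failure every MSR reserved so far is
 * released again and the NMI watchdog is re-enabled.
 */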
758 static bool reserve_pmc_hardware(void)
759 {
760 #ifdef CONFIG_X86_LOCAL_APIC
761         int i;
762
763         if (nmi_watchdog == NMI_LOCAL_APIC)
764                 disable_lapic_nmi_watchdog();
765
766         for (i = 0; i < x86_pmu.num_events; i++) {
767                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
768                         goto perfctr_fail;
769         }
770
771         for (i = 0; i < x86_pmu.num_events; i++) {
772                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
773                         goto eventsel_fail;
774         }
775 #endif
776
777         return true;
778
779 #ifdef CONFIG_X86_LOCAL_APIC
780 eventsel_fail:
781         for (i--; i >= 0; i--)
782                 release_evntsel_nmi(x86_pmu.eventsel + i);
783
784         i = x86_pmu.num_events;
785
786 perfctr_fail:
787         for (i--; i >= 0; i--)
788                 release_perfctr_nmi(x86_pmu.perfctr + i);
789
790         if (nmi_watchdog == NMI_LOCAL_APIC)
791                 enable_lapic_nmi_watchdog();
792
793         return false;
794 #endif
795 }
796
797 static void release_pmc_hardware(void)
798 {
799 #ifdef CONFIG_X86_LOCAL_APIC
800         int i;
801
802         for (i = 0; i < x86_pmu.num_events; i++) {
803                 release_perfctr_nmi(x86_pmu.perfctr + i);
804                 release_evntsel_nmi(x86_pmu.eventsel + i);
805         }
806
807         if (nmi_watchdog == NMI_LOCAL_APIC)
808                 enable_lapic_nmi_watchdog();
809 #endif
810 }
811
812 static inline bool bts_available(void)
813 {
814         return x86_pmu.enable_bts != NULL;
815 }
816
817 static inline void init_debug_store_on_cpu(int cpu)
818 {
819         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
820
821         if (!ds)
822                 return;
823
824         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
825                      (u32)((u64)(unsigned long)ds),
826                      (u32)((u64)(unsigned long)ds >> 32));
827 }
828
829 static inline void fini_debug_store_on_cpu(int cpu)
830 {
831         if (!per_cpu(cpu_hw_events, cpu).ds)
832                 return;
833
834         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
835 }
836
837 static void release_bts_hardware(void)
838 {
839         int cpu;
840
841         if (!bts_available())
842                 return;
843
844         get_online_cpus();
845
846         for_each_online_cpu(cpu)
847                 fini_debug_store_on_cpu(cpu);
848
849         for_each_possible_cpu(cpu) {
850                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
851
852                 if (!ds)
853                         continue;
854
855                 per_cpu(cpu_hw_events, cpu).ds = NULL;
856
857                 kfree((void *)(unsigned long)ds->bts_buffer_base);
858                 kfree(ds);
859         }
860
861         put_online_cpus();
862 }
863
864 static int reserve_bts_hardware(void)
865 {
866         int cpu, err = 0;
867
868         if (!bts_available())
869                 return 0;
870
871         get_online_cpus();
872
873         for_each_possible_cpu(cpu) {
874                 struct debug_store *ds;
875                 void *buffer;
876
877                 err = -ENOMEM;
878                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
879                 if (unlikely(!buffer))
880                         break;
881
882                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
883                 if (unlikely(!ds)) {
884                         kfree(buffer);
885                         break;
886                 }
887
888                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
889                 ds->bts_index = ds->bts_buffer_base;
890                 ds->bts_absolute_maximum =
891                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
892                 ds->bts_interrupt_threshold =
893                         ds->bts_absolute_maximum - BTS_OVFL_TH;
894
895                 per_cpu(cpu_hw_events, cpu).ds = ds;
896                 err = 0;
897         }
898
899         if (err)
900                 release_bts_hardware();
901         else {
902                 for_each_online_cpu(cpu)
903                         init_debug_store_on_cpu(cpu);
904         }
905
906         put_online_cpus();
907
908         return err;
909 }
910
911 static void hw_perf_event_destroy(struct perf_event *event)
912 {
913         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
914                 release_pmc_hardware();
915                 release_bts_hardware();
916                 mutex_unlock(&pmc_reserve_mutex);
917         }
918 }
919
920 static inline int x86_pmu_initialized(void)
921 {
922         return x86_pmu.handle_irq != NULL;
923 }
924
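/*
 * Decode a PERF_TYPE_HW_CACHE config into a model-specific event id.
 * The attr->config encoding is: cache type in bits 0-7, cache op in
 * bits 8-15 and cache result in bits 16-23, e.g. (illustrative)
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) for L1D read misses.
 */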
925 static inline int
926 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
927 {
928         unsigned int cache_type, cache_op, cache_result;
929         u64 config, val;
930
931         config = attr->config;
932
933         cache_type = (config >>  0) & 0xff;
934         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
935                 return -EINVAL;
936
937         cache_op = (config >>  8) & 0xff;
938         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
939                 return -EINVAL;
940
941         cache_result = (config >> 16) & 0xff;
942         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
943                 return -EINVAL;
944
945         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
946
947         if (val == 0)
948                 return -ENOENT;
949
950         if (val == -1)
951                 return -EINVAL;
952
953         hwc->config |= val;
954
955         return 0;
956 }
957
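/*
 * Program DEBUGCTLMSR for branch tracing: enable trace messages (TR),
 * store them via BTS and interrupt when the buffer threshold is reached
 * (BTINT). Kernel or user branches are suppressed when the event did not
 * ask for them.
 */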
958 static void intel_pmu_enable_bts(u64 config)
959 {
960         unsigned long debugctlmsr;
961
962         debugctlmsr = get_debugctlmsr();
963
964         debugctlmsr |= X86_DEBUGCTL_TR;
965         debugctlmsr |= X86_DEBUGCTL_BTS;
966         debugctlmsr |= X86_DEBUGCTL_BTINT;
967
968         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
969                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
970
971         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
972                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
973
974         update_debugctlmsr(debugctlmsr);
975 }
976
977 static void intel_pmu_disable_bts(void)
978 {
979         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
980         unsigned long debugctlmsr;
981
982         if (!cpuc->ds)
983                 return;
984
985         debugctlmsr = get_debugctlmsr();
986
987         debugctlmsr &=
988                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
989                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
990
991         update_debugctlmsr(debugctlmsr);
992 }
993
994 /*
995  * Setup the hardware configuration for a given attr_type
996  */
997 static int __hw_perf_event_init(struct perf_event *event)
998 {
999         struct perf_event_attr *attr = &event->attr;
1000         struct hw_perf_event *hwc = &event->hw;
1001         u64 config;
1002         int err;
1003
1004         if (!x86_pmu_initialized())
1005                 return -ENODEV;
1006
1007         err = 0;
1008         if (!atomic_inc_not_zero(&active_events)) {
1009                 mutex_lock(&pmc_reserve_mutex);
1010                 if (atomic_read(&active_events) == 0) {
1011                         if (!reserve_pmc_hardware())
1012                                 err = -EBUSY;
1013                         else
1014                                 err = reserve_bts_hardware();
1015                 }
1016                 if (!err)
1017                         atomic_inc(&active_events);
1018                 mutex_unlock(&pmc_reserve_mutex);
1019         }
1020         if (err)
1021                 return err;
1022
1023         event->destroy = hw_perf_event_destroy;
1024
1025         /*
1026          * Generate PMC IRQs:
1027          * (keep 'enabled' bit clear for now)
1028          */
1029         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1030
1031         hwc->idx = -1;
1032
1033         /*
1034          * Count user and OS events unless requested not to.
1035          */
1036         if (!attr->exclude_user)
1037                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1038         if (!attr->exclude_kernel)
1039                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1040
1041         if (!hwc->sample_period) {
1042                 hwc->sample_period = x86_pmu.max_period;
1043                 hwc->last_period = hwc->sample_period;
1044                 atomic64_set(&hwc->period_left, hwc->sample_period);
1045         } else {
1046                 /*
1047                  * If we have a PMU initialized but no APIC
1048                  * interrupts, we cannot sample hardware
1049                  * events (user-space has to fall back and
1050                  * sample via a hrtimer based software event):
1051                  */
1052                 if (!x86_pmu.apic)
1053                         return -EOPNOTSUPP;
1054         }
1055
1056         /*
1057          * Raw hw_event types provide the config in the hw_event structure
1058          */
1059         if (attr->type == PERF_TYPE_RAW) {
1060                 hwc->config |= x86_pmu.raw_event(attr->config);
1061                 return 0;
1062         }
1063
1064         if (attr->type == PERF_TYPE_HW_CACHE)
1065                 return set_ext_hw_attr(hwc, attr);
1066
1067         if (attr->config >= x86_pmu.max_events)
1068                 return -EINVAL;
1069
1070         /*
1071          * The generic map:
1072          */
1073         config = x86_pmu.event_map(attr->config);
1074
1075         if (config == 0)
1076                 return -ENOENT;
1077
1078         if (config == -1LL)
1079                 return -EINVAL;
1080
1081         /*
1082          * Branch tracing:
1083          */
1084         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1085             (hwc->sample_period == 1)) {
1086                 /* BTS is not supported by this architecture. */
1087                 if (!bts_available())
1088                         return -EOPNOTSUPP;
1089
1090                 /* BTS is currently only allowed for user-mode. */
1091                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1092                         return -EOPNOTSUPP;
1093         }
1094
1095         hwc->config |= config;
1096
1097         return 0;
1098 }
1099
1100 static void p6_pmu_disable_all(void)
1101 {
1102         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1103         u64 val;
1104
1105         if (!cpuc->enabled)
1106                 return;
1107
1108         cpuc->enabled = 0;
1109         barrier();
1110
1111         /* p6 only has one enable register */
1112         rdmsrl(MSR_P6_EVNTSEL0, val);
1113         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1114         wrmsrl(MSR_P6_EVNTSEL0, val);
1115 }
1116
1117 static void intel_pmu_disable_all(void)
1118 {
1119         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1120
1121         if (!cpuc->enabled)
1122                 return;
1123
1124         cpuc->enabled = 0;
1125         barrier();
1126
1127         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1128
1129         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1130                 intel_pmu_disable_bts();
1131 }
1132
1133 static void amd_pmu_disable_all(void)
1134 {
1135         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1136         int idx;
1137
1138         if (!cpuc->enabled)
1139                 return;
1140
1141         cpuc->enabled = 0;
1142         /*
1143          * ensure we write the disable before we start disabling the
1144          * events proper, so that amd_pmu_enable_event() does the
1145          * right thing.
1146          */
1147         barrier();
1148
1149         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1150                 u64 val;
1151
1152                 if (!test_bit(idx, cpuc->active_mask))
1153                         continue;
1154                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1155                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1156                         continue;
1157                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1158                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1159         }
1160 }
1161
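/*
 * Disable the PMU on this CPU. n_added is cleared only if the PMU was
 * still enabled, so events collected while the PMU is already disabled
 * are not forgotten; the vendor disable_all() methods bail out early
 * when the PMU is already disabled.
 */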
1162 void hw_perf_disable(void)
1163 {
1164         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1165
1166         if (!x86_pmu_initialized())
1167                 return;
1168
1169         if (cpuc->enabled)
1170                 cpuc->n_added = 0;
1171
1172         x86_pmu.disable_all();
1173 }
1174
1175 static void p6_pmu_enable_all(void)
1176 {
1177         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1178         unsigned long val;
1179
1180         if (cpuc->enabled)
1181                 return;
1182
1183         cpuc->enabled = 1;
1184         barrier();
1185
1186         /* p6 only has one enable register */
1187         rdmsrl(MSR_P6_EVNTSEL0, val);
1188         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1189         wrmsrl(MSR_P6_EVNTSEL0, val);
1190 }
1191
1192 static void intel_pmu_enable_all(void)
1193 {
1194         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1195
1196         if (cpuc->enabled)
1197                 return;
1198
1199         cpuc->enabled = 1;
1200         barrier();
1201
1202         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1203
1204         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1205                 struct perf_event *event =
1206                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1207
1208                 if (WARN_ON_ONCE(!event))
1209                         return;
1210
1211                 intel_pmu_enable_bts(event->hw.config);
1212         }
1213 }
1214
1215 static void amd_pmu_enable_all(void)
1216 {
1217         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1218         int idx;
1219
1220         if (cpuc->enabled)
1221                 return;
1222
1223         cpuc->enabled = 1;
1224         barrier();
1225
1226         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1227                 struct perf_event *event = cpuc->events[idx];
1228                 u64 val;
1229
1230                 if (!test_bit(idx, cpuc->active_mask))
1231                         continue;
1232
1233                 val = event->hw.config;
1234                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1235                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1236         }
1237 }
1238
1239 static const struct pmu pmu;
1240
1241 static inline int is_x86_event(struct perf_event *event)
1242 {
1243         return event->pmu == &pmu;
1244 }
1245
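/*
 * Assign the n collected events to hardware counters, honouring each
 * event's constraint mask. First try to keep every event on the counter
 * it already occupies; if that fails, fall back to a greedy pass that
 * places the most constrained events (smallest weight) first. When
 * 'assign' is NULL this is only a feasibility check.
 */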
1246 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1247 {
1248         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1249         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1250         int i, j, w, wmax, num = 0;
1251         struct hw_perf_event *hwc;
1252
1253         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1254
1255         for (i = 0; i < n; i++) {
1256                 constraints[i] =
1257                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1258         }
1259
1260         /*
1261          * fastpath, try to reuse previous register
1262          */
1263         for (i = 0; i < n; i++) {
1264                 hwc = &cpuc->event_list[i]->hw;
1265                 c = constraints[i];
1266
1267                 /* never assigned */
1268                 if (hwc->idx == -1)
1269                         break;
1270
1271                 /* constraint still honored */
1272                 if (!test_bit(hwc->idx, c->idxmsk))
1273                         break;
1274
1275                 /* not already used */
1276                 if (test_bit(hwc->idx, used_mask))
1277                         break;
1278
1279 #if 0
1280                 pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
1281                          smp_processor_id(),
1282                          hwc->config,
1283                          hwc->idx,
1284                          assign ? 'y' : 'n');
1285 #endif
1286
1287                 set_bit(hwc->idx, used_mask);
1288                 if (assign)
1289                         assign[i] = hwc->idx;
1290         }
1291         if (i == n)
1292                 goto done;
1293
1294         /*
1295          * begin slow path
1296          */
1297
1298         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1299
1300         /*
1301          * weight = number of possible counters
1302          *
1303          * 1    = most constrained, only works on one counter
1304          * wmax = least constrained, works on any counter
1305          *
1306          * assign events to counters starting with most
1307          * constrained events.
1308          */
1309         wmax = x86_pmu.num_events;
1310
1311         /*
1312          * when fixed event counters are present,
1313          * wmax is incremented by 1 to account
1314          * for one more choice
1315          */
1316         if (x86_pmu.num_events_fixed)
1317                 wmax++;
1318
1319         for (w = 1, num = n; num && w <= wmax; w++) {
1320                 /* for each event */
1321                 for (i = 0; num && i < n; i++) {
1322                         c = constraints[i];
1323                         hwc = &cpuc->event_list[i]->hw;
1324
1325                         if (c->weight != w)
1326                                 continue;
1327
1328                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1329                                 if (!test_bit(j, used_mask))
1330                                         break;
1331                         }
1332
1333                         if (j == X86_PMC_IDX_MAX)
1334                                 break;
1335
1336 #if 0
1337                         pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
1338                                 smp_processor_id(),
1339                                 hwc->config,
1340                                 j,
1341                                 assign ? 'y' : 'n');
1342 #endif
1343
1344                         set_bit(j, used_mask);
1345
1346                         if (assign)
1347                                 assign[i] = j;
1348                         num--;
1349                 }
1350         }
1351 done:
1352         /*
1353          * scheduling failed or is just a simulation,
1354          * free resources if necessary
1355          */
1356         if (!assign || num) {
1357                 for (i = 0; i < n; i++) {
1358                         if (x86_pmu.put_event_constraints)
1359                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1360                 }
1361         }
1362         return num ? -ENOSPC : 0;
1363 }
1364
1365 /*
1366  * dogrp: true if we must collect sibling events (group)
1367  * returns total number of events and error code
1368  */
1369 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1370 {
1371         struct perf_event *event;
1372         int n, max_count;
1373
1374         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1375
1376         /* current number of events already accepted */
1377         n = cpuc->n_events;
1378
1379         if (is_x86_event(leader)) {
1380                 if (n >= max_count)
1381                         return -ENOSPC;
1382                 cpuc->event_list[n] = leader;
1383                 n++;
1384         }
1385         if (!dogrp)
1386                 return n;
1387
1388         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1389                 if (!is_x86_event(event) ||
1390                     event->state <= PERF_EVENT_STATE_OFF)
1391                         continue;
1392
1393                 if (n >= max_count)
1394                         return -ENOSPC;
1395
1396                 cpuc->event_list[n] = event;
1397                 n++;
1398         }
1399         return n;
1400 }
1401
1402
1403 static inline void x86_assign_hw_event(struct perf_event *event,
1404                                 struct hw_perf_event *hwc, int idx)
1405 {
1406         hwc->idx = idx;
1407
1408         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1409                 hwc->config_base = 0;
1410                 hwc->event_base = 0;
1411         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1412                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1413                 /*
1414                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1415                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1416                  */
1417                 hwc->event_base =
1418                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1419         } else {
1420                 hwc->config_base = x86_pmu.eventsel;
1421                 hwc->event_base  = x86_pmu.perfctr;
1422         }
1423 }
1424
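/*
 * (Re-)enable the PMU on this CPU. If events were added since the last
 * disable, events that moved to a different counter are first stopped on
 * their old counter and then reprogrammed on the newly assigned one,
 * before the vendor enable_all() method switches the PMU back on.
 */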
1425 void hw_perf_enable(void)
1426 {
1427         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1428         struct perf_event *event;
1429         struct hw_perf_event *hwc;
1430         int i;
1431
1432         if (!x86_pmu_initialized())
1433                 return;
1434         if (cpuc->n_added) {
1435                 /*
1436                  * apply assignment obtained either from
1437                  * hw_perf_group_sched_in() or x86_pmu_enable()
1438                  *
1439                  * step1: save events moving to new counters
1440                  * step2: reprogram moved events into new counters
1441                  */
1442                 for (i = 0; i < cpuc->n_events; i++) {
1443
1444                         event = cpuc->event_list[i];
1445                         hwc = &event->hw;
1446
1447                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1448                                 continue;
1449
1450                         x86_pmu.disable(hwc, hwc->idx);
1451
1452                         clear_bit(hwc->idx, cpuc->active_mask);
1453                         barrier();
1454                         cpuc->events[hwc->idx] = NULL;
1455
1456                         x86_perf_event_update(event, hwc, hwc->idx);
1457
1458                         hwc->idx = -1;
1459                 }
1460
1461                 for (i = 0; i < cpuc->n_events; i++) {
1462
1463                         event = cpuc->event_list[i];
1464                         hwc = &event->hw;
1465
1466                         if (hwc->idx == -1) {
1467                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1468                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1469                         }
1470                         /*
1471                          * need to mark as active because x86_pmu_disable()
1472                          * clears active_mask and events[] yet preserves
1473                          * idx
1474                          */
1475                         set_bit(hwc->idx, cpuc->active_mask);
1476                         cpuc->events[hwc->idx] = event;
1477
1478                         x86_pmu.enable(hwc, hwc->idx);
1479                         perf_event_update_userpage(event);
1480                 }
1481                 cpuc->n_added = 0;
1482                 perf_events_lapic_init();
1483         }
1484         x86_pmu.enable_all();
1485 }
1486
1487 static inline u64 intel_pmu_get_status(void)
1488 {
1489         u64 status;
1490
1491         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1492
1493         return status;
1494 }
1495
1496 static inline void intel_pmu_ack_status(u64 ack)
1497 {
1498         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1499 }
1500
1501 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1502 {
1503         (void)checking_wrmsrl(hwc->config_base + idx,
1504                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1505 }
1506
1507 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1508 {
1509         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1510 }
1511
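/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting and bit 3 PMI
 * generation. Disabling a fixed event simply clears its 4-bit field.
 */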
1512 static inline void
1513 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1514 {
1515         int idx = __idx - X86_PMC_IDX_FIXED;
1516         u64 ctrl_val, mask;
1517
1518         mask = 0xfULL << (idx * 4);
1519
1520         rdmsrl(hwc->config_base, ctrl_val);
1521         ctrl_val &= ~mask;
1522         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1523 }
1524
1525 static inline void
1526 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1527 {
1528         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1529         u64 val = P6_NOP_EVENT;
1530
1531         if (cpuc->enabled)
1532                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1533
1534         (void)checking_wrmsrl(hwc->config_base + idx, val);
1535 }
1536
1537 static inline void
1538 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1539 {
1540         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1541                 intel_pmu_disable_bts();
1542                 return;
1543         }
1544
1545         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1546                 intel_pmu_disable_fixed(hwc, idx);
1547                 return;
1548         }
1549
1550         x86_pmu_disable_event(hwc, idx);
1551 }
1552
1553 static inline void
1554 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1555 {
1556         x86_pmu_disable_event(hwc, idx);
1557 }
1558
1559 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1560
1561 /*
1562  * Set the next IRQ period, based on the hwc->period_left value.
1563  * To be called with the event disabled in hw:
1564  */
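/*
 * The counter is programmed with -left (truncated to the counter width),
 * so it overflows and raises the PMI (if enabled) after another 'left'
 * increments. Illustrative example, assuming 48-bit counters: for
 * left == 100000 the counter starts at 0x1000000000000 - 0x186a0,
 * i.e. 0xfffffffe7960.
 */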
1565 static int
1566 x86_perf_event_set_period(struct perf_event *event,
1567                              struct hw_perf_event *hwc, int idx)
1568 {
1569         s64 left = atomic64_read(&hwc->period_left);
1570         s64 period = hwc->sample_period;
1571         int err, ret = 0;
1572
1573         if (idx == X86_PMC_IDX_FIXED_BTS)
1574                 return 0;
1575
1576         /*
1577          * If we are way outside a reasonable range then just skip forward:
1578          */
1579         if (unlikely(left <= -period)) {
1580                 left = period;
1581                 atomic64_set(&hwc->period_left, left);
1582                 hwc->last_period = period;
1583                 ret = 1;
1584         }
1585
1586         if (unlikely(left <= 0)) {
1587                 left += period;
1588                 atomic64_set(&hwc->period_left, left);
1589                 hwc->last_period = period;
1590                 ret = 1;
1591         }
1592         /*
1593          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1594          */
1595         if (unlikely(left < 2))
1596                 left = 2;
1597
1598         if (left > x86_pmu.max_period)
1599                 left = x86_pmu.max_period;
1600
1601         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1602
1603         /*
1604          * The hw event starts counting from this event offset,
1605          * mark it so we are able to extract future deltas:
1606          */
1607         atomic64_set(&hwc->prev_count, (u64)-left);
1608
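        /*
         * Program the counter with -left (truncated to the implemented
         * counter width) so that it overflows after 'left' increments.
         */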
1609         err = checking_wrmsrl(hwc->event_base + idx,
1610                              (u64)(-left) & x86_pmu.event_mask);
1611
1612         perf_event_update_userpage(event);
1613
1614         return ret;
1615 }
1616
1617 static inline void
1618 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1619 {
1620         int idx = __idx - X86_PMC_IDX_FIXED;
1621         u64 ctrl_val, bits, mask;
1622         int err;
1623
1624         /*
1625          * Enable IRQ generation (0x8),
1626          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1627          * if requested:
1628          */
1629         bits = 0x8ULL;
1630         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1631                 bits |= 0x2;
1632         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1633                 bits |= 0x1;
1634         bits <<= (idx * 4);
1635         mask = 0xfULL << (idx * 4);
1636
1637         rdmsrl(hwc->config_base, ctrl_val);
1638         ctrl_val &= ~mask;
1639         ctrl_val |= bits;
1640         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1641 }
1642
1643 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1644 {
1645         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1646         u64 val;
1647
1648         val = hwc->config;
1649         if (cpuc->enabled)
1650                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1651
1652         (void)checking_wrmsrl(hwc->config_base + idx, val);
1653 }
1654
1655
1656 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1657 {
1658         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1659                 if (!__get_cpu_var(cpu_hw_events).enabled)
1660                         return;
1661
1662                 intel_pmu_enable_bts(hwc->config);
1663                 return;
1664         }
1665
1666         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1667                 intel_pmu_enable_fixed(hwc, idx);
1668                 return;
1669         }
1670
1671         x86_pmu_enable_event(hwc, idx);
1672 }
1673
1674 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1675 {
1676         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1677
1678         if (cpuc->enabled)
1679                 x86_pmu_enable_event(hwc, idx);
1680 }
1681
1682 /*
1683  * activate a single event
1684  *
1685  * The event is added to the group of enabled events
1686  * but only if it can be scheduled with existing events.
1687  *
1688  * Called with the PMU disabled. Returns 0 on success; the new
1689  * assignment only takes effect at the next hw_perf_enable().
1690  */
1691 static int x86_pmu_enable(struct perf_event *event)
1692 {
1693         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1694         struct hw_perf_event *hwc;
1695         int assign[X86_PMC_IDX_MAX];
1696         int n, n0, ret;
1697
1698         hwc = &event->hw;
1699
1700         n0 = cpuc->n_events;
1701         n = collect_events(cpuc, event, false);
1702         if (n < 0)
1703                 return n;
1704
1705         ret = x86_schedule_events(cpuc, n, assign);
1706         if (ret)
1707                 return ret;
1708         /*
1709          * copy the new assignment now that we know it is possible;
1710          * it will be used by hw_perf_enable()
1711          */
1712         memcpy(cpuc->assign, assign, n*sizeof(int));
1713
1714         cpuc->n_events = n;
1715         cpuc->n_added  = n - n0;
1716
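        /*
         * If the event already has a hardware counter assigned from an
         * earlier activation, reprogram its sample period right away.
         */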
1717         if (hwc->idx != -1)
1718                 x86_perf_event_set_period(event, hwc, hwc->idx);
1719
1720         return 0;
1721 }
1722
1723 static void x86_pmu_unthrottle(struct perf_event *event)
1724 {
1725         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1726         struct hw_perf_event *hwc = &event->hw;
1727
1728         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1729                                 cpuc->events[hwc->idx] != event))
1730                 return;
1731
1732         x86_pmu.enable(hwc, hwc->idx);
1733 }
1734
1735 void perf_event_print_debug(void)
1736 {
1737         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1738         struct cpu_hw_events *cpuc;
1739         unsigned long flags;
1740         int cpu, idx;
1741
1742         if (!x86_pmu.num_events)
1743                 return;
1744
1745         local_irq_save(flags);
1746
1747         cpu = smp_processor_id();
1748         cpuc = &per_cpu(cpu_hw_events, cpu);
1749
1750         if (x86_pmu.version >= 2) {
1751                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1752                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1753                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1754                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1755
1756                 pr_info("\n");
1757                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1758                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1759                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1760                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1761         }
1762         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1763
1764         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1765                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1766                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1767
1768                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1769
1770                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1771                         cpu, idx, pmc_ctrl);
1772                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1773                         cpu, idx, pmc_count);
1774                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1775                         cpu, idx, prev_left);
1776         }
1777         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1778                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1779
1780                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1781                         cpu, idx, pmc_count);
1782         }
1783         local_irq_restore(flags);
1784 }
1785
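/*
 * Drain the BTS buffer from the debug store area: each hardware branch
 * record is turned into a perf sample (branch source in .ip, branch
 * target in .addr) and pushed out in a single output batch.
 */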
1786 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1787 {
1788         struct debug_store *ds = cpuc->ds;
1789         struct bts_record {
1790                 u64     from;
1791                 u64     to;
1792                 u64     flags;
1793         };
1794         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1795         struct bts_record *at, *top;
1796         struct perf_output_handle handle;
1797         struct perf_event_header header;
1798         struct perf_sample_data data;
1799         struct pt_regs regs;
1800
1801         if (!event)
1802                 return;
1803
1804         if (!ds)
1805                 return;
1806
1807         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1808         top = (struct bts_record *)(unsigned long)ds->bts_index;
1809
1810         if (top <= at)
1811                 return;
1812
1813         ds->bts_index = ds->bts_buffer_base;
1814
1815
1816         data.period     = event->hw.last_period;
1817         data.addr       = 0;
1818         data.raw        = NULL;
1819         regs.ip         = 0;
1820
1821         /*
1822          * Prepare a generic sample, i.e. fill in the invariant fields.
1823          * We will overwrite the from and to address before we output
1824          * the sample.
1825          */
1826         perf_prepare_sample(&header, &data, event, &regs);
1827
1828         if (perf_output_begin(&handle, event,
1829                               header.size * (top - at), 1, 1))
1830                 return;
1831
1832         for (; at < top; at++) {
1833                 data.ip         = at->from;
1834                 data.addr       = at->to;
1835
1836                 perf_output_sample(&handle, &header, &data, event);
1837         }
1838
1839         perf_output_end(&handle);
1840
1841         /* There's new data available. */
1842         event->hw.interrupts++;
1843         event->pending_kill = POLL_IN;
1844 }
1845
1846 static void x86_pmu_disable(struct perf_event *event)
1847 {
1848         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1849         struct hw_perf_event *hwc = &event->hw;
1850         int i, idx = hwc->idx;
1851
1852         /*
1853          * Must be done before we disable, otherwise the nmi handler
1854          * could re-enable it again:
1855          */
1856         clear_bit(idx, cpuc->active_mask);
1857         x86_pmu.disable(hwc, idx);
1858
1859         /*
1860          * Make sure the cleared pointer becomes visible before we
1861          * (potentially) free the event:
1862          */
1863         barrier();
1864
1865         /*
1866          * Drain the remaining delta count out of an event
1867          * that we are disabling:
1868          */
1869         x86_perf_event_update(event, hwc, idx);
1870
1871         /* Drain the remaining BTS records. */
1872         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1873                 intel_pmu_drain_bts_buffer(cpuc);
1874
1875         cpuc->events[idx] = NULL;
1876
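        /*
         * Remove the event from the software event list and compact the
         * remaining entries so the list stays dense.
         */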
1877         for (i = 0; i < cpuc->n_events; i++) {
1878                 if (event == cpuc->event_list[i]) {
1879
1880                         if (x86_pmu.put_event_constraints)
1881                                 x86_pmu.put_event_constraints(cpuc, event);
1882
1883                         while (++i < cpuc->n_events)
1884                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1885
1886                         --cpuc->n_events;
1887                         break;
1888                 }
1889         }
1890         perf_event_update_userpage(event);
1891 }
1892
1893 /*
1894  * Save and restart an expired event. Called from NMI context,
1895  * so it has to be careful about preempting normal event ops:
1896  */
1897 static int intel_pmu_save_and_restart(struct perf_event *event)
1898 {
1899         struct hw_perf_event *hwc = &event->hw;
1900         int idx = hwc->idx;
1901         int ret;
1902
1903         x86_perf_event_update(event, hwc, idx);
1904         ret = x86_perf_event_set_period(event, hwc, idx);
1905
1906         if (event->state == PERF_EVENT_STATE_ACTIVE)
1907                 intel_pmu_enable_event(hwc, idx);
1908
1909         return ret;
1910 }
1911
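/*
 * Last-resort recovery: clear all generic and fixed counters and reset
 * the BTS buffer pointer, e.g. when the interrupt handler appears stuck.
 */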
1912 static void intel_pmu_reset(void)
1913 {
1914         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1915         unsigned long flags;
1916         int idx;
1917
1918         if (!x86_pmu.num_events)
1919                 return;
1920
1921         local_irq_save(flags);
1922
1923         pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
1924
1925         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1926                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1927                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1928         }
1929         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1930                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1931         }
1932         if (ds)
1933                 ds->bts_index = ds->bts_buffer_base;
1934
1935         local_irq_restore(flags);
1936 }
1937
1938 static int p6_pmu_handle_irq(struct pt_regs *regs)
1939 {
1940         struct perf_sample_data data;
1941         struct cpu_hw_events *cpuc;
1942         struct perf_event *event;
1943         struct hw_perf_event *hwc;
1944         int idx, handled = 0;
1945         u64 val;
1946
1947         data.addr = 0;
1948         data.raw = NULL;
1949
1950         cpuc = &__get_cpu_var(cpu_hw_events);
1951
1952         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1953                 if (!test_bit(idx, cpuc->active_mask))
1954                         continue;
1955
1956                 event = cpuc->events[idx];
1957                 hwc = &event->hw;
1958
1959                 val = x86_perf_event_update(event, hwc, idx);
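                /*
                 * Counters are programmed with a negative value; if the
                 * top implemented bit is still set, this counter has not
                 * overflowed yet.
                 */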
1960                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1961                         continue;
1962
1963                 /*
1964                  * event overflow
1965                  */
1966                 handled         = 1;
1967                 data.period     = event->hw.last_period;
1968
1969                 if (!x86_perf_event_set_period(event, hwc, idx))
1970                         continue;
1971
1972                 if (perf_event_overflow(event, 1, &data, regs))
1973                         p6_pmu_disable_event(hwc, idx);
1974         }
1975
1976         if (handled)
1977                 inc_irq_stat(apic_perf_irqs);
1978
1979         return handled;
1980 }
1981
1982 /*
1983  * This handler is triggered by the local APIC, so the APIC IRQ handling
1984  * rules apply:
1985  */
1986 static int intel_pmu_handle_irq(struct pt_regs *regs)
1987 {
1988         struct perf_sample_data data;
1989         struct cpu_hw_events *cpuc;
1990         int bit, loops;
1991         u64 ack, status;
1992
1993         data.addr = 0;
1994         data.raw = NULL;
1995
1996         cpuc = &__get_cpu_var(cpu_hw_events);
1997
1998         perf_disable();
1999         intel_pmu_drain_bts_buffer(cpuc);
2000         status = intel_pmu_get_status();
2001         if (!status) {
2002                 perf_enable();
2003                 return 0;
2004         }
2005
2006         loops = 0;
2007 again:
2008         if (++loops > 100) {
2009                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2010                 perf_event_print_debug();
2011                 intel_pmu_reset();
2012                 perf_enable();
2013                 return 1;
2014         }
2015
2016         inc_irq_stat(apic_perf_irqs);
2017         ack = status;
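        /*
         * Handle every counter flagged in GLOBAL_STATUS, then ack exactly
         * the bits we saw; the fresh status read below catches overflows
         * that raced in meanwhile.
         */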
2018         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2019                 struct perf_event *event = cpuc->events[bit];
2020
2021                 clear_bit(bit, (unsigned long *) &status);
2022                 if (!test_bit(bit, cpuc->active_mask))
2023                         continue;
2024
2025                 if (!intel_pmu_save_and_restart(event))
2026                         continue;
2027
2028                 data.period = event->hw.last_period;
2029
2030                 if (perf_event_overflow(event, 1, &data, regs))
2031                         intel_pmu_disable_event(&event->hw, bit);
2032         }
2033
2034         intel_pmu_ack_status(ack);
2035
2036         /*
2037          * Repeat if there is more work to be done:
2038          */
2039         status = intel_pmu_get_status();
2040         if (status)
2041                 goto again;
2042
2043         perf_enable();
2044
2045         return 1;
2046 }
2047
2048 static int amd_pmu_handle_irq(struct pt_regs *regs)
2049 {
2050         struct perf_sample_data data;
2051         struct cpu_hw_events *cpuc;
2052         struct perf_event *event;
2053         struct hw_perf_event *hwc;
2054         int idx, handled = 0;
2055         u64 val;
2056
2057         data.addr = 0;
2058         data.raw = NULL;
2059
2060         cpuc = &__get_cpu_var(cpu_hw_events);
2061
2062         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2063                 if (!test_bit(idx, cpuc->active_mask))
2064                         continue;
2065
2066                 event = cpuc->events[idx];
2067                 hwc = &event->hw;
2068
2069                 val = x86_perf_event_update(event, hwc, idx);
2070                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2071                         continue;
2072
2073                 /*
2074                  * event overflow
2075                  */
2076                 handled         = 1;
2077                 data.period     = event->hw.last_period;
2078
2079                 if (!x86_perf_event_set_period(event, hwc, idx))
2080                         continue;
2081
2082                 if (perf_event_overflow(event, 1, &data, regs))
2083                         amd_pmu_disable_event(hwc, idx);
2084         }
2085
2086         if (handled)
2087                 inc_irq_stat(apic_perf_irqs);
2088
2089         return handled;
2090 }
2091
2092 void smp_perf_pending_interrupt(struct pt_regs *regs)
2093 {
2094         irq_enter();
2095         ack_APIC_irq();
2096         inc_irq_stat(apic_pending_irqs);
2097         perf_event_do_pending();
2098         irq_exit();
2099 }
2100
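/*
 * Raise a self-IPI so that pending perf work (such as wakeups deferred
 * from NMI context) is handled from regular interrupt context.
 */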
2101 void set_perf_event_pending(void)
2102 {
2103 #ifdef CONFIG_X86_LOCAL_APIC
2104         if (!x86_pmu.apic || !x86_pmu_initialized())
2105                 return;
2106
2107         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2108 #endif
2109 }
2110
2111 void perf_events_lapic_init(void)
2112 {
2113 #ifdef CONFIG_X86_LOCAL_APIC
2114         if (!x86_pmu.apic || !x86_pmu_initialized())
2115                 return;
2116
2117         /*
2118          * Always use NMI for PMU
2119          */
2120         apic_write(APIC_LVTPC, APIC_DM_NMI);
2121 #endif
2122 }
2123
2124 static int __kprobes
2125 perf_event_nmi_handler(struct notifier_block *self,
2126                          unsigned long cmd, void *__args)
2127 {
2128         struct die_args *args = __args;
2129         struct pt_regs *regs;
2130
2131         if (!atomic_read(&active_events))
2132                 return NOTIFY_DONE;
2133
2134         switch (cmd) {
2135         case DIE_NMI:
2136         case DIE_NMI_IPI:
2137                 break;
2138
2139         default:
2140                 return NOTIFY_DONE;
2141         }
2142
2143         regs = args->regs;
2144
2145 #ifdef CONFIG_X86_LOCAL_APIC
2146         apic_write(APIC_LVTPC, APIC_DM_NMI);
2147 #endif
2148         /*
2149          * Can't rely on the handled return value to say it was our NMI, two
2150          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2151          *
2152          * If the first NMI handles both, the latter will be empty and daze
2153          * the CPU.
2154          */
2155         x86_pmu.handle_irq(regs);
2156
2157         return NOTIFY_STOP;
2158 }
2159
2160 static struct event_constraint unconstrained;
2161
2162 static struct event_constraint bts_constraint =
2163         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2164
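/*
 * A branch-instructions event with a sample period of 1 is serviced by
 * the BTS facility, so constrain it to the fake BTS counter index.
 */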
2165 static struct event_constraint *
2166 intel_special_constraints(struct perf_event *event)
2167 {
2168         unsigned int hw_event;
2169
2170         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2171
2172         if (unlikely((hw_event ==
2173                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2174                      (event->hw.sample_period == 1))) {
2175
2176                 return &bts_constraint;
2177         }
2178         return NULL;
2179 }
2180
2181 static struct event_constraint *
2182 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2183 {
2184         struct event_constraint *c;
2185
2186         c = intel_special_constraints(event);
2187         if (c)
2188                 return c;
2189
2190         if (x86_pmu.event_constraints) {
2191                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2192                         if ((event->hw.config & c->cmask) == c->code)
2193                                 return c;
2194                 }
2195         }
2196
2197         return &unconstrained;
2198 }
2199
2200 static struct event_constraint *
2201 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2202 {
2203         return &unconstrained;
2204 }
2205
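/*
 * Mark an event as scheduled in on this CPU. x86 hardware events are only
 * flagged here (the actual programming is deferred to hw_perf_enable());
 * software siblings are enabled immediately through their own pmu.
 */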
2206 static int x86_event_sched_in(struct perf_event *event,
2207                           struct perf_cpu_context *cpuctx, int cpu)
2208 {
2209         int ret = 0;
2210
2211         event->state = PERF_EVENT_STATE_ACTIVE;
2212         event->oncpu = cpu;
2213         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2214
2215         if (!is_x86_event(event))
2216                 ret = event->pmu->enable(event);
2217
2218         if (!ret && !is_software_event(event))
2219                 cpuctx->active_oncpu++;
2220
2221         if (!ret && event->attr.exclusive)
2222                 cpuctx->exclusive = 1;
2223
2224         return ret;
2225 }
2226
2227 static void x86_event_sched_out(struct perf_event *event,
2228                             struct perf_cpu_context *cpuctx, int cpu)
2229 {
2230         event->state = PERF_EVENT_STATE_INACTIVE;
2231         event->oncpu = -1;
2232
2233         if (!is_x86_event(event))
2234                 event->pmu->disable(event);
2235
2236         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2237
2238         if (!is_software_event(event))
2239                 cpuctx->active_oncpu--;
2240
2241         if (event->attr.exclusive || !cpuctx->active_oncpu)
2242                 cpuctx->exclusive = 0;
2243 }
2244
2245 /*
2246  * Called to enable a whole group of events.
2247  * Returns 1 if the group was enabled, or a negative error code if it
2248  * could not be. Assumes the caller has disabled interrupts and has
2249  * frozen the PMU with hw_perf_disable().
2250  *
2251  * Called with the PMU disabled. If successful (return value 1), the
2252  * caller is guaranteed to then call perf_enable() and hw_perf_enable().
2253  */
2254 int hw_perf_group_sched_in(struct perf_event *leader,
2255                struct perf_cpu_context *cpuctx,
2256                struct perf_event_context *ctx, int cpu)
2257 {
2258         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2259         struct perf_event *sub;
2260         int assign[X86_PMC_IDX_MAX];
2261         int n0, n1, ret;
2262
2263         /* n0 = total number of events */
2264         n0 = collect_events(cpuc, leader, true);
2265         if (n0 < 0)
2266                 return n0;
2267
2268         ret = x86_schedule_events(cpuc, n0, assign);
2269         if (ret)
2270                 return ret;
2271
2272         ret = x86_event_sched_in(leader, cpuctx, cpu);
2273         if (ret)
2274                 return ret;
2275
2276         n1 = 1;
2277         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2278                 if (sub->state > PERF_EVENT_STATE_OFF) {
2279                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2280                         if (ret)
2281                                 goto undo;
2282                         ++n1;
2283                 }
2284         }
2285         /*
2286          * copy the new assignment now that we know it is possible;
2287          * it will be used by hw_perf_enable()
2288          */
2289         memcpy(cpuc->assign, assign, n0*sizeof(int));
2290
2291         cpuc->n_events  = n0;
2292         cpuc->n_added   = n1;
2293         ctx->nr_active += n1;
2294
2295         /*
2296          * 1 means successful and events are active
2297          * This is not quite true because we defer
2298          * actual activation until hw_perf_enable(), but
2299          * this way we ensure the caller won't try to enable
2300          * individual events.
2301          */
2302         return 1;
2303 undo:
2304         x86_event_sched_out(leader, cpuctx, cpu);
2305         n0  = 1;
2306         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2307                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2308                         x86_event_sched_out(sub, cpuctx, cpu);
2309                         if (++n0 == n1)
2310                                 break;
2311                 }
2312         }
2313         return ret;
2314 }
2315
2316 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2317         .notifier_call          = perf_event_nmi_handler,
2318         .next                   = NULL,
2319         .priority               = 1
2320 };
2321
2322 static __initconst struct x86_pmu p6_pmu = {
2323         .name                   = "p6",
2324         .handle_irq             = p6_pmu_handle_irq,
2325         .disable_all            = p6_pmu_disable_all,
2326         .enable_all             = p6_pmu_enable_all,
2327         .enable                 = p6_pmu_enable_event,
2328         .disable                = p6_pmu_disable_event,
2329         .eventsel               = MSR_P6_EVNTSEL0,
2330         .perfctr                = MSR_P6_PERFCTR0,
2331         .event_map              = p6_pmu_event_map,
2332         .raw_event              = p6_pmu_raw_event,
2333         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2334         .apic                   = 1,
2335         .max_period             = (1ULL << 31) - 1,
2336         .version                = 0,
2337         .num_events             = 2,
2338         /*
2339          * Events have 40 bits implemented. However they are designed such
2340          * that bits [32-39] are sign extensions of bit 31. As such the
2341          * effective width of an event for P6-like PMUs is 32 bits only.
2342          *
2343          * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
2344          */
2345         .event_bits             = 32,
2346         .event_mask             = (1ULL << 32) - 1,
2347         .get_event_constraints  = intel_get_event_constraints,
2348         .event_constraints      = intel_p6_event_constraints
2349 };
2350
2351 static __initconst struct x86_pmu intel_pmu = {
2352         .name                   = "Intel",
2353         .handle_irq             = intel_pmu_handle_irq,
2354         .disable_all            = intel_pmu_disable_all,
2355         .enable_all             = intel_pmu_enable_all,
2356         .enable                 = intel_pmu_enable_event,
2357         .disable                = intel_pmu_disable_event,
2358         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2359         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2360         .event_map              = intel_pmu_event_map,
2361         .raw_event              = intel_pmu_raw_event,
2362         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2363         .apic                   = 1,
2364         /*
2365          * Intel PMCs cannot be accessed sanely above 32 bit width,
2366          * so we install an artificial 1<<31 period regardless of
2367          * the generic event period:
2368          */
2369         .max_period             = (1ULL << 31) - 1,
2370         .enable_bts             = intel_pmu_enable_bts,
2371         .disable_bts            = intel_pmu_disable_bts,
2372         .get_event_constraints  = intel_get_event_constraints
2373 };
2374
2375 static __initconst struct x86_pmu amd_pmu = {
2376         .name                   = "AMD",
2377         .handle_irq             = amd_pmu_handle_irq,
2378         .disable_all            = amd_pmu_disable_all,
2379         .enable_all             = amd_pmu_enable_all,
2380         .enable                 = amd_pmu_enable_event,
2381         .disable                = amd_pmu_disable_event,
2382         .eventsel               = MSR_K7_EVNTSEL0,
2383         .perfctr                = MSR_K7_PERFCTR0,
2384         .event_map              = amd_pmu_event_map,
2385         .raw_event              = amd_pmu_raw_event,
2386         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2387         .num_events             = 4,
2388         .event_bits             = 48,
2389         .event_mask             = (1ULL << 48) - 1,
2390         .apic                   = 1,
2391         /* use highest bit to detect overflow */
2392         .max_period             = (1ULL << 47) - 1,
2393         .get_event_constraints  = amd_get_event_constraints
2394 };
2395
2396 static __init int p6_pmu_init(void)
2397 {
2398         switch (boot_cpu_data.x86_model) {
2399         case 1:
2400         case 3:  /* Pentium Pro */
2401         case 5:
2402         case 6:  /* Pentium II */
2403         case 7:
2404         case 8:
2405         case 11: /* Pentium III */
2406         case 9:
2407         case 13:
2408                 /* Pentium M */
2409                 break;
2410         default:
2411                 pr_cont("unsupported p6 CPU model %d ",
2412                         boot_cpu_data.x86_model);
2413                 return -ENODEV;
2414         }
2415
2416         x86_pmu = p6_pmu;
2417
2418         return 0;
2419 }
2420
2421 static __init int intel_pmu_init(void)
2422 {
2423         union cpuid10_edx edx;
2424         union cpuid10_eax eax;
2425         unsigned int unused;
2426         unsigned int ebx;
2427         int version;
2428
2429         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2430                 /* check for P6 processor family */
2431                 if (boot_cpu_data.x86 == 6) {
2432                         return p6_pmu_init();
2433                 } else {
2434                         return -ENODEV;
2435                 }
2436         }
2437
2438         /*
2439          * Check whether the Architectural PerfMon supports
2440          * Branch Misses Retired hw_event or not.
2441          */
2442         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2443         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2444                 return -ENODEV;
2445
2446         version = eax.split.version_id;
2447         if (version < 2)
2448                 return -ENODEV;
2449
2450         x86_pmu                         = intel_pmu;
2451         x86_pmu.version                 = version;
2452         x86_pmu.num_events              = eax.split.num_events;
2453         x86_pmu.event_bits              = eax.split.bit_width;
2454         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2455
2456         /*
2457          * Quirk: v2 perfmon does not report fixed-purpose events, so
2458          * assume at least 3 events:
2459          */
2460         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2461
2462         /*
2463          * Install the hw-cache-events table:
2464          */
2465         switch (boot_cpu_data.x86_model) {
2466         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2467         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2468         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2469         case 29: /* six-core 45 nm xeon "Dunnington" */
2470                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2471                        sizeof(hw_cache_event_ids));
2472
2473                 x86_pmu.event_constraints = intel_core_event_constraints;
2474                 pr_cont("Core2 events, ");
2475                 break;
2476         case 26:
2477                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2478                        sizeof(hw_cache_event_ids));
2479
2480                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2481                 pr_cont("Nehalem/Corei7 events, ");
2482                 break;
2483         case 28:
2484                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2485                        sizeof(hw_cache_event_ids));
2486
2487                 x86_pmu.event_constraints = intel_gen_event_constraints;
2488                 pr_cont("Atom events, ");
2489                 break;
2490         default:
2491                 /*
2492                  * default constraints for v2 and up
2493                  */
2494                 x86_pmu.event_constraints = intel_gen_event_constraints;
2495                 pr_cont("generic architected perfmon, ");
2496         }
2497         return 0;
2498 }
2499
2500 static __init int amd_pmu_init(void)
2501 {
2502         /* Performance-monitoring supported from K7 and later: */
2503         if (boot_cpu_data.x86 < 6)
2504                 return -ENODEV;
2505
2506         x86_pmu = amd_pmu;
2507
2508         /* Events are common for all AMDs */
2509         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2510                sizeof(hw_cache_event_ids));
2511
2512         return 0;
2513 }
2514
2515 static void __init pmu_check_apic(void)
2516 {
2517         if (cpu_has_apic)
2518                 return;
2519
2520         x86_pmu.apic = 0;
2521         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2522         pr_info("no hardware sampling interrupt available.\n");
2523 }
2524
2525 void __init init_hw_perf_events(void)
2526 {
2527         int err;
2528
2529         pr_info("Performance Events: ");
2530
2531         switch (boot_cpu_data.x86_vendor) {
2532         case X86_VENDOR_INTEL:
2533                 err = intel_pmu_init();
2534                 break;
2535         case X86_VENDOR_AMD:
2536                 err = amd_pmu_init();
2537                 break;
2538         default:
2539                 return;
2540         }
2541         if (err != 0) {
2542                 pr_cont("no PMU driver, software events only.\n");
2543                 return;
2544         }
2545
2546         pmu_check_apic();
2547
2548         pr_cont("%s PMU driver.\n", x86_pmu.name);
2549
2550         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2551                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2552                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2553                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2554         }
2555         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2556         perf_max_events = x86_pmu.num_events;
2557
2558         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2559                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2560                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2561                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2562         }
2563
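        /*
         * Generic counters occupy the low bits of the global event mask;
         * fixed-purpose counters start at bit X86_PMC_IDX_FIXED.
         */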
2564         perf_event_mask |=
2565                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2566         x86_pmu.intel_ctrl = perf_event_mask;
2567
2568         perf_events_lapic_init();
2569         register_die_notifier(&perf_event_nmi_notifier);
2570
2571         unconstrained = (struct event_constraint)
2572                 EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
2573
2574         pr_info("... version:                %d\n",     x86_pmu.version);
2575         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2576         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2577         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2578         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2579         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2580         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2581 }
2582
2583 static inline void x86_pmu_read(struct perf_event *event)
2584 {
2585         x86_perf_event_update(event, &event->hw, event->hw.idx);
2586 }
2587
2588 static const struct pmu pmu = {
2589         .enable         = x86_pmu_enable,
2590         .disable        = x86_pmu_disable,
2591         .read           = x86_pmu_read,
2592         .unthrottle     = x86_pmu_unthrottle,
2593 };
2594
2595 /*
2596  * validate a single event group
2597  *
2598  * validation includes:
2599  *      - check events are compatible with each other
2600  *      - events do not compete for the same counter
2601  *      - number of events <= number of counters
2602  *
2603  * validation ensures the group can be loaded onto the
2604  * PMU if it was the only group available.
2605  */
2606 static int validate_group(struct perf_event *event)
2607 {
2608         struct perf_event *leader = event->group_leader;
2609         struct cpu_hw_events *fake_cpuc;
2610         int ret, n;
2611
2612         ret = -ENOMEM;
2613         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2614         if (!fake_cpuc)
2615                 goto out;
2616
2617         /*
2618          * the event is not yet connected with its siblings,
2619          * therefore we must first collect the existing siblings
2620          * and then add the new event before we can simulate
2621          * the scheduling
2622          */
2623         ret = -ENOSPC;
2624         n = collect_events(fake_cpuc, leader, true);
2625         if (n < 0)
2626                 goto out_free;
2627
2628         fake_cpuc->n_events = n;
2629         n = collect_events(fake_cpuc, event, false);
2630         if (n < 0)
2631                 goto out_free;
2632
2633         fake_cpuc->n_events = n;
2634
2635         ret = x86_schedule_events(fake_cpuc, n, NULL);
2636
2637 out_free:
2638         kfree(fake_cpuc);
2639 out:
2640         return ret;
2641 }
2642
2643 const struct pmu *hw_perf_event_init(struct perf_event *event)
2644 {
2645         const struct pmu *tmp;
2646         int err;
2647
2648         err = __hw_perf_event_init(event);
2649         if (!err) {
2650                 /*
2651                  * we temporarily connect the event to its pmu
2652                  * such that validate_group() can classify
2653                  * it as an x86 event using is_x86_event()
2654                  */
2655                 tmp = event->pmu;
2656                 event->pmu = &pmu;
2657
2658                 if (event->group_leader != event)
2659                         err = validate_group(event);
2660
2661                 event->pmu = tmp;
2662         }
2663         if (err) {
2664                 if (event->destroy)
2665                         event->destroy(event);
2666                 return ERR_PTR(err);
2667         }
2668
2669         return &pmu;
2670 }
2671
2672 /*
2673  * callchain support
2674  */
2675
2676 static inline
2677 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2678 {
2679         if (entry->nr < PERF_MAX_STACK_DEPTH)
2680                 entry->ip[entry->nr++] = ip;
2681 }
2682
2683 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2684 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2685
2686
2687 static void
2688 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2689 {
2690         /* Ignore warnings */
2691 }
2692
2693 static void backtrace_warning(void *data, char *msg)
2694 {
2695         /* Ignore warnings */
2696 }
2697
2698 static int backtrace_stack(void *data, char *name)
2699 {
2700         return 0;
2701 }
2702
2703 static void backtrace_address(void *data, unsigned long addr, int reliable)
2704 {
2705         struct perf_callchain_entry *entry = data;
2706
2707         if (reliable)
2708                 callchain_store(entry, addr);
2709 }
2710
2711 static const struct stacktrace_ops backtrace_ops = {
2712         .warning                = backtrace_warning,
2713         .warning_symbol         = backtrace_warning_symbol,
2714         .stack                  = backtrace_stack,
2715         .address                = backtrace_address,
2716         .walk_stack             = print_context_stack_bp,
2717 };
2718
2719 #include "../dumpstack.h"
2720
2721 static void
2722 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2723 {
2724         callchain_store(entry, PERF_CONTEXT_KERNEL);
2725         callchain_store(entry, regs->ip);
2726
2727         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2728 }
2729
2730 /*
2731  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2732  */
2733 static unsigned long
2734 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2735 {
2736         unsigned long offset, addr = (unsigned long)from;
2737         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2738         unsigned long size, len = 0;
2739         struct page *page;
2740         void *map;
2741         int ret;
2742
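        /*
         * Walk the user buffer page by page: pin each page with
         * __get_user_pages_fast(), copy through a temporary atomic kmap
         * (separate slots for NMI vs. IRQ context), then drop the page
         * reference. A fault terminates the copy early.
         */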
2743         do {
2744                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2745                 if (!ret)
2746                         break;
2747
2748                 offset = addr & (PAGE_SIZE - 1);
2749                 size = min(PAGE_SIZE - offset, n - len);
2750
2751                 map = kmap_atomic(page, type);
2752                 memcpy(to, map+offset, size);
2753                 kunmap_atomic(map, type);
2754                 put_page(page);
2755
2756                 len  += size;
2757                 to   += size;
2758                 addr += size;
2759
2760         } while (len < n);
2761
2762         return len;
2763 }
2764
2765 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2766 {
2767         unsigned long bytes;
2768
2769         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2770
2771         return bytes == sizeof(*frame);
2772 }
2773
2774 static void
2775 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2776 {
2777         struct stack_frame frame;
2778         const void __user *fp;
2779
2780         if (!user_mode(regs))
2781                 regs = task_pt_regs(current);
2782
2783         fp = (void __user *)regs->bp;
2784
2785         callchain_store(entry, PERF_CONTEXT_USER);
2786         callchain_store(entry, regs->ip);
2787
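        /*
         * Walk the user stack by following the saved frame-pointer chain;
         * stop on a copy fault or when a frame pointer points below the
         * current stack pointer (a corrupt chain).
         */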
2788         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2789                 frame.next_frame             = NULL;
2790                 frame.return_address = 0;
2791
2792                 if (!copy_stack_frame(fp, &frame))
2793                         break;
2794
2795                 if ((unsigned long)fp < regs->sp)
2796                         break;
2797
2798                 callchain_store(entry, frame.return_address);
2799                 fp = frame.next_frame;
2800         }
2801 }
2802
2803 static void
2804 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2805 {
2806         int is_user;
2807
2808         if (!regs)
2809                 return;
2810
2811         is_user = user_mode(regs);
2812
2813         if (is_user && current->state != TASK_RUNNING)
2814                 return;
2815
2816         if (!is_user)
2817                 perf_callchain_kernel(regs, entry);
2818
2819         if (current->mm)
2820                 perf_callchain_user(regs, entry);
2821 }
2822
2823 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2824 {
2825         struct perf_callchain_entry *entry;
2826
2827         if (in_nmi())
2828                 entry = &__get_cpu_var(pmc_nmi_entry);
2829         else
2830                 entry = &__get_cpu_var(pmc_irq_entry);
2831
2832         entry->nr = 0;
2833
2834         perf_do_callchain(regs, entry);
2835
2836         return entry;
2837 }
2838
2839 void hw_perf_event_setup_online(int cpu)
2840 {
2841         init_debug_store_on_cpu(cpu);
2842 }