[linux-2.6.git] arch/x86/kernel/cpu/perf_event.c @ 092ad566734c9df66b9f74728520b93ee1ff6f96
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26
27 #include <asm/apic.h>
28 #include <asm/stacktrace.h>
29 #include <asm/nmi.h>
30
31 static u64 perf_event_mask __read_mostly;
32
33 /* The maximal number of PEBS events: */
34 #define MAX_PEBS_EVENTS 4
35
36 /* The size of a BTS record in bytes: */
37 #define BTS_RECORD_SIZE         24
38
39 /* The size of a per-cpu BTS buffer in bytes: */
40 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
41
42 /* The BTS overflow threshold in bytes from the end of the buffer: */
43 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
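/*
 * With the constants above each per-cpu BTS buffer holds 2048 records
 * (24 * 2048 = 48 KiB), and the interrupt threshold sits 128 records
 * (3 KiB) before the absolute end of the buffer.
 */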
44
45
46 /*
47  * Bits in the debugctlmsr controlling branch tracing.
48  */
49 #define X86_DEBUGCTL_TR                 (1 << 6)
50 #define X86_DEBUGCTL_BTS                (1 << 7)
51 #define X86_DEBUGCTL_BTINT              (1 << 8)
52 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
53 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
54
55 /*
56  * A debug store configuration.
57  *
58  * We only support architectures that use 64bit fields.
59  */
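/*
 * bts_index is the hardware's write pointer into
 * [bts_buffer_base, bts_absolute_maximum); once it crosses
 * bts_interrupt_threshold a PMI is raised so the buffer can be drained.
 * The pebs_* fields describe the PEBS region the same way.
 */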
60 struct debug_store {
61         u64     bts_buffer_base;
62         u64     bts_index;
63         u64     bts_absolute_maximum;
64         u64     bts_interrupt_threshold;
65         u64     pebs_buffer_base;
66         u64     pebs_index;
67         u64     pebs_absolute_maximum;
68         u64     pebs_interrupt_threshold;
69         u64     pebs_event_reset[MAX_PEBS_EVENTS];
70 };
71
72 struct event_constraint {
73         union {
74                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75                 u64             idxmsk64[1];
76         };
77         int     code;
78         int     cmask;
79 };
80
81 struct cpu_hw_events {
82         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
83         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
84         unsigned long           interrupts;
85         int                     enabled;
86         struct debug_store      *ds;
87
88         int                     n_events;
89         int                     n_added;
90         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
91         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
92 };
93
94 #define EVENT_CONSTRAINT(c, n, m) {     \
95         { .idxmsk64[0] = (n) },         \
96         .code = (c),                    \
97         .cmask = (m),                   \
98 }
99
100 #define INTEL_EVENT_CONSTRAINT(c, n)    \
101         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
102
103 #define FIXED_EVENT_CONSTRAINT(c, n)    \
104         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
105
106 #define EVENT_CONSTRAINT_END \
107         EVENT_CONSTRAINT(0, 0, 0)
108
109 #define for_each_event_constraint(e, c) \
110         for ((e) = (c); (e)->cmask; (e)++)
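/*
 * The second argument is a bitmap of counters the event may use.  For
 * example INTEL_EVENT_CONSTRAINT(0x12, 0x2) allows event 0x12 (MUL)
 * only on generic counter 1, while
 * FIXED_EVENT_CONSTRAINT(0xc0, 0x3 | (1ULL << 32)) allows generic
 * counters 0-1 as well as the first fixed counter (bit X86_PMC_IDX_FIXED).
 */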
111
112 /*
113  * struct x86_pmu - generic x86 pmu
114  */
115 struct x86_pmu {
116         const char      *name;
117         int             version;
118         int             (*handle_irq)(struct pt_regs *);
119         void            (*disable_all)(void);
120         void            (*enable_all)(void);
121         void            (*enable)(struct hw_perf_event *, int);
122         void            (*disable)(struct hw_perf_event *, int);
123         unsigned        eventsel;
124         unsigned        perfctr;
125         u64             (*event_map)(int);
126         u64             (*raw_event)(u64);
127         int             max_events;
128         int             num_events;
129         int             num_events_fixed;
130         int             event_bits;
131         u64             event_mask;
132         int             apic;
133         u64             max_period;
134         u64             intel_ctrl;
135         void            (*enable_bts)(u64 config);
136         void            (*disable_bts)(void);
137
138         struct event_constraint *
139                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
140                                                  struct perf_event *event);
141
142         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
143                                                  struct perf_event *event);
144         struct event_constraint *event_constraints;
145 };
146
147 static struct x86_pmu x86_pmu __read_mostly;
148
149 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
150         .enabled = 1,
151 };
152
153 static int x86_perf_event_set_period(struct perf_event *event,
154                              struct hw_perf_event *hwc, int idx);
155
156 /*
157  * Not sure about some of these
158  */
159 static const u64 p6_perfmon_event_map[] =
160 {
161   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
162   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
163   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
164   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
165   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
166   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
167   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
168 };
169
170 static u64 p6_pmu_event_map(int hw_event)
171 {
172         return p6_perfmon_event_map[hw_event];
173 }
174
175 /*
176  * Event setting that is specified not to count anything.
177  * We use this to effectively disable a counter.
178  *
179  * L2_RQSTS with 0 MESI unit mask.
180  */
181 #define P6_NOP_EVENT                    0x0000002EULL
182
183 static u64 p6_pmu_raw_event(u64 hw_event)
184 {
185 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
186 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
187 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
188 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
189 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
190
191 #define P6_EVNTSEL_MASK                 \
192         (P6_EVNTSEL_EVENT_MASK |        \
193          P6_EVNTSEL_UNIT_MASK  |        \
194          P6_EVNTSEL_EDGE_MASK  |        \
195          P6_EVNTSEL_INV_MASK   |        \
196          P6_EVNTSEL_REG_MASK)
197
198         return hw_event & P6_EVNTSEL_MASK;
199 }
200
201 static struct event_constraint intel_p6_event_constraints[] =
202 {
203         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
204         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
205         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
206         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
207         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
208         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
209         EVENT_CONSTRAINT_END
210 };
211
212 /*
213  * Intel PerfMon v3. Used on Core2 and later.
214  */
215 static const u64 intel_perfmon_event_map[] =
216 {
217   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
218   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
219   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
220   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
221   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
222   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
223   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
224 };
225
226 static struct event_constraint intel_core_event_constraints[] =
227 {
228         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
229         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
230         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
231         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
232         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
233         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
234         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
235         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
236         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
237         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
238         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
239         EVENT_CONSTRAINT_END
240 };
241
242 static struct event_constraint intel_nehalem_event_constraints[] =
243 {
244         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
245         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
246         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
247         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
248         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
249         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
250         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
251         INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
252         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
253         INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
254         INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
255         INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
256         EVENT_CONSTRAINT_END
257 };
258
259 static struct event_constraint intel_gen_event_constraints[] =
260 {
261         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
262         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
263         EVENT_CONSTRAINT_END
264 };
265
266 static u64 intel_pmu_event_map(int hw_event)
267 {
268         return intel_perfmon_event_map[hw_event];
269 }
270
271 /*
272  * Generalized hw caching related hw_event table, filled
273  * in on a per model basis. A value of 0 means
274  * 'not supported', -1 means 'hw_event makes no sense on
275  * this CPU', any other value means the raw hw_event
276  * ID.
277  */
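/*
 * A PERF_TYPE_HW_CACHE config packs one byte per index:
 * config = type | (op << 8) | (result << 16).  For instance
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) selects the
 * L1D read-miss entry of the tables below.
 */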
278
279 #define C(x) PERF_COUNT_HW_CACHE_##x
280
281 static u64 __read_mostly hw_cache_event_ids
282                                 [PERF_COUNT_HW_CACHE_MAX]
283                                 [PERF_COUNT_HW_CACHE_OP_MAX]
284                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
285
286 static __initconst u64 nehalem_hw_cache_event_ids
287                                 [PERF_COUNT_HW_CACHE_MAX]
288                                 [PERF_COUNT_HW_CACHE_OP_MAX]
289                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
290 {
291  [ C(L1D) ] = {
292         [ C(OP_READ) ] = {
293                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
294                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
295         },
296         [ C(OP_WRITE) ] = {
297                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
298                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
299         },
300         [ C(OP_PREFETCH) ] = {
301                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
302                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
303         },
304  },
305  [ C(L1I ) ] = {
306         [ C(OP_READ) ] = {
307                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
308                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
309         },
310         [ C(OP_WRITE) ] = {
311                 [ C(RESULT_ACCESS) ] = -1,
312                 [ C(RESULT_MISS)   ] = -1,
313         },
314         [ C(OP_PREFETCH) ] = {
315                 [ C(RESULT_ACCESS) ] = 0x0,
316                 [ C(RESULT_MISS)   ] = 0x0,
317         },
318  },
319  [ C(LL  ) ] = {
320         [ C(OP_READ) ] = {
321                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
322                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
323         },
324         [ C(OP_WRITE) ] = {
325                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
326                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
327         },
328         [ C(OP_PREFETCH) ] = {
329                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
330                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
331         },
332  },
333  [ C(DTLB) ] = {
334         [ C(OP_READ) ] = {
335                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
336                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
337         },
338         [ C(OP_WRITE) ] = {
339                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
340                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
341         },
342         [ C(OP_PREFETCH) ] = {
343                 [ C(RESULT_ACCESS) ] = 0x0,
344                 [ C(RESULT_MISS)   ] = 0x0,
345         },
346  },
347  [ C(ITLB) ] = {
348         [ C(OP_READ) ] = {
349                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
350                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
351         },
352         [ C(OP_WRITE) ] = {
353                 [ C(RESULT_ACCESS) ] = -1,
354                 [ C(RESULT_MISS)   ] = -1,
355         },
356         [ C(OP_PREFETCH) ] = {
357                 [ C(RESULT_ACCESS) ] = -1,
358                 [ C(RESULT_MISS)   ] = -1,
359         },
360  },
361  [ C(BPU ) ] = {
362         [ C(OP_READ) ] = {
363                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
364                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
365         },
366         [ C(OP_WRITE) ] = {
367                 [ C(RESULT_ACCESS) ] = -1,
368                 [ C(RESULT_MISS)   ] = -1,
369         },
370         [ C(OP_PREFETCH) ] = {
371                 [ C(RESULT_ACCESS) ] = -1,
372                 [ C(RESULT_MISS)   ] = -1,
373         },
374  },
375 };
376
377 static __initconst u64 core2_hw_cache_event_ids
378                                 [PERF_COUNT_HW_CACHE_MAX]
379                                 [PERF_COUNT_HW_CACHE_OP_MAX]
380                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
381 {
382  [ C(L1D) ] = {
383         [ C(OP_READ) ] = {
384                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
385                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
386         },
387         [ C(OP_WRITE) ] = {
388                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
389                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
390         },
391         [ C(OP_PREFETCH) ] = {
392                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
393                 [ C(RESULT_MISS)   ] = 0,
394         },
395  },
396  [ C(L1I ) ] = {
397         [ C(OP_READ) ] = {
398                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
399                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
400         },
401         [ C(OP_WRITE) ] = {
402                 [ C(RESULT_ACCESS) ] = -1,
403                 [ C(RESULT_MISS)   ] = -1,
404         },
405         [ C(OP_PREFETCH) ] = {
406                 [ C(RESULT_ACCESS) ] = 0,
407                 [ C(RESULT_MISS)   ] = 0,
408         },
409  },
410  [ C(LL  ) ] = {
411         [ C(OP_READ) ] = {
412                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
413                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
414         },
415         [ C(OP_WRITE) ] = {
416                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
417                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
418         },
419         [ C(OP_PREFETCH) ] = {
420                 [ C(RESULT_ACCESS) ] = 0,
421                 [ C(RESULT_MISS)   ] = 0,
422         },
423  },
424  [ C(DTLB) ] = {
425         [ C(OP_READ) ] = {
426                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
427                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
428         },
429         [ C(OP_WRITE) ] = {
430                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
431                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
432         },
433         [ C(OP_PREFETCH) ] = {
434                 [ C(RESULT_ACCESS) ] = 0,
435                 [ C(RESULT_MISS)   ] = 0,
436         },
437  },
438  [ C(ITLB) ] = {
439         [ C(OP_READ) ] = {
440                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
441                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
442         },
443         [ C(OP_WRITE) ] = {
444                 [ C(RESULT_ACCESS) ] = -1,
445                 [ C(RESULT_MISS)   ] = -1,
446         },
447         [ C(OP_PREFETCH) ] = {
448                 [ C(RESULT_ACCESS) ] = -1,
449                 [ C(RESULT_MISS)   ] = -1,
450         },
451  },
452  [ C(BPU ) ] = {
453         [ C(OP_READ) ] = {
454                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
455                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
456         },
457         [ C(OP_WRITE) ] = {
458                 [ C(RESULT_ACCESS) ] = -1,
459                 [ C(RESULT_MISS)   ] = -1,
460         },
461         [ C(OP_PREFETCH) ] = {
462                 [ C(RESULT_ACCESS) ] = -1,
463                 [ C(RESULT_MISS)   ] = -1,
464         },
465  },
466 };
467
468 static __initconst u64 atom_hw_cache_event_ids
469                                 [PERF_COUNT_HW_CACHE_MAX]
470                                 [PERF_COUNT_HW_CACHE_OP_MAX]
471                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
472 {
473  [ C(L1D) ] = {
474         [ C(OP_READ) ] = {
475                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
476                 [ C(RESULT_MISS)   ] = 0,
477         },
478         [ C(OP_WRITE) ] = {
479                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
480                 [ C(RESULT_MISS)   ] = 0,
481         },
482         [ C(OP_PREFETCH) ] = {
483                 [ C(RESULT_ACCESS) ] = 0x0,
484                 [ C(RESULT_MISS)   ] = 0,
485         },
486  },
487  [ C(L1I ) ] = {
488         [ C(OP_READ) ] = {
489                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
490                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
491         },
492         [ C(OP_WRITE) ] = {
493                 [ C(RESULT_ACCESS) ] = -1,
494                 [ C(RESULT_MISS)   ] = -1,
495         },
496         [ C(OP_PREFETCH) ] = {
497                 [ C(RESULT_ACCESS) ] = 0,
498                 [ C(RESULT_MISS)   ] = 0,
499         },
500  },
501  [ C(LL  ) ] = {
502         [ C(OP_READ) ] = {
503                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
504                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
505         },
506         [ C(OP_WRITE) ] = {
507                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
508                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
509         },
510         [ C(OP_PREFETCH) ] = {
511                 [ C(RESULT_ACCESS) ] = 0,
512                 [ C(RESULT_MISS)   ] = 0,
513         },
514  },
515  [ C(DTLB) ] = {
516         [ C(OP_READ) ] = {
517                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
518                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
519         },
520         [ C(OP_WRITE) ] = {
521                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
522                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
523         },
524         [ C(OP_PREFETCH) ] = {
525                 [ C(RESULT_ACCESS) ] = 0,
526                 [ C(RESULT_MISS)   ] = 0,
527         },
528  },
529  [ C(ITLB) ] = {
530         [ C(OP_READ) ] = {
531                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
532                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
533         },
534         [ C(OP_WRITE) ] = {
535                 [ C(RESULT_ACCESS) ] = -1,
536                 [ C(RESULT_MISS)   ] = -1,
537         },
538         [ C(OP_PREFETCH) ] = {
539                 [ C(RESULT_ACCESS) ] = -1,
540                 [ C(RESULT_MISS)   ] = -1,
541         },
542  },
543  [ C(BPU ) ] = {
544         [ C(OP_READ) ] = {
545                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
546                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
547         },
548         [ C(OP_WRITE) ] = {
549                 [ C(RESULT_ACCESS) ] = -1,
550                 [ C(RESULT_MISS)   ] = -1,
551         },
552         [ C(OP_PREFETCH) ] = {
553                 [ C(RESULT_ACCESS) ] = -1,
554                 [ C(RESULT_MISS)   ] = -1,
555         },
556  },
557 };
558
559 static u64 intel_pmu_raw_event(u64 hw_event)
560 {
561 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
562 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
563 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
564 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
565 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
566
567 #define CORE_EVNTSEL_MASK               \
568         (INTEL_ARCH_EVTSEL_MASK |       \
569          INTEL_ARCH_UNIT_MASK   |       \
570          INTEL_ARCH_EDGE_MASK   |       \
571          INTEL_ARCH_INV_MASK    |       \
572          INTEL_ARCH_CNT_MASK)
573
574         return hw_event & CORE_EVNTSEL_MASK;
575 }
576
577 static __initconst u64 amd_hw_cache_event_ids
578                                 [PERF_COUNT_HW_CACHE_MAX]
579                                 [PERF_COUNT_HW_CACHE_OP_MAX]
580                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
581 {
582  [ C(L1D) ] = {
583         [ C(OP_READ) ] = {
584                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
585                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
586         },
587         [ C(OP_WRITE) ] = {
588                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
589                 [ C(RESULT_MISS)   ] = 0,
590         },
591         [ C(OP_PREFETCH) ] = {
592                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
593                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
594         },
595  },
596  [ C(L1I ) ] = {
597         [ C(OP_READ) ] = {
598                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
599                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
600         },
601         [ C(OP_WRITE) ] = {
602                 [ C(RESULT_ACCESS) ] = -1,
603                 [ C(RESULT_MISS)   ] = -1,
604         },
605         [ C(OP_PREFETCH) ] = {
606                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
607                 [ C(RESULT_MISS)   ] = 0,
608         },
609  },
610  [ C(LL  ) ] = {
611         [ C(OP_READ) ] = {
612                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
613                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
614         },
615         [ C(OP_WRITE) ] = {
616                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
617                 [ C(RESULT_MISS)   ] = 0,
618         },
619         [ C(OP_PREFETCH) ] = {
620                 [ C(RESULT_ACCESS) ] = 0,
621                 [ C(RESULT_MISS)   ] = 0,
622         },
623  },
624  [ C(DTLB) ] = {
625         [ C(OP_READ) ] = {
626                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
627                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
628         },
629         [ C(OP_WRITE) ] = {
630                 [ C(RESULT_ACCESS) ] = 0,
631                 [ C(RESULT_MISS)   ] = 0,
632         },
633         [ C(OP_PREFETCH) ] = {
634                 [ C(RESULT_ACCESS) ] = 0,
635                 [ C(RESULT_MISS)   ] = 0,
636         },
637  },
638  [ C(ITLB) ] = {
639         [ C(OP_READ) ] = {
640                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
641                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
642         },
643         [ C(OP_WRITE) ] = {
644                 [ C(RESULT_ACCESS) ] = -1,
645                 [ C(RESULT_MISS)   ] = -1,
646         },
647         [ C(OP_PREFETCH) ] = {
648                 [ C(RESULT_ACCESS) ] = -1,
649                 [ C(RESULT_MISS)   ] = -1,
650         },
651  },
652  [ C(BPU ) ] = {
653         [ C(OP_READ) ] = {
654                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
655                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
656         },
657         [ C(OP_WRITE) ] = {
658                 [ C(RESULT_ACCESS) ] = -1,
659                 [ C(RESULT_MISS)   ] = -1,
660         },
661         [ C(OP_PREFETCH) ] = {
662                 [ C(RESULT_ACCESS) ] = -1,
663                 [ C(RESULT_MISS)   ] = -1,
664         },
665  },
666 };
667
668 /*
669  * AMD Performance Monitor K7 and later.
670  */
671 static const u64 amd_perfmon_event_map[] =
672 {
673   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
674   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
675   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
676   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
677   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
678   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
679 };
680
681 static u64 amd_pmu_event_map(int hw_event)
682 {
683         return amd_perfmon_event_map[hw_event];
684 }
685
686 static u64 amd_pmu_raw_event(u64 hw_event)
687 {
688 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
689 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
690 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
691 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
692 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
693
694 #define K7_EVNTSEL_MASK                 \
695         (K7_EVNTSEL_EVENT_MASK |        \
696          K7_EVNTSEL_UNIT_MASK  |        \
697          K7_EVNTSEL_EDGE_MASK  |        \
698          K7_EVNTSEL_INV_MASK   |        \
699          K7_EVNTSEL_REG_MASK)
700
701         return hw_event & K7_EVNTSEL_MASK;
702 }
703
704 /*
705  * Propagate event elapsed time into the generic event.
706  * Can only be executed on the CPU where the event is active.
707  * Returns the new raw count (the computed delta is folded into the event).
708  */
709 static u64
710 x86_perf_event_update(struct perf_event *event,
711                         struct hw_perf_event *hwc, int idx)
712 {
713         int shift = 64 - x86_pmu.event_bits;
714         u64 prev_raw_count, new_raw_count;
715         s64 delta;
716
717         if (idx == X86_PMC_IDX_FIXED_BTS)
718                 return 0;
719
720         /*
721          * Careful: an NMI might modify the previous event value.
722          *
723          * Our tactic to handle this is to first atomically read and
724          * exchange a new raw count - then add that new-prev delta
725          * count to the generic event atomically:
726          */
727 again:
728         prev_raw_count = atomic64_read(&hwc->prev_count);
729         rdmsrl(hwc->event_base + idx, new_raw_count);
730
731         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
732                                         new_raw_count) != prev_raw_count)
733                 goto again;
734
735         /*
736          * Now we have the new raw value and have updated the prev
737          * timestamp already. We can now calculate the elapsed delta
738          * (event-)time and add that to the generic event.
739          *
740          * Careful, not all hw sign-extends above the physical width
741          * of the count.
742          */
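        /*
         * E.g. with 40 event bits the shift is 24: shifting both raw
         * counts up by 24 and the signed difference back down
         * sign-extends the 40-bit values, so the delta stays correct
         * across a counter wrap.
         */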
743         delta = (new_raw_count << shift) - (prev_raw_count << shift);
744         delta >>= shift;
745
746         atomic64_add(delta, &event->count);
747         atomic64_sub(delta, &hwc->period_left);
748
749         return new_raw_count;
750 }
751
752 static atomic_t active_events;
753 static DEFINE_MUTEX(pmc_reserve_mutex);
754
755 static bool reserve_pmc_hardware(void)
756 {
757 #ifdef CONFIG_X86_LOCAL_APIC
758         int i;
759
760         if (nmi_watchdog == NMI_LOCAL_APIC)
761                 disable_lapic_nmi_watchdog();
762
763         for (i = 0; i < x86_pmu.num_events; i++) {
764                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
765                         goto perfctr_fail;
766         }
767
768         for (i = 0; i < x86_pmu.num_events; i++) {
769                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
770                         goto eventsel_fail;
771         }
772 #endif
773
774         return true;
775
776 #ifdef CONFIG_X86_LOCAL_APIC
777 eventsel_fail:
778         for (i--; i >= 0; i--)
779                 release_evntsel_nmi(x86_pmu.eventsel + i);
780
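        /*
         * Every perfctr was reserved before the eventsel reservation
         * failed, so reset the index and release all of them below.
         */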
781         i = x86_pmu.num_events;
782
783 perfctr_fail:
784         for (i--; i >= 0; i--)
785                 release_perfctr_nmi(x86_pmu.perfctr + i);
786
787         if (nmi_watchdog == NMI_LOCAL_APIC)
788                 enable_lapic_nmi_watchdog();
789
790         return false;
791 #endif
792 }
793
794 static void release_pmc_hardware(void)
795 {
796 #ifdef CONFIG_X86_LOCAL_APIC
797         int i;
798
799         for (i = 0; i < x86_pmu.num_events; i++) {
800                 release_perfctr_nmi(x86_pmu.perfctr + i);
801                 release_evntsel_nmi(x86_pmu.eventsel + i);
802         }
803
804         if (nmi_watchdog == NMI_LOCAL_APIC)
805                 enable_lapic_nmi_watchdog();
806 #endif
807 }
808
809 static inline bool bts_available(void)
810 {
811         return x86_pmu.enable_bts != NULL;
812 }
813
814 static inline void init_debug_store_on_cpu(int cpu)
815 {
816         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
817
818         if (!ds)
819                 return;
820
821         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
822                      (u32)((u64)(unsigned long)ds),
823                      (u32)((u64)(unsigned long)ds >> 32));
824 }
825
826 static inline void fini_debug_store_on_cpu(int cpu)
827 {
828         if (!per_cpu(cpu_hw_events, cpu).ds)
829                 return;
830
831         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
832 }
833
834 static void release_bts_hardware(void)
835 {
836         int cpu;
837
838         if (!bts_available())
839                 return;
840
841         get_online_cpus();
842
843         for_each_online_cpu(cpu)
844                 fini_debug_store_on_cpu(cpu);
845
846         for_each_possible_cpu(cpu) {
847                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
848
849                 if (!ds)
850                         continue;
851
852                 per_cpu(cpu_hw_events, cpu).ds = NULL;
853
854                 kfree((void *)(unsigned long)ds->bts_buffer_base);
855                 kfree(ds);
856         }
857
858         put_online_cpus();
859 }
860
861 static int reserve_bts_hardware(void)
862 {
863         int cpu, err = 0;
864
865         if (!bts_available())
866                 return 0;
867
868         get_online_cpus();
869
870         for_each_possible_cpu(cpu) {
871                 struct debug_store *ds;
872                 void *buffer;
873
874                 err = -ENOMEM;
875                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
876                 if (unlikely(!buffer))
877                         break;
878
879                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
880                 if (unlikely(!ds)) {
881                         kfree(buffer);
882                         break;
883                 }
884
885                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
886                 ds->bts_index = ds->bts_buffer_base;
887                 ds->bts_absolute_maximum =
888                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
889                 ds->bts_interrupt_threshold =
890                         ds->bts_absolute_maximum - BTS_OVFL_TH;
891
892                 per_cpu(cpu_hw_events, cpu).ds = ds;
893                 err = 0;
894         }
895
896         if (err)
897                 release_bts_hardware();
898         else {
899                 for_each_online_cpu(cpu)
900                         init_debug_store_on_cpu(cpu);
901         }
902
903         put_online_cpus();
904
905         return err;
906 }
907
908 static void hw_perf_event_destroy(struct perf_event *event)
909 {
910         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
911                 release_pmc_hardware();
912                 release_bts_hardware();
913                 mutex_unlock(&pmc_reserve_mutex);
914         }
915 }
916
917 static inline int x86_pmu_initialized(void)
918 {
919         return x86_pmu.handle_irq != NULL;
920 }
921
922 static inline int
923 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
924 {
925         unsigned int cache_type, cache_op, cache_result;
926         u64 config, val;
927
928         config = attr->config;
929
930         cache_type = (config >>  0) & 0xff;
931         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
932                 return -EINVAL;
933
934         cache_op = (config >>  8) & 0xff;
935         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
936                 return -EINVAL;
937
938         cache_result = (config >> 16) & 0xff;
939         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
940                 return -EINVAL;
941
942         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
943
944         if (val == 0)
945                 return -ENOENT;
946
947         if (val == -1)
948                 return -EINVAL;
949
950         hwc->config |= val;
951
952         return 0;
953 }
954
955 static void intel_pmu_enable_bts(u64 config)
956 {
957         unsigned long debugctlmsr;
958
959         debugctlmsr = get_debugctlmsr();
960
961         debugctlmsr |= X86_DEBUGCTL_TR;
962         debugctlmsr |= X86_DEBUGCTL_BTS;
963         debugctlmsr |= X86_DEBUGCTL_BTINT;
964
965         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
966                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
967
968         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
969                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
970
971         update_debugctlmsr(debugctlmsr);
972 }
973
974 static void intel_pmu_disable_bts(void)
975 {
976         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
977         unsigned long debugctlmsr;
978
979         if (!cpuc->ds)
980                 return;
981
982         debugctlmsr = get_debugctlmsr();
983
984         debugctlmsr &=
985                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
986                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
987
988         update_debugctlmsr(debugctlmsr);
989 }
990
991 /*
992  * Setup the hardware configuration for a given attr_type
993  */
994 static int __hw_perf_event_init(struct perf_event *event)
995 {
996         struct perf_event_attr *attr = &event->attr;
997         struct hw_perf_event *hwc = &event->hw;
998         u64 config;
999         int err;
1000
1001         if (!x86_pmu_initialized())
1002                 return -ENODEV;
1003
1004         err = 0;
1005         if (!atomic_inc_not_zero(&active_events)) {
1006                 mutex_lock(&pmc_reserve_mutex);
1007                 if (atomic_read(&active_events) == 0) {
1008                         if (!reserve_pmc_hardware())
1009                                 err = -EBUSY;
1010                         else
1011                                 err = reserve_bts_hardware();
1012                 }
1013                 if (!err)
1014                         atomic_inc(&active_events);
1015                 mutex_unlock(&pmc_reserve_mutex);
1016         }
1017         if (err)
1018                 return err;
1019
1020         event->destroy = hw_perf_event_destroy;
1021
1022         /*
1023          * Generate PMC IRQs:
1024          * (keep 'enabled' bit clear for now)
1025          */
1026         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1027
1028         hwc->idx = -1;
1029
1030         /*
1031          * Count user and OS events unless requested not to.
1032          */
1033         if (!attr->exclude_user)
1034                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1035         if (!attr->exclude_kernel)
1036                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1037
1038         if (!hwc->sample_period) {
1039                 hwc->sample_period = x86_pmu.max_period;
1040                 hwc->last_period = hwc->sample_period;
1041                 atomic64_set(&hwc->period_left, hwc->sample_period);
1042         } else {
1043                 /*
1044                  * If we have a PMU initialized but no APIC
1045                  * interrupts, we cannot sample hardware
1046                  * events (user-space has to fall back and
1047                  * sample via a hrtimer based software event):
1048                  */
1049                 if (!x86_pmu.apic)
1050                         return -EOPNOTSUPP;
1051         }
1052
1053         /*
1054          * Raw hw_event types provide the config directly in the hw_event structure
1055          */
1056         if (attr->type == PERF_TYPE_RAW) {
1057                 hwc->config |= x86_pmu.raw_event(attr->config);
1058                 return 0;
1059         }
1060
1061         if (attr->type == PERF_TYPE_HW_CACHE)
1062                 return set_ext_hw_attr(hwc, attr);
1063
1064         if (attr->config >= x86_pmu.max_events)
1065                 return -EINVAL;
1066
1067         /*
1068          * The generic map:
1069          */
1070         config = x86_pmu.event_map(attr->config);
1071
1072         if (config == 0)
1073                 return -ENOENT;
1074
1075         if (config == -1LL)
1076                 return -EINVAL;
1077
1078         /*
1079          * Branch tracing:
1080          */
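        /*
         * A branch-instructions event with sample_period == 1 selects
         * BTS: every branch is then logged to the per-cpu debug store
         * buffer rather than counted on a generic counter.
         */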
1081         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1082             (hwc->sample_period == 1)) {
1083                 /* BTS is not supported by this architecture. */
1084                 if (!bts_available())
1085                         return -EOPNOTSUPP;
1086
1087                 /* BTS is currently only allowed for user-mode. */
1088                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1089                         return -EOPNOTSUPP;
1090         }
1091
1092         hwc->config |= config;
1093
1094         return 0;
1095 }
1096
1097 static void p6_pmu_disable_all(void)
1098 {
1099         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1100         u64 val;
1101
1102         if (!cpuc->enabled)
1103                 return;
1104
1105         cpuc->enabled = 0;
1106         barrier();
1107
1108         /* p6 only has one enable register */
1109         rdmsrl(MSR_P6_EVNTSEL0, val);
1110         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1111         wrmsrl(MSR_P6_EVNTSEL0, val);
1112 }
1113
1114 static void intel_pmu_disable_all(void)
1115 {
1116         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1117
1118         if (!cpuc->enabled)
1119                 return;
1120
1121         cpuc->enabled = 0;
1122         barrier();
1123
1124         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1125
1126         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1127                 intel_pmu_disable_bts();
1128 }
1129
1130 static void amd_pmu_disable_all(void)
1131 {
1132         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1133         int idx;
1134
1135         if (!cpuc->enabled)
1136                 return;
1137
1138         cpuc->enabled = 0;
1139         /*
1140          * ensure we write the disable before we start disabling the
1141          * events proper, so that amd_pmu_enable_event() does the
1142          * right thing.
1143          */
1144         barrier();
1145
1146         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1147                 u64 val;
1148
1149                 if (!test_bit(idx, cpuc->active_mask))
1150                         continue;
1151                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1152                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1153                         continue;
1154                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1155                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1156         }
1157 }
1158
1159 void hw_perf_disable(void)
1160 {
1161         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1162
1163         if (!x86_pmu_initialized())
1164                 return;
1165
1166         if (cpuc->enabled)
1167                 cpuc->n_added = 0;
1168
1169         x86_pmu.disable_all();
1170 }
1171
1172 static void p6_pmu_enable_all(void)
1173 {
1174         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1175         unsigned long val;
1176
1177         if (cpuc->enabled)
1178                 return;
1179
1180         cpuc->enabled = 1;
1181         barrier();
1182
1183         /* p6 only has one enable register */
1184         rdmsrl(MSR_P6_EVNTSEL0, val);
1185         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1186         wrmsrl(MSR_P6_EVNTSEL0, val);
1187 }
1188
1189 static void intel_pmu_enable_all(void)
1190 {
1191         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1192
1193         if (cpuc->enabled)
1194                 return;
1195
1196         cpuc->enabled = 1;
1197         barrier();
1198
1199         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1200
1201         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1202                 struct perf_event *event =
1203                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1204
1205                 if (WARN_ON_ONCE(!event))
1206                         return;
1207
1208                 intel_pmu_enable_bts(event->hw.config);
1209         }
1210 }
1211
1212 static void amd_pmu_enable_all(void)
1213 {
1214         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1215         int idx;
1216
1217         if (cpuc->enabled)
1218                 return;
1219
1220         cpuc->enabled = 1;
1221         barrier();
1222
1223         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1224                 struct perf_event *event = cpuc->events[idx];
1225                 u64 val;
1226
1227                 if (!test_bit(idx, cpuc->active_mask))
1228                         continue;
1229
1230                 val = event->hw.config;
1231                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1232                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1233         }
1234 }
1235
1236 static const struct pmu pmu;
1237
1238 static inline int is_x86_event(struct perf_event *event)
1239 {
1240         return event->pmu == &pmu;
1241 }
1242
1243 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1244 {
1245         int i, j, w, num;
1246         int weight, wmax;
1247         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1248         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1249         struct hw_perf_event *hwc;
1250
1251         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1252
1253         for (i = 0; i < n; i++) {
1254                 constraints[i] =
1255                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1256         }
1257
1258         /*
1259          * fastpath, try to reuse previous register
1260          */
1261         for (i = 0, num = n; i < n; i++, num--) {
1262                 hwc = &cpuc->event_list[i]->hw;
1263                 c = constraints[i];
1264
1265                 /* never assigned */
1266                 if (hwc->idx == -1)
1267                         break;
1268
1269                 /* constraint still honored */
1270                 if (!test_bit(hwc->idx, c->idxmsk))
1271                         break;
1272
1273                 /* not already used */
1274                 if (test_bit(hwc->idx, used_mask))
1275                         break;
1276
1277 #if 0
1278                 pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
1279                          smp_processor_id(),
1280                          hwc->config,
1281                          hwc->idx,
1282                          assign ? 'y' : 'n');
1283 #endif
1284
1285                 set_bit(hwc->idx, used_mask);
1286                 if (assign)
1287                         assign[i] = hwc->idx;
1288         }
1289         if (!num)
1290                 goto done;
1291
1292         /*
1293          * begin slow path
1294          */
1295
1296         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1297
1298         /*
1299          * weight = number of possible counters
1300          *
1301          * 1    = most constrained, only works on one counter
1302          * wmax = least constrained, works on any counter
1303          *
1304          * assign events to counters starting with most
1305          * constrained events.
1306          */
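        /*
         * Example: one event limited to counter 0 (mask 0x1) plus one
         * allowed on counters 0-1 (mask 0x3).  The w == 1 pass pins the
         * first event to counter 0 and the w == 2 pass places the second
         * on counter 1; scheduling in list order could instead hand
         * counter 0 to the flexible event and fail the constrained one.
         */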
1307         wmax = x86_pmu.num_events;
1308
1309         /*
1310          * when fixed event counters are present,
1311          * wmax is incremented by 1 to account
1312          * for one more choice
1313          */
1314         if (x86_pmu.num_events_fixed)
1315                 wmax++;
1316
1317         for (w = 1, num = n; num && w <= wmax; w++) {
1318                 /* for each event */
1319                 for (i = 0; num && i < n; i++) {
1320                         c = constraints[i];
1321                         hwc = &cpuc->event_list[i]->hw;
1322
1323                         weight = bitmap_weight(c->idxmsk, X86_PMC_IDX_MAX);
1324                         if (weight != w)
1325                                 continue;
1326
1327                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1328                                 if (!test_bit(j, used_mask))
1329                                         break;
1330                         }
1331
1332                         if (j == X86_PMC_IDX_MAX)
1333                                 break;
1334
1335 #if 0
1336                         pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
1337                                 smp_processor_id(),
1338                                 hwc->config,
1339                                 j,
1340                                 assign ? 'y' : 'n');
1341 #endif
1342
1343                         set_bit(j, used_mask);
1344
1345                         if (assign)
1346                                 assign[i] = j;
1347                         num--;
1348                 }
1349         }
1350 done:
1351         /*
1352          * scheduling failed or is just a simulation,
1353          * free resources if necessary
1354          */
1355         if (!assign || num) {
1356                 for (i = 0; i < n; i++) {
1357                         if (x86_pmu.put_event_constraints)
1358                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1359                 }
1360         }
1361         return num ? -ENOSPC : 0;
1362 }
1363
1364 /*
1365  * dogrp: true if the leader's sibling events (the whole group) must be collected
1366  * returns the total number of collected events, or a negative error code
1367  */
1368 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1369 {
1370         struct perf_event *event;
1371         int n, max_count;
1372
1373         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1374
1375         /* current number of events already accepted */
1376         n = cpuc->n_events;
1377
1378         if (is_x86_event(leader)) {
1379                 if (n >= max_count)
1380                         return -ENOSPC;
1381                 cpuc->event_list[n] = leader;
1382                 n++;
1383         }
1384         if (!dogrp)
1385                 return n;
1386
1387         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1388                 if (!is_x86_event(event) ||
1389                     event->state <= PERF_EVENT_STATE_OFF)
1390                         continue;
1391
1392                 if (n >= max_count)
1393                         return -ENOSPC;
1394
1395                 cpuc->event_list[n] = event;
1396                 n++;
1397         }
1398         return n;
1399 }
1400
1401
1402 static inline void x86_assign_hw_event(struct perf_event *event,
1403                                 struct hw_perf_event *hwc, int idx)
1404 {
1405         hwc->idx = idx;
1406
1407         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1408                 hwc->config_base = 0;
1409                 hwc->event_base = 0;
1410         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1411                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1412                 /*
1413                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1414                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1415                  */
1416                 hwc->event_base =
1417                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
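                /*
                 * With X86_PMC_IDX_FIXED == 32, idx 33 (the second fixed
                 * counter) then resolves to MSR_ARCH_PERFMON_FIXED_CTR1.
                 */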
1418         } else {
1419                 hwc->config_base = x86_pmu.eventsel;
1420                 hwc->event_base  = x86_pmu.perfctr;
1421         }
1422 }
1423
1424 void hw_perf_enable(void)
1425 {
1426         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1427         struct perf_event *event;
1428         struct hw_perf_event *hwc;
1429         int i;
1430
1431         if (!x86_pmu_initialized())
1432                 return;
1433         if (cpuc->n_added) {
1434                 /*
1435                  * apply assignment obtained either from
1436                  * hw_perf_group_sched_in() or x86_pmu_enable()
1437                  *
1438                  * step1: save events moving to new counters
1439                  * step2: reprogram moved events into new counters
1440                  */
1441                 for (i = 0; i < cpuc->n_events; i++) {
1442
1443                         event = cpuc->event_list[i];
1444                         hwc = &event->hw;
1445
1446                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1447                                 continue;
1448
1449                         x86_pmu.disable(hwc, hwc->idx);
1450
1451                         clear_bit(hwc->idx, cpuc->active_mask);
1452                         barrier();
1453                         cpuc->events[hwc->idx] = NULL;
1454
1455                         x86_perf_event_update(event, hwc, hwc->idx);
1456
1457                         hwc->idx = -1;
1458                 }
1459
1460                 for (i = 0; i < cpuc->n_events; i++) {
1461
1462                         event = cpuc->event_list[i];
1463                         hwc = &event->hw;
1464
1465                         if (hwc->idx == -1) {
1466                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1467                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1468                         }
1469                         /*
1470                          * need to mark as active because x86_pmu_disable()
1471                          * clears active_mask and events[] yet it preserves
1472                          * idx
1473                          */
1474                         set_bit(hwc->idx, cpuc->active_mask);
1475                         cpuc->events[hwc->idx] = event;
1476
1477                         x86_pmu.enable(hwc, hwc->idx);
1478                         perf_event_update_userpage(event);
1479                 }
1480                 cpuc->n_added = 0;
1481                 perf_events_lapic_init();
1482         }
1483         x86_pmu.enable_all();
1484 }
1485
1486 static inline u64 intel_pmu_get_status(void)
1487 {
1488         u64 status;
1489
1490         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1491
1492         return status;
1493 }
1494
1495 static inline void intel_pmu_ack_status(u64 ack)
1496 {
1497         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1498 }
1499
1500 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1501 {
1502         (void)checking_wrmsrl(hwc->config_base + idx,
1503                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1504 }
1505
1506 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1507 {
1508         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1509 }
1510
1511 static inline void
1512 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1513 {
1514         int idx = __idx - X86_PMC_IDX_FIXED;
1515         u64 ctrl_val, mask;
1516
1517         mask = 0xfULL << (idx * 4);
1518
1519         rdmsrl(hwc->config_base, ctrl_val);
1520         ctrl_val &= ~mask;
1521         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1522 }
1523
1524 static inline void
1525 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1526 {
1527         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1528         u64 val = P6_NOP_EVENT;
1529
1530         if (cpuc->enabled)
1531                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1532
1533         (void)checking_wrmsrl(hwc->config_base + idx, val);
1534 }
1535
1536 static inline void
1537 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1538 {
1539         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1540                 intel_pmu_disable_bts();
1541                 return;
1542         }
1543
1544         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1545                 intel_pmu_disable_fixed(hwc, idx);
1546                 return;
1547         }
1548
1549         x86_pmu_disable_event(hwc, idx);
1550 }
1551
1552 static inline void
1553 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1554 {
1555         x86_pmu_disable_event(hwc, idx);
1556 }
1557
1558 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1559
1560 /*
1561  * Set the next IRQ period, based on the hwc->period_left value.
1562  * To be called with the event disabled in hw:
1563  */
1564 static int
1565 x86_perf_event_set_period(struct perf_event *event,
1566                              struct hw_perf_event *hwc, int idx)
1567 {
1568         s64 left = atomic64_read(&hwc->period_left);
1569         s64 period = hwc->sample_period;
1570         int err, ret = 0;
1571
1572         if (idx == X86_PMC_IDX_FIXED_BTS)
1573                 return 0;
1574
1575         /*
1576          * If we are way outside a reasonable range then just skip forward:
1577          */
1578         if (unlikely(left <= -period)) {
1579                 left = period;
1580                 atomic64_set(&hwc->period_left, left);
1581                 hwc->last_period = period;
1582                 ret = 1;
1583         }
1584
1585         if (unlikely(left <= 0)) {
1586                 left += period;
1587                 atomic64_set(&hwc->period_left, left);
1588                 hwc->last_period = period;
1589                 ret = 1;
1590         }
1591         /*
1592          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1593          */
1594         if (unlikely(left < 2))
1595                 left = 2;
1596
1597         if (left > x86_pmu.max_period)
1598                 left = x86_pmu.max_period;
1599
1600         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1601
1602         /*
1603          * The hw event starts counting from this event offset,
1604          * mark it to be able to extract future deltas:
1605          */
1606         atomic64_set(&hwc->prev_count, (u64)-left);
1607
1608         err = checking_wrmsrl(hwc->event_base + idx,
1609                              (u64)(-left) & x86_pmu.event_mask);
1610
1611         perf_event_update_userpage(event);
1612
1613         return ret;
1614 }
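/*
 * A rough example of the programming above: with a sample period of
 * 100000 and period_left fully replenished, left == 100000 and the
 * counter MSR is written with (u64)(-100000) & x86_pmu.event_mask.
 * The counter then counts upwards and raises its overflow interrupt
 * after roughly 100000 more events, while prev_count is primed so the
 * next x86_perf_event_update() sees a delta of about the same size.
 */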
1615
1616 static inline void
1617 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1618 {
1619         int idx = __idx - X86_PMC_IDX_FIXED;
1620         u64 ctrl_val, bits, mask;
1621         int err;
1622
1623         /*
1624          * Enable IRQ generation (0x8),
1625          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1626          * if requested:
1627          */
1628         bits = 0x8ULL;
1629         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1630                 bits |= 0x2;
1631         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1632                 bits |= 0x1;
1633         bits <<= (idx * 4);
1634         mask = 0xfULL << (idx * 4);
1635
1636         rdmsrl(hwc->config_base, ctrl_val);
1637         ctrl_val &= ~mask;
1638         ctrl_val |= bits;
1639         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1640 }
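/*
 * Worked example of the field layout used above: each fixed counter
 * owns a 4-bit control field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL.  For
 * fixed counter 1 counting in both ring 0 and ring 3 with the PMI bit
 * set, bits == 0xb, and both bits and the 0xf mask are shifted left
 * by idx * 4 == 4 before being merged into ctrl_val.
 */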
1641
1642 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1643 {
1644         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1645         u64 val;
1646
1647         val = hwc->config;
1648         if (cpuc->enabled)
1649                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1650
1651         (void)checking_wrmsrl(hwc->config_base + idx, val);
1652 }
1653
1654
1655 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1656 {
1657         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1658                 if (!__get_cpu_var(cpu_hw_events).enabled)
1659                         return;
1660
1661                 intel_pmu_enable_bts(hwc->config);
1662                 return;
1663         }
1664
1665         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1666                 intel_pmu_enable_fixed(hwc, idx);
1667                 return;
1668         }
1669
1670         x86_pmu_enable_event(hwc, idx);
1671 }
1672
1673 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1674 {
1675         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1676
1677         if (cpuc->enabled)
1678                 x86_pmu_enable_event(hwc, idx);
1679 }
1680
1681 /*
1682  * activate a single event
1683  *
1684  * The event is added to the group of enabled events
1685  * but only if it can be scheduled with existing events.
1686  *
1687  * Called with the PMU disabled. If this succeeds, the caller is then
1688  * guaranteed to call perf_enable() and hw_perf_enable().
1689  */
1690 static int x86_pmu_enable(struct perf_event *event)
1691 {
1692         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1693         struct hw_perf_event *hwc;
1694         int assign[X86_PMC_IDX_MAX];
1695         int n, n0, ret;
1696
1697         hwc = &event->hw;
1698
1699         n0 = cpuc->n_events;
1700         n = collect_events(cpuc, event, false);
1701         if (n < 0)
1702                 return n;
1703
1704         ret = x86_schedule_events(cpuc, n, assign);
1705         if (ret)
1706                 return ret;
1707         /*
1708          * copy the new assignment, now that we know it is possible;
1709          * it will be used by hw_perf_enable()
1710          */
1711         memcpy(cpuc->assign, assign, n*sizeof(int));
1712
1713         cpuc->n_events = n;
1714         cpuc->n_added  = n - n0;
1715
1716         if (hwc->idx != -1)
1717                 x86_perf_event_set_period(event, hwc, hwc->idx);
1718
1719         return 0;
1720 }
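/*
 * In other words, enabling an event is a two-step transaction:
 * collect_events() appends it to cpuc->event_list, and
 * x86_schedule_events() computes a tentative counter assignment.
 * Only if that succeeds is the assignment copied into cpuc->assign,
 * where hw_perf_enable() later picks it up and programs the hardware.
 */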
1721
1722 static void x86_pmu_unthrottle(struct perf_event *event)
1723 {
1724         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1725         struct hw_perf_event *hwc = &event->hw;
1726
1727         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1728                                 cpuc->events[hwc->idx] != event))
1729                 return;
1730
1731         x86_pmu.enable(hwc, hwc->idx);
1732 }
1733
1734 void perf_event_print_debug(void)
1735 {
1736         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1737         struct cpu_hw_events *cpuc;
1738         unsigned long flags;
1739         int cpu, idx;
1740
1741         if (!x86_pmu.num_events)
1742                 return;
1743
1744         local_irq_save(flags);
1745
1746         cpu = smp_processor_id();
1747         cpuc = &per_cpu(cpu_hw_events, cpu);
1748
1749         if (x86_pmu.version >= 2) {
1750                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1751                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1752                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1753                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1754
1755                 pr_info("\n");
1756                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1757                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1758                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1759                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1760         }
1761         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1762
1763         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1764                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1765                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1766
1767                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1768
1769                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1770                         cpu, idx, pmc_ctrl);
1771                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1772                         cpu, idx, pmc_count);
1773                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1774                         cpu, idx, prev_left);
1775         }
1776         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1777                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1778
1779                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1780                         cpu, idx, pmc_count);
1781         }
1782         local_irq_restore(flags);
1783 }
1784
1785 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1786 {
1787         struct debug_store *ds = cpuc->ds;
1788         struct bts_record {
1789                 u64     from;
1790                 u64     to;
1791                 u64     flags;
1792         };
1793         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1794         struct bts_record *at, *top;
1795         struct perf_output_handle handle;
1796         struct perf_event_header header;
1797         struct perf_sample_data data;
1798         struct pt_regs regs;
1799
1800         if (!event)
1801                 return;
1802
1803         if (!ds)
1804                 return;
1805
1806         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1807         top = (struct bts_record *)(unsigned long)ds->bts_index;
1808
1809         if (top <= at)
1810                 return;
1811
1812         ds->bts_index = ds->bts_buffer_base;
1813
1814
1815         data.period     = event->hw.last_period;
1816         data.addr       = 0;
1817         data.raw        = NULL;
1818         regs.ip         = 0;
1819
1820         /*
1821          * Prepare a generic sample, i.e. fill in the invariant fields.
1822          * We will overwrite the from and to address before we output
1823          * the sample.
1824          */
1825         perf_prepare_sample(&header, &data, event, &regs);
1826
1827         if (perf_output_begin(&handle, event,
1828                               header.size * (top - at), 1, 1))
1829                 return;
1830
1831         for (; at < top; at++) {
1832                 data.ip         = at->from;
1833                 data.addr       = at->to;
1834
1835                 perf_output_sample(&handle, &header, &data, event);
1836         }
1837
1838         perf_output_end(&handle);
1839
1840         /* There's new data available. */
1841         event->hw.interrupts++;
1842         event->pending_kill = POLL_IN;
1843 }
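/*
 * Each BTS record is three 64-bit words (branch-from, branch-to and a
 * flags word), mirrored by the local struct bts_record above.  The
 * drain converts every record between bts_buffer_base and bts_index
 * into a regular perf sample, with data.ip set to the source and
 * data.addr to the destination of the branch, and then rewinds the
 * hardware index back to the start of the buffer.
 */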
1844
1845 static void x86_pmu_disable(struct perf_event *event)
1846 {
1847         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1848         struct hw_perf_event *hwc = &event->hw;
1849         int i, idx = hwc->idx;
1850
1851         /*
1852          * Must be done before we disable, otherwise the nmi handler
1853          * could reenable again:
1854          */
1855         clear_bit(idx, cpuc->active_mask);
1856         x86_pmu.disable(hwc, idx);
1857
1858         /*
1859          * Make sure the cleared pointer becomes visible before we
1860          * (potentially) free the event:
1861          */
1862         barrier();
1863
1864         /*
1865          * Drain the remaining delta count out of an event
1866          * that we are disabling:
1867          */
1868         x86_perf_event_update(event, hwc, idx);
1869
1870         /* Drain the remaining BTS records. */
1871         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1872                 intel_pmu_drain_bts_buffer(cpuc);
1873
1874         cpuc->events[idx] = NULL;
1875
1876         for (i = 0; i < cpuc->n_events; i++) {
1877                 if (event == cpuc->event_list[i]) {
1878
1879                         if (x86_pmu.put_event_constraints)
1880                                 x86_pmu.put_event_constraints(cpuc, event);
1881
1882                         while (++i < cpuc->n_events)
1883                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1884
1885                         --cpuc->n_events;
1886                 }
1887         }
1888         perf_event_update_userpage(event);
1889 }
1890
1891 /*
1892  * Save and restart an expired event. Called by NMI contexts,
1893  * so it has to be careful about preempting normal event ops:
1894  */
1895 static int intel_pmu_save_and_restart(struct perf_event *event)
1896 {
1897         struct hw_perf_event *hwc = &event->hw;
1898         int idx = hwc->idx;
1899         int ret;
1900
1901         x86_perf_event_update(event, hwc, idx);
1902         ret = x86_perf_event_set_period(event, hwc, idx);
1903
1904         if (event->state == PERF_EVENT_STATE_ACTIVE)
1905                 intel_pmu_enable_event(hwc, idx);
1906
1907         return ret;
1908 }
1909
1910 static void intel_pmu_reset(void)
1911 {
1912         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1913         unsigned long flags;
1914         int idx;
1915
1916         if (!x86_pmu.num_events)
1917                 return;
1918
1919         local_irq_save(flags);
1920
1921         printk(KERN_INFO "clearing PMU state on CPU#%d\n", smp_processor_id());
1922
1923         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1924                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1925                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1926         }
1927         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1928                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1929         }
1930         if (ds)
1931                 ds->bts_index = ds->bts_buffer_base;
1932
1933         local_irq_restore(flags);
1934 }
1935
1936 static int p6_pmu_handle_irq(struct pt_regs *regs)
1937 {
1938         struct perf_sample_data data;
1939         struct cpu_hw_events *cpuc;
1940         struct perf_event *event;
1941         struct hw_perf_event *hwc;
1942         int idx, handled = 0;
1943         u64 val;
1944
1945         data.addr = 0;
1946         data.raw = NULL;
1947
1948         cpuc = &__get_cpu_var(cpu_hw_events);
1949
1950         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1951                 if (!test_bit(idx, cpuc->active_mask))
1952                         continue;
1953
1954                 event = cpuc->events[idx];
1955                 hwc = &event->hw;
1956
1957                 val = x86_perf_event_update(event, hwc, idx);
1958                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1959                         continue;
1960
1961                 /*
1962                  * event overflow
1963                  */
1964                 handled         = 1;
1965                 data.period     = event->hw.last_period;
1966
1967                 if (!x86_perf_event_set_period(event, hwc, idx))
1968                         continue;
1969
1970                 if (perf_event_overflow(event, 1, &data, regs))
1971                         p6_pmu_disable_event(hwc, idx);
1972         }
1973
1974         if (handled)
1975                 inc_irq_stat(apic_perf_irqs);
1976
1977         return handled;
1978 }
1979
1980 /*
1981  * This handler is triggered by the local APIC, so the APIC IRQ handling
1982  * rules apply:
1983  */
1984 static int intel_pmu_handle_irq(struct pt_regs *regs)
1985 {
1986         struct perf_sample_data data;
1987         struct cpu_hw_events *cpuc;
1988         int bit, loops;
1989         u64 ack, status;
1990
1991         data.addr = 0;
1992         data.raw = NULL;
1993
1994         cpuc = &__get_cpu_var(cpu_hw_events);
1995
1996         perf_disable();
1997         intel_pmu_drain_bts_buffer(cpuc);
1998         status = intel_pmu_get_status();
1999         if (!status) {
2000                 perf_enable();
2001                 return 0;
2002         }
2003
2004         loops = 0;
2005 again:
2006         if (++loops > 100) {
2007                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2008                 perf_event_print_debug();
2009                 intel_pmu_reset();
2010                 perf_enable();
2011                 return 1;
2012         }
2013
2014         inc_irq_stat(apic_perf_irqs);
2015         ack = status;
2016         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2017                 struct perf_event *event = cpuc->events[bit];
2018
2019                 clear_bit(bit, (unsigned long *) &status);
2020                 if (!test_bit(bit, cpuc->active_mask))
2021                         continue;
2022
2023                 if (!intel_pmu_save_and_restart(event))
2024                         continue;
2025
2026                 data.period = event->hw.last_period;
2027
2028                 if (perf_event_overflow(event, 1, &data, regs))
2029                         intel_pmu_disable_event(&event->hw, bit);
2030         }
2031
2032         intel_pmu_ack_status(ack);
2033
2034         /*
2035          * Repeat if there is more work to be done:
2036          */
2037         status = intel_pmu_get_status();
2038         if (status)
2039                 goto again;
2040
2041         perf_enable();
2042
2043         return 1;
2044 }
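/*
 * GLOBAL_STATUS reports one bit per overflowed counter (generic
 * counters in the low bits, fixed counters from bit X86_PMC_IDX_FIXED
 * upwards), so the for_each_bit() loop above visits exactly the
 * counters that need servicing.  The snapshot saved into 'ack' is
 * written back via intel_pmu_ack_status() after the loop, the status
 * is re-read in case new overflows arrived in the meantime, and the
 * handler gives up with a warning after 100 iterations.
 */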
2045
2046 static int amd_pmu_handle_irq(struct pt_regs *regs)
2047 {
2048         struct perf_sample_data data;
2049         struct cpu_hw_events *cpuc;
2050         struct perf_event *event;
2051         struct hw_perf_event *hwc;
2052         int idx, handled = 0;
2053         u64 val;
2054
2055         data.addr = 0;
2056         data.raw = NULL;
2057
2058         cpuc = &__get_cpu_var(cpu_hw_events);
2059
2060         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2061                 if (!test_bit(idx, cpuc->active_mask))
2062                         continue;
2063
2064                 event = cpuc->events[idx];
2065                 hwc = &event->hw;
2066
2067                 val = x86_perf_event_update(event, hwc, idx);
2068                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2069                         continue;
2070
2071                 /*
2072                  * event overflow
2073                  */
2074                 handled         = 1;
2075                 data.period     = event->hw.last_period;
2076
2077                 if (!x86_perf_event_set_period(event, hwc, idx))
2078                         continue;
2079
2080                 if (perf_event_overflow(event, 1, &data, regs))
2081                         amd_pmu_disable_event(hwc, idx);
2082         }
2083
2084         if (handled)
2085                 inc_irq_stat(apic_perf_irqs);
2086
2087         return handled;
2088 }
2089
2090 void smp_perf_pending_interrupt(struct pt_regs *regs)
2091 {
2092         irq_enter();
2093         ack_APIC_irq();
2094         inc_irq_stat(apic_pending_irqs);
2095         perf_event_do_pending();
2096         irq_exit();
2097 }
2098
2099 void set_perf_event_pending(void)
2100 {
2101 #ifdef CONFIG_X86_LOCAL_APIC
2102         if (!x86_pmu.apic || !x86_pmu_initialized())
2103                 return;
2104
2105         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2106 #endif
2107 }
2108
2109 void perf_events_lapic_init(void)
2110 {
2111 #ifdef CONFIG_X86_LOCAL_APIC
2112         if (!x86_pmu.apic || !x86_pmu_initialized())
2113                 return;
2114
2115         /*
2116          * Always use NMI for PMU
2117          */
2118         apic_write(APIC_LVTPC, APIC_DM_NMI);
2119 #endif
2120 }
2121
2122 static int __kprobes
2123 perf_event_nmi_handler(struct notifier_block *self,
2124                          unsigned long cmd, void *__args)
2125 {
2126         struct die_args *args = __args;
2127         struct pt_regs *regs;
2128
2129         if (!atomic_read(&active_events))
2130                 return NOTIFY_DONE;
2131
2132         switch (cmd) {
2133         case DIE_NMI:
2134         case DIE_NMI_IPI:
2135                 break;
2136
2137         default:
2138                 return NOTIFY_DONE;
2139         }
2140
2141         regs = args->regs;
2142
2143 #ifdef CONFIG_X86_LOCAL_APIC
2144         apic_write(APIC_LVTPC, APIC_DM_NMI);
2145 #endif
2146         /*
2147          * Can't rely on the handled return value to say it was our NMI, two
2148          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2149          *
2150          * If the first NMI handles both, the latter will be empty and daze
2151          * the CPU.
2152          */
2153         x86_pmu.handle_irq(regs);
2154
2155         return NOTIFY_STOP;
2156 }
2157
2158 static struct event_constraint unconstrained;
2159
2160 static struct event_constraint bts_constraint =
2161         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2162
2163 static struct event_constraint *
2164 intel_special_constraints(struct perf_event *event)
2165 {
2166         unsigned int hw_event;
2167
2168         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2169
2170         if (unlikely((hw_event ==
2171                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2172                      (event->hw.sample_period == 1))) {
2173
2174                 return &bts_constraint;
2175         }
2176         return NULL;
2177 }
2178
2179 static struct event_constraint *
2180 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2181 {
2182         struct event_constraint *c;
2183
2184         c = intel_special_constraints(event);
2185         if (c)
2186                 return c;
2187
2188         if (x86_pmu.event_constraints) {
2189                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2190                         if ((event->hw.config & c->cmask) == c->code)
2191                                 return c;
2192                 }
2193         }
2194
2195         return &unconstrained;
2196 }
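/*
 * Constraint matching is a masked compare: an event matches a
 * constraint when (hw.config & c->cmask) == c->code, and the winning
 * constraint's idxmsk then limits which counters the scheduler may
 * use for it.  The special case above catches branch-instruction
 * events with a sample period of 1 and pins them onto the BTS
 * pseudo-counter instead of a generic PMC.
 */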
2197
2198 static struct event_constraint *
2199 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2200 {
2201         return &unconstrained;
2202 }
2203
2204 static int x86_event_sched_in(struct perf_event *event,
2205                           struct perf_cpu_context *cpuctx, int cpu)
2206 {
2207         int ret = 0;
2208
2209         event->state = PERF_EVENT_STATE_ACTIVE;
2210         event->oncpu = cpu;
2211         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2212
2213         if (!is_x86_event(event))
2214                 ret = event->pmu->enable(event);
2215
2216         if (!ret && !is_software_event(event))
2217                 cpuctx->active_oncpu++;
2218
2219         if (!ret && event->attr.exclusive)
2220                 cpuctx->exclusive = 1;
2221
2222         return ret;
2223 }
2224
2225 static void x86_event_sched_out(struct perf_event *event,
2226                             struct perf_cpu_context *cpuctx, int cpu)
2227 {
2228         event->state = PERF_EVENT_STATE_INACTIVE;
2229         event->oncpu = -1;
2230
2231         if (!is_x86_event(event))
2232                 event->pmu->disable(event);
2233
2234         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2235
2236         if (!is_software_event(event))
2237                 cpuctx->active_oncpu--;
2238
2239         if (event->attr.exclusive || !cpuctx->active_oncpu)
2240                 cpuctx->exclusive = 0;
2241 }
2242
2243 /*
2244  * Called to enable a whole group of events.
2245  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2246  * Assumes the caller has disabled interrupts and has
2247  * frozen the PMU with hw_perf_save_disable.
2248  *
2249  * Called with the PMU disabled. If successful (return value 1), the
2250  * caller is then guaranteed to call perf_enable() and hw_perf_enable().
2251  */
2252 int hw_perf_group_sched_in(struct perf_event *leader,
2253                struct perf_cpu_context *cpuctx,
2254                struct perf_event_context *ctx, int cpu)
2255 {
2256         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2257         struct perf_event *sub;
2258         int assign[X86_PMC_IDX_MAX];
2259         int n0, n1, ret;
2260
2261         /* n0 = total number of events */
2262         n0 = collect_events(cpuc, leader, true);
2263         if (n0 < 0)
2264                 return n0;
2265
2266         ret = x86_schedule_events(cpuc, n0, assign);
2267         if (ret)
2268                 return ret;
2269
2270         ret = x86_event_sched_in(leader, cpuctx, cpu);
2271         if (ret)
2272                 return ret;
2273
2274         n1 = 1;
2275         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2276                 if (sub->state > PERF_EVENT_STATE_OFF) {
2277                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2278                         if (ret)
2279                                 goto undo;
2280                         ++n1;
2281                 }
2282         }
2283         /*
2284          * copy the new assignment, now that we know it is possible;
2285          * it will be used by hw_perf_enable()
2286          */
2287         memcpy(cpuc->assign, assign, n0*sizeof(int));
2288
2289         cpuc->n_events  = n0;
2290         cpuc->n_added   = n1;
2291         ctx->nr_active += n1;
2292
2293         /*
2294          * 1 means successful and events are active.
2295          * This is not quite true because we defer
2296          * actual activation until hw_perf_enable(), but
2297          * this way we ensure the caller won't try to enable
2298          * individual events.
2299          */
2300         return 1;
2301 undo:
2302         x86_event_sched_out(leader, cpuctx, cpu);
2303         n0  = 1;
2304         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2305                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2306                         x86_event_sched_out(sub, cpuctx, cpu);
2307                         if (++n0 == n1)
2308                                 break;
2309                 }
2310         }
2311         return ret;
2312 }
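/*
 * The undo path mirrors the loop above: n1 counts the leader plus
 * every sibling that was scheduled in, so on failure the leader and
 * the n1 - 1 siblings that made it are scheduled out again, leaving
 * the group as it was on entry.
 */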
2313
2314 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2315         .notifier_call          = perf_event_nmi_handler,
2316         .next                   = NULL,
2317         .priority               = 1
2318 };
2319
2320 static __initconst struct x86_pmu p6_pmu = {
2321         .name                   = "p6",
2322         .handle_irq             = p6_pmu_handle_irq,
2323         .disable_all            = p6_pmu_disable_all,
2324         .enable_all             = p6_pmu_enable_all,
2325         .enable                 = p6_pmu_enable_event,
2326         .disable                = p6_pmu_disable_event,
2327         .eventsel               = MSR_P6_EVNTSEL0,
2328         .perfctr                = MSR_P6_PERFCTR0,
2329         .event_map              = p6_pmu_event_map,
2330         .raw_event              = p6_pmu_raw_event,
2331         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2332         .apic                   = 1,
2333         .max_period             = (1ULL << 31) - 1,
2334         .version                = 0,
2335         .num_events             = 2,
2336         /*
2337          * Events have 40 bits implemented. However, they are designed such
2338          * that bits [32-39] are sign extensions of bit 31. As such, the
2339          * effective width of an event for a P6-like PMU is 32 bits only.
2340          *
2341          * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
2342          */
2343         .event_bits             = 32,
2344         .event_mask             = (1ULL << 32) - 1,
2345         .get_event_constraints  = intel_get_event_constraints,
2346         .event_constraints      = intel_p6_event_constraints
2347 };
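/*
 * Because max_period is capped at 2^31 - 1, the two's-complement
 * value programmed into a P6 counter always has bit 31 set, and the
 * interrupt handlers treat a set top bit as "still counting".  Since
 * bits 32-39 only sign-extend bit 31, a 32-bit event_mask is all that
 * is ever needed here.
 */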
2348
2349 static __initconst struct x86_pmu intel_pmu = {
2350         .name                   = "Intel",
2351         .handle_irq             = intel_pmu_handle_irq,
2352         .disable_all            = intel_pmu_disable_all,
2353         .enable_all             = intel_pmu_enable_all,
2354         .enable                 = intel_pmu_enable_event,
2355         .disable                = intel_pmu_disable_event,
2356         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2357         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2358         .event_map              = intel_pmu_event_map,
2359         .raw_event              = intel_pmu_raw_event,
2360         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2361         .apic                   = 1,
2362         /*
2363          * Intel PMCs cannot be accessed sanely above 32 bit width,
2364          * so we install an artificial 1<<31 period regardless of
2365          * the generic event period:
2366          */
2367         .max_period             = (1ULL << 31) - 1,
2368         .enable_bts             = intel_pmu_enable_bts,
2369         .disable_bts            = intel_pmu_disable_bts,
2370         .get_event_constraints  = intel_get_event_constraints
2371 };
2372
2373 static __initconst struct x86_pmu amd_pmu = {
2374         .name                   = "AMD",
2375         .handle_irq             = amd_pmu_handle_irq,
2376         .disable_all            = amd_pmu_disable_all,
2377         .enable_all             = amd_pmu_enable_all,
2378         .enable                 = amd_pmu_enable_event,
2379         .disable                = amd_pmu_disable_event,
2380         .eventsel               = MSR_K7_EVNTSEL0,
2381         .perfctr                = MSR_K7_PERFCTR0,
2382         .event_map              = amd_pmu_event_map,
2383         .raw_event              = amd_pmu_raw_event,
2384         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2385         .num_events             = 4,
2386         .event_bits             = 48,
2387         .event_mask             = (1ULL << 48) - 1,
2388         .apic                   = 1,
2389         /* use highest bit to detect overflow */
2390         .max_period             = (1ULL << 47) - 1,
2391         .get_event_constraints  = amd_get_event_constraints
2392 };
2393
2394 static __init int p6_pmu_init(void)
2395 {
2396         switch (boot_cpu_data.x86_model) {
2397         case 1:
2398         case 3:  /* Pentium Pro */
2399         case 5:
2400         case 6:  /* Pentium II */
2401         case 7:
2402         case 8:
2403         case 11: /* Pentium III */
2404         case 9:
2405         case 13:
2406                 /* Pentium M */
2407                 break;
2408         default:
2409                 pr_cont("unsupported p6 CPU model %d ",
2410                         boot_cpu_data.x86_model);
2411                 return -ENODEV;
2412         }
2413
2414         x86_pmu = p6_pmu;
2415
2416         return 0;
2417 }
2418
2419 static __init int intel_pmu_init(void)
2420 {
2421         union cpuid10_edx edx;
2422         union cpuid10_eax eax;
2423         unsigned int unused;
2424         unsigned int ebx;
2425         int version;
2426
2427         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2428                 /* check for P6 processor family */
2429                 if (boot_cpu_data.x86 == 6) {
2430                         return p6_pmu_init();
2431                 } else {
2432                         return -ENODEV;
2433                 }
2434         }
2435
2436         /*
2437          * Check whether the Architectural PerfMon supports
2438          * Branch Misses Retired hw_event or not.
2439          */
2440         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2441         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2442                 return -ENODEV;
2443
2444         version = eax.split.version_id;
2445         if (version < 2)
2446                 return -ENODEV;
2447
2448         x86_pmu                         = intel_pmu;
2449         x86_pmu.version                 = version;
2450         x86_pmu.num_events              = eax.split.num_events;
2451         x86_pmu.event_bits              = eax.split.bit_width;
2452         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2453
2454         /*
2455          * Quirk: v2 perfmon does not report fixed-purpose events, so
2456          * assume at least 3 events:
2457          */
2458         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2459
2460         /*
2461          * Install the hw-cache-events table:
2462          */
2463         switch (boot_cpu_data.x86_model) {
2464         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2465         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2466         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2467         case 29: /* six-core 45 nm xeon "Dunnington" */
2468                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2469                        sizeof(hw_cache_event_ids));
2470
2471                 x86_pmu.event_constraints = intel_core_event_constraints;
2472                 pr_cont("Core2 events, ");
2473                 break;
2474         case 26:
2475                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2476                        sizeof(hw_cache_event_ids));
2477
2478                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2479                 pr_cont("Nehalem/Corei7 events, ");
2480                 break;
2481         case 28:
2482                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2483                        sizeof(hw_cache_event_ids));
2484
2485                 x86_pmu.event_constraints = intel_gen_event_constraints;
2486                 pr_cont("Atom events, ");
2487                 break;
2488         default:
2489                 /*
2490                  * default constraints for v2 and up
2491                  */
2492                 x86_pmu.event_constraints = intel_gen_event_constraints;
2493                 pr_cont("generic architected perfmon, ");
2494         }
2495         return 0;
2496 }
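/*
 * A quick sketch of what CPUID leaf 0xA hands back here: EAX carries
 * the architectural perfmon version, the number of generic counters,
 * their bit width and the event-mask length (the split fields used
 * above), while EDX describes the fixed-function counters.
 */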
2497
2498 static __init int amd_pmu_init(void)
2499 {
2500         /* Performance-monitoring supported from K7 and later: */
2501         if (boot_cpu_data.x86 < 6)
2502                 return -ENODEV;
2503
2504         x86_pmu = amd_pmu;
2505
2506         /* Events are common for all AMDs */
2507         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2508                sizeof(hw_cache_event_ids));
2509
2510         return 0;
2511 }
2512
2513 static void __init pmu_check_apic(void)
2514 {
2515         if (cpu_has_apic)
2516                 return;
2517
2518         x86_pmu.apic = 0;
2519         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2520         pr_info("no hardware sampling interrupt available.\n");
2521 }
2522
2523 void __init init_hw_perf_events(void)
2524 {
2525         int err;
2526
2527         pr_info("Performance Events: ");
2528
2529         switch (boot_cpu_data.x86_vendor) {
2530         case X86_VENDOR_INTEL:
2531                 err = intel_pmu_init();
2532                 break;
2533         case X86_VENDOR_AMD:
2534                 err = amd_pmu_init();
2535                 break;
2536         default:
2537                 return;
2538         }
2539         if (err != 0) {
2540                 pr_cont("no PMU driver, software events only.\n");
2541                 return;
2542         }
2543
2544         pmu_check_apic();
2545
2546         pr_cont("%s PMU driver.\n", x86_pmu.name);
2547
2548         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2549                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2550                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2551                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2552         }
2553         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2554         perf_max_events = x86_pmu.num_events;
2555
2556         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2557                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2558                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2559                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2560         }
2561
2562         perf_event_mask |=
2563                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2564         x86_pmu.intel_ctrl = perf_event_mask;
2565
2566         perf_events_lapic_init();
2567         register_die_notifier(&perf_event_nmi_notifier);
2568
2569         unconstrained = (struct event_constraint)
2570                 EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
2571
2572         pr_info("... version:                %d\n",     x86_pmu.version);
2573         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2574         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2575         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2576         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2577         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2578         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2579 }
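/*
 * As a concrete example of the mask computed above: a PMU with four
 * generic and three fixed counters ends up with
 * perf_event_mask == 0xf | (0x7ULL << X86_PMC_IDX_FIXED), i.e. one
 * bit per counter that the global control/status MSRs know about.
 */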
2580
2581 static inline void x86_pmu_read(struct perf_event *event)
2582 {
2583         x86_perf_event_update(event, &event->hw, event->hw.idx);
2584 }
2585
2586 static const struct pmu pmu = {
2587         .enable         = x86_pmu_enable,
2588         .disable        = x86_pmu_disable,
2589         .read           = x86_pmu_read,
2590         .unthrottle     = x86_pmu_unthrottle,
2591 };
2592
2593 /*
2594  * validate a single event group
2595  *
2596  * validation includes:
2597  *      - check events are compatible with each other
2598  *      - events do not compete for the same counter
2599  *      - number of events <= number of counters
2600  *
2601  * validation ensures the group can be loaded onto the
2602  * PMU if it was the only group available.
2603  */
2604 static int validate_group(struct perf_event *event)
2605 {
2606         struct perf_event *leader = event->group_leader;
2607         struct cpu_hw_events *fake_cpuc;
2608         int ret, n;
2609
2610         ret = -ENOMEM;
2611         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2612         if (!fake_cpuc)
2613                 goto out;
2614
2615         /*
2616          * the event is not yet connected with its
2617          * siblings, therefore we must first collect
2618          * existing siblings, then add the new event
2619          * before we can simulate the scheduling
2620          */
2621         ret = -ENOSPC;
2622         n = collect_events(fake_cpuc, leader, true);
2623         if (n < 0)
2624                 goto out_free;
2625
2626         fake_cpuc->n_events = n;
2627         n = collect_events(fake_cpuc, event, false);
2628         if (n < 0)
2629                 goto out_free;
2630
2631         fake_cpuc->n_events = n;
2632
2633         ret = x86_schedule_events(fake_cpuc, n, NULL);
2634
2635 out_free:
2636         kfree(fake_cpuc);
2637 out:
2638         return ret;
2639 }
2640
2641 const struct pmu *hw_perf_event_init(struct perf_event *event)
2642 {
2643         const struct pmu *tmp;
2644         int err;
2645
2646         err = __hw_perf_event_init(event);
2647         if (!err) {
2648                 /*
2649                  * we temporarily connect the event to its pmu
2650                  * such that validate_group() can classify
2651                  * it as an x86 event using is_x86_event()
2652                  */
2653                 tmp = event->pmu;
2654                 event->pmu = &pmu;
2655
2656                 if (event->group_leader != event)
2657                         err = validate_group(event);
2658
2659                 event->pmu = tmp;
2660         }
2661         if (err) {
2662                 if (event->destroy)
2663                         event->destroy(event);
2664                 return ERR_PTR(err);
2665         }
2666
2667         return &pmu;
2668 }
2669
2670 /*
2671  * callchain support
2672  */
2673
2674 static inline
2675 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2676 {
2677         if (entry->nr < PERF_MAX_STACK_DEPTH)
2678                 entry->ip[entry->nr++] = ip;
2679 }
2680
2681 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2682 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2683
2684
2685 static void
2686 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2687 {
2688         /* Ignore warnings */
2689 }
2690
2691 static void backtrace_warning(void *data, char *msg)
2692 {
2693         /* Ignore warnings */
2694 }
2695
2696 static int backtrace_stack(void *data, char *name)
2697 {
2698         return 0;
2699 }
2700
2701 static void backtrace_address(void *data, unsigned long addr, int reliable)
2702 {
2703         struct perf_callchain_entry *entry = data;
2704
2705         if (reliable)
2706                 callchain_store(entry, addr);
2707 }
2708
2709 static const struct stacktrace_ops backtrace_ops = {
2710         .warning                = backtrace_warning,
2711         .warning_symbol         = backtrace_warning_symbol,
2712         .stack                  = backtrace_stack,
2713         .address                = backtrace_address,
2714         .walk_stack             = print_context_stack_bp,
2715 };
2716
2717 #include "../dumpstack.h"
2718
2719 static void
2720 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2721 {
2722         callchain_store(entry, PERF_CONTEXT_KERNEL);
2723         callchain_store(entry, regs->ip);
2724
2725         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2726 }
2727
2728 /*
2729  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2730  */
2731 static unsigned long
2732 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2733 {
2734         unsigned long offset, addr = (unsigned long)from;
2735         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2736         unsigned long size, len = 0;
2737         struct page *page;
2738         void *map;
2739         int ret;
2740
2741         do {
2742                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2743                 if (!ret)
2744                         break;
2745
2746                 offset = addr & (PAGE_SIZE - 1);
2747                 size = min(PAGE_SIZE - offset, n - len);
2748
2749                 map = kmap_atomic(page, type);
2750                 memcpy(to, map+offset, size);
2751                 kunmap_atomic(map, type);
2752                 put_page(page);
2753
2754                 len  += size;
2755                 to   += size;
2756                 addr += size;
2757
2758         } while (len < n);
2759
2760         return len;
2761 }
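/*
 * A regular copy_from_user() could fault and sleep, which is not an
 * option in NMI context, so the helper above pins each user page with
 * __get_user_pages_fast(), maps it with kmap_atomic() (using the
 * KM_NMI or KM_IRQ0 slot depending on context), memcpy()s out of the
 * mapping and gives up early if a page cannot be pinned.
 */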
2762
2763 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2764 {
2765         unsigned long bytes;
2766
2767         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2768
2769         return bytes == sizeof(*frame);
2770 }
2771
2772 static void
2773 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2774 {
2775         struct stack_frame frame;
2776         const void __user *fp;
2777
2778         if (!user_mode(regs))
2779                 regs = task_pt_regs(current);
2780
2781         fp = (void __user *)regs->bp;
2782
2783         callchain_store(entry, PERF_CONTEXT_USER);
2784         callchain_store(entry, regs->ip);
2785
2786         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2787                 frame.next_frame     = NULL;
2788                 frame.return_address = 0;
2789
2790                 if (!copy_stack_frame(fp, &frame))
2791                         break;
2792
2793                 if ((unsigned long)fp < regs->sp)
2794                         break;
2795
2796                 callchain_store(entry, frame.return_address);
2797                 fp = frame.next_frame;
2798         }
2799 }
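/*
 * The user-space walk above assumes frame pointers: starting from
 * regs->bp it copies what is presumably a saved frame pointer plus
 * return address pair (struct stack_frame) out of user memory,
 * records the return address, and follows next_frame until the copy
 * fails, the frame pointer drops below the stack pointer, or the
 * callchain entry is full.
 */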
2800
2801 static void
2802 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2803 {
2804         int is_user;
2805
2806         if (!regs)
2807                 return;
2808
2809         is_user = user_mode(regs);
2810
2811         if (is_user && current->state != TASK_RUNNING)
2812                 return;
2813
2814         if (!is_user)
2815                 perf_callchain_kernel(regs, entry);
2816
2817         if (current->mm)
2818                 perf_callchain_user(regs, entry);
2819 }
2820
2821 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2822 {
2823         struct perf_callchain_entry *entry;
2824
2825         if (in_nmi())
2826                 entry = &__get_cpu_var(pmc_nmi_entry);
2827         else
2828                 entry = &__get_cpu_var(pmc_irq_entry);
2829
2830         entry->nr = 0;
2831
2832         perf_do_callchain(regs, entry);
2833
2834         return entry;
2835 }
2836
2837 void hw_perf_event_setup_online(int cpu)
2838 {
2839         init_debug_store_on_cpu(cpu);
2840 }