/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
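
/*
 * For PERF_TYPE_HW_CACHE events the three enums above are packed into
 * attr.config as (id) | (op << 8) | (result << 16).  A minimal sketch
 * of selecting L1 data-cache read misses (illustrative only):
 *
 *      __u64 config = PERF_COUNT_HW_CACHE_L1D |
 *                     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */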

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various software
 * occurrences in the kernel (and allow profiling them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
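
/*
 * Reading a non-group event with both time fields selected can thus be
 * decoded with a struct mirroring the layout above; a minimal
 * user-space sketch (illustrative, assumes read_format was set to
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING):
 *
 *      struct { __u64 value, time_enabled, time_running; } rf;
 *
 *      if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
 *              scaled = rf.value * rf.time_enabled / rf.time_running;
 */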

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                __reserved_1   : 45;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        __u64                   bp_addr;
        __u64                   bp_len;
};
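
/*
 * There is no C library wrapper for sys_perf_event_open(); user-space
 * normally goes through syscall(2).  A minimal sketch (illustrative,
 * error handling elided) counting instructions in the calling task on
 * any CPU:
 *
 *      struct perf_event_attr attr;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */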

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
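
/*
 * Typical use with an event created disabled (attr.disabled = 1); a
 * minimal sketch, assuming fd came from sys_perf_event_open() as
 * above:
 *
 *      __u64 count;
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ... run the workload ...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */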

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

                /*
                 * Hole for extension of the self monitor capabilities
                 */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not overwrite unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
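
/*
 * Consuming the mmap()ed data area follows from the fields above; a
 * minimal user-space sketch (illustrative; assumes a PROT_WRITE
 * mapping of the perf_event_mmap_page followed by 2^n data pages of
 * "size" bytes total, and elides records wrapping the buffer edge):
 *
 *      struct perf_event_mmap_page *pc = base;
 *      char *data = (char *)base + page_size;
 *      __u64 head = pc->data_head;
 *
 *      rmb();
 *      while (pc->data_tail != head) {
 *              struct perf_event_header *hdr;
 *
 *              hdr = (void *)(data + (pc->data_tail & (size - 1)));
 *              ... consume hdr->size bytes ...
 *              pc->data_tail += hdr->size;
 *      }
 */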

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * carry the sample_type-selected fields that identify where/when an
         * event took place (TID, TIME, ID, CPU, STREAM_ID), as described for
         * PERF_RECORD_SAMPLE below. They are stashed just after the
         * perf_event_header and the fields already present for the record
         * type, i.e. at the end of the payload. That way a newer perf.data
         * file will be supported by older perf tools, with these new optional
         * fields being ignored.
         *
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size];} && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
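
/*
 * Decoding a record therefore depends entirely on the attr.sample_type
 * bits that were requested; a minimal sketch for sample_type =
 * PERF_SAMPLE_IP | PERF_SAMPLE_TID (illustrative only):
 *
 *      if (hdr->type == PERF_RECORD_SAMPLE) {
 *              struct {
 *                      struct perf_event_header header;
 *                      __u64 ip;
 *                      __u32 pid, tid;
 *              } *s = (void *)hdr;
 *
 *              ... use s->ip, s->pid, s->tid ...
 *      }
 */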

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest) (void);
        int (*is_user_mode) (void);
        unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct perf_branch_entry {
        __u64                           from;
        __u64                           to;
        __u64                           flags;
};

struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        char                            *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU, can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group's events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)       (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction; assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)      (struct pmu *pmu); /* optional */
};
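
/*
 * The core drives a group through these hooks roughly as follows; a
 * condensed sketch (illustrative, not the exact core implementation):
 *
 *      pmu->start_txn(pmu);
 *      for each event in the group:
 *              if (pmu->add(event, PERF_EF_START))
 *                      goto rollback;
 *      if (!pmu->commit_txn(pmu))
 *              return 0;               success, transaction closed
 * rollback:
 *      pmu->del() each event already added;
 *      pmu->cancel_txn(pmu);
 */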

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

#define PERF_BUFFER_WRITABLE            0x01

struct perf_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head       heads[SWEVENT_HLIST_SIZE];
        struct rcu_head         rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * shadow_ctx_time shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct perf_buffer              *buffer;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        enum perf_event_context_type    type;
        struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *active_pmu;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_buffer              *buffer;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
        int                             nmi;
        int                             sample;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
        data->addr = addr;
        data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
        return;

have_event:
        if (!regs) {
                perf_fetch_caller_regs(&hot_regs);
                regs = &hot_regs;
        }
        __perf_sw_event(event_id, nr, nmi, regs, addr);
}
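
/*
 * A typical call site, as in an architecture's page fault path
 * (illustrative):
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */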

extern atomic_t perf_task_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
{
        COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

        COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
                                struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs);

static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
                     struct pt_regs *regs, u64 addr)                    { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline void perf_event_task_tick(void)                           { }
#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))
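
/*
 * Record emission pairs perf_output_begin()/perf_output_end() around
 * one or more puts/copies; a condensed sketch (illustrative):
 *
 *      struct perf_output_handle handle;
 *
 *      if (perf_output_begin(&handle, event, header.size, nmi, 1))
 *              return;
 *      perf_output_put(&handle, header);
 *      ... further perf_output_put()/perf_output_copy() calls ...
 *      perf_output_end(&handle);
 */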

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                   \
do {                                                            \
        static struct notifier_block fn##_nb __cpuinitdata =    \
                { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                 \
                (void *)(unsigned long)smp_processor_id());     \
        register_cpu_notifier(&fn##_nb);                        \
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */