perf tools: Add mem access sampling core support
[linux-3.10.git] tools/perf/util/session.c
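For orientation, this is roughly how a perf built-in drives the API defined in this file: fill a perf_tool with callbacks, open a session, and let the session pump events through the tool. This is a minimal sketch, not code from the tree; my_sample_handler and cat_samples are hypothetical names, and error handling is abbreviated.

	#include "util/session.h"
	#include "util/tool.h"

	static int my_sample_handler(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
	{
		/* one PERF_RECORD_SAMPLE, already parsed and (optionally) time-ordered */
		printf("%d/%d: %#" PRIx64 "\n", sample->pid, sample->tid, sample->ip);
		return 0;
	}

	int cat_samples(void)
	{
		struct perf_tool tool = {
			.sample          = my_sample_handler,
			.ordered_samples = true,   /* deliver in timestamp order */
		};
		struct perf_session *session;
		int err;

		session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
		if (session == NULL)
			return -ENOMEM;

		err = perf_session__process_events(session, &tool);
		perf_session__delete(session);
		return err;
	}

Callbacks left NULL are filled in with the stubs defined below by perf_tool__fill_defaults().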
#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"
#include "perf_regs.h"
#include "vdso.h"

static int perf_session__open(struct perf_session *self, bool force)
{
        struct stat input_stat;

        if (!strcmp(self->filename, "-")) {
                self->fd_pipe = true;
                self->fd = STDIN_FILENO;

                if (perf_session__read_header(self, self->fd) < 0)
                        pr_err("incompatible file format (rerun with -v to learn more)");

                return 0;
        }

        self->fd = open(self->filename, O_RDONLY);
        if (self->fd < 0) {
                int err = errno;

                pr_err("failed to open %s: %s", self->filename, strerror(err));
                if (err == ENOENT && !strcmp(self->filename, "perf.data"))
                        pr_err("  (try 'perf record' first)");
                pr_err("\n");
                return -err;
        }

        if (fstat(self->fd, &input_stat) < 0)
                goto out_close;

        if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
                pr_err("file %s not owned by current user or root\n",
                       self->filename);
                goto out_close;
        }

        if (!input_stat.st_size) {
                pr_info("zero-sized file (%s), nothing to do!\n",
                        self->filename);
                goto out_close;
        }

        if (perf_session__read_header(self, self->fd) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)");
                goto out_close;
        }

        if (!perf_evlist__valid_sample_type(self->evlist)) {
                pr_err("non matching sample_type");
                goto out_close;
        }

        if (!perf_evlist__valid_sample_id_all(self->evlist)) {
                pr_err("non matching sample_id_all");
                goto out_close;
        }

        self->size = input_stat.st_size;
        return 0;

out_close:
        close(self->fd);
        self->fd = -1;
        return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
        int ret = machine__create_kernel_maps(&self->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
        machines__destroy_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_tool *tool)
{
        struct perf_session *self;
        struct stat st;
        size_t len;

        if (!filename || !strlen(filename)) {
                if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
                        filename = "-";
                else
                        filename = "perf.data";
        }

        len = strlen(filename);
        self = zalloc(sizeof(*self) + len);

        if (self == NULL)
                goto out;

        memcpy(self->filename, filename, len);
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machines__init(&self->machines);

        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
                        goto out_delete;
                perf_session__set_id_hdr_size(self);
        } else if (mode == O_WRONLY) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(self) < 0)
                        goto out_delete;
        }

        if (tool && tool->ordering_requires_timestamps &&
            tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_samples = false;
        }

out:
        return self;
out_delete:
        perf_session__delete(self);
        return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
        machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
        machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
        free(env->hostname);
        free(env->os_release);
        free(env->version);
        free(env->arch);
        free(env->cpu_desc);
        free(env->cpuid);

        free(env->cmdline);
        free(env->sibling_cores);
        free(env->sibling_threads);
        free(env->numa_nodes);
        free(env->pmu_mappings);
}

void perf_session__delete(struct perf_session *self)
{
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
        perf_session_env__delete(&self->header.env);
        machines__exit(&self->machines);
        close(self->fd);
        free(self);
        vdso__exit();
}

static int process_event_synth_tracing_data_stub(union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
                                                __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
                                         struct perf_evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct perf_evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct perf_session *perf_session
                                       __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event,
                                  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_type == NULL)
                tool->event_type = process_event_type_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_finished_round_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_samples)
                        tool->finished_round = process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
}

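/*
 * In-place byte-swap helpers, used when the perf.data file was recorded
 * on a machine with the opposite endianness.
 */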
void mem_bswap_32(void *src, int byte_size)
{
        u32 *m = src;
        while (byte_size > 0) {
                *m = bswap_32(*m);
                byte_size -= sizeof(u32);
                ++m;
        }
}

void mem_bswap_64(void *src, int byte_size)
{
        u64 *m = src;

        while (byte_size > 0) {
                *m = bswap_64(*m);
                byte_size -= sizeof(u64);
                ++m;
        }
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

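/*
 * Reverse the bit order of one byte: swap the nibbles, then the 2-bit
 * pairs within each nibble, then adjacent bits.
 */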
static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);
        attr->config            = bswap_64(attr->config);
        attr->sample_period     = bswap_64(attr->sample_period);
        attr->sample_type       = bswap_64(attr->sample_type);
        attr->read_format       = bswap_64(attr->read_format);
        attr->wakeup_events     = bswap_32(attr->wakeup_events);
        attr->bp_type           = bswap_32(attr->bp_type);
        attr->bp_addr           = bswap_64(attr->bp_addr);
        attr->bp_len            = bswap_64(attr->bp_len);

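        /*
         * The u64 worth of bitfield flags (disabled, inherit, ...) follows
         * read_format in struct perf_event_attr, hence read_format + 1.
         */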
        swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

struct sample_queue {
        u64                     timestamp;
        u64                     file_offset;
        union perf_event        *event;
        struct list_head        list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
        struct ordered_samples *os = &session->ordered_samples;

        while (!list_empty(&os->to_free)) {
                struct sample_queue *sq;

                sq = list_entry(os->to_free.next, struct sample_queue, list);
                list_del(&sq->list);
                free(sq);
        }
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset);

static int flush_sample_queue(struct perf_session *s,
                              struct perf_tool *tool)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *head = &os->samples;
        struct sample_queue *tmp, *iter;
        struct perf_sample sample;
        u64 limit = os->next_flush;
        u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
        unsigned idx = 0, progress_next = os->nr_samples / 16;
        int ret;

        if (!tool->ordered_samples || !limit)
                return 0;

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (iter->timestamp > limit)
                        break;

                ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
                if (ret)
                        pr_err("Can't parse sample, err = %d\n", ret);
                else {
                        ret = perf_session_deliver_event(s, iter->event, &sample, tool,
                                                         iter->file_offset);
                        if (ret)
                                return ret;
                }

                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
                if (++idx >= progress_next) {
                        progress_next += os->nr_samples / 16;
                        ui_progress__update(idx, os->nr_samples,
                                            "Processing time ordered events...");
                }
        }

        if (list_empty(head)) {
                os->last_sample = NULL;
        } else if (last_ts <= limit) {
                os->last_sample =
                        list_entry(head->prev, struct sample_queue, list);
        }

        os->nr_samples = 0;

        return 0;
}

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
                                  union perf_event *event __maybe_unused,
                                  struct perf_session *session)
{
        int ret = flush_sample_queue(session, tool);
        if (!ret)
                session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

        return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct sample_queue *sample = os->last_sample;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++os->nr_samples;
        os->last_sample = new;

        if (!sample) {
                list_add(&new->list, &os->samples);
                os->max_timestamp = timestamp;
                return;
        }

        /*
         * last_sample might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
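        /*
         * Scan forward from last_sample for a later timestamp, backward for
         * an earlier one: the insertion cost is proportional to the distance
         * from last_sample, not to the queue length.
         */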
        if (sample->timestamp <= timestamp) {
                while (sample->timestamp <= timestamp) {
                        p = sample->list.next;
                        if (p == &os->samples) {
                                list_add_tail(&new->list, &os->samples);
                                os->max_timestamp = timestamp;
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add_tail(&new->list, &sample->list);
        } else {
                while (sample->timestamp > timestamp) {
                        p = sample->list.prev;
                        if (p == &os->samples) {
                                list_add(&new->list, &os->samples);
                                return;
                        }
                        sample = list_entry(p, struct sample_queue, list);
                }
                list_add(&new->list, &sample->list);
        }
}

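/*
 * Carve sample_queue nodes out of 64KB slabs instead of mallocing them
 * one by one; whole slabs are freed via the to_free list.
 */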
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                                    struct perf_sample *sample, u64 file_offset)
{
        struct ordered_samples *os = &s->ordered_samples;
        struct list_head *sc = &os->sample_cache;
        u64 timestamp = sample->time;
        struct sample_queue *new;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < s->ordered_samples.last_flush) {
                printf("Warning: Timestamp below last timeslice flush\n");
                return -EINVAL;
        }

        if (!list_empty(sc)) {
                new = list_entry(sc->next, struct sample_queue, list);
                list_del(&new->list);
        } else if (os->sample_buffer) {
                new = os->sample_buffer + os->sample_buffer_idx;
                if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
                        os->sample_buffer = NULL;
        } else {
                os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
                if (!os->sample_buffer)
                        return -ENOMEM;
                list_add(&os->sample_buffer->list, &os->to_free);
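                /*
                 * Slot 0's embedded list node was just used to link the slab
                 * into to_free, so start handing out nodes at slot 1.
                 */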
                os->sample_buffer_idx = 2;
                new = os->sample_buffer + 1;
        }

        new->timestamp = timestamp;
        new->file_offset = file_offset;
        new->event = event;

        __queue_event(new, s);

        return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
        unsigned int i;

        printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

        for (i = 0; i < sample->callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
        uint64_t i;

        printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

        for (i = 0; i < sample->branch_stack->nr; i++)
                printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
                        i, sample->branch_stack->entries[i].from,
                        sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%" PRIx64 "\n",
                       perf_reg_name(rid), val);
        }
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs) {
                printf("... user regs: mask 0x%" PRIx64 "\n", mask);
                regs_dump__printf(mask, user_regs->regs);
        }
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_sample *sample)
{
        u64 sample_type = perf_evlist__sample_type(session->evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !perf_evlist__sample_id_all(session->evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if ((sample_type & PERF_SAMPLE_CPU))
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
                       u64 file_offset, struct perf_sample *sample)
{
        if (!dump_trace)
                return;

        printf("\n%#" PRIx64 " [%#x]: event: %d\n",
               file_offset, event->header.size, event->header.type);

        trace_event(event);

        if (sample)
                perf_session__print_tstamp(session, event, sample);

        printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
               event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                        struct perf_sample *sample)
{
        u64 sample_type;

        if (!dump_trace)
                return;

        printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);

        sample_type = evsel->attr.sample_type;

        if (sample_type & PERF_SAMPLE_CALLCHAIN)
                callchain__printf(sample);

        if (sample_type & PERF_SAMPLE_BRANCH_STACK)
                branch_stack__printf(sample);

        if (sample_type & PERF_SAMPLE_REGS_USER)
                regs_user__printf(sample, evsel->attr.sample_regs_user);

        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);

        if (sample_type & PERF_SAMPLE_WEIGHT)
                printf("... weight: %" PRIu64 "\n", sample->weight);

        if (sample_type & PERF_SAMPLE_DATA_SRC)
                printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
}

static struct machine *
        perf_session__find_machine_for_cpumode(struct perf_session *session,
                                               union perf_event *event)
{
        const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        if (perf_guest &&
            ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
             (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
                u32 pid;

                if (event->header.type == PERF_RECORD_MMAP)
                        pid = event->mmap.pid;
                else
                        pid = event->ip.pid;

                return perf_session__findnew_machine(session, pid);
        }

        return &session->machines.host;
}

static int perf_session_deliver_event(struct perf_session *session,
                                      union perf_event *event,
                                      struct perf_sample *sample,
                                      struct perf_tool *tool,
                                      u64 file_offset)
{
        struct perf_evsel *evsel;
        struct machine *machine;

        dump_event(session, event, file_offset, sample);

        evsel = perf_evlist__id2evsel(session->evlist, sample->id);
        if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
                /*
                 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
                 * because the tools right now may apply filters, discarding
                 * some of the samples. For consistency, in the future we
                 * should have something like nr_filtered_samples and remove
                 * the sample->period from total_sample_period, etc. KISS for
                 * now though.
                 *
                 * Also testing against NULL allows us to handle files without
                 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
                 * future it will probably be a good idea to restrict event
                 * processing via perf_session to files with both set.
                 */
                hists__inc_nr_events(&evsel->hists, event->header.type);
        }

        machine = perf_session__find_machine_for_cpumode(session, event);

        switch (event->header.type) {
        case PERF_RECORD_SAMPLE:
                dump_sample(evsel, event, sample);
                if (evsel == NULL) {
                        ++session->stats.nr_unknown_id;
                        return 0;
                }
                if (machine == NULL) {
                        ++session->stats.nr_unprocessable_samples;
                        return 0;
                }
                return tool->sample(tool, event, sample, evsel, machine);
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
        case PERF_RECORD_FORK:
                return tool->fork(tool, event, sample, machine);
        case PERF_RECORD_EXIT:
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
                        session->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        default:
                ++session->stats.nr_unknown_events;
                return -1;
        }
}

static int perf_session__preprocess_sample(struct perf_session *session,
                                           union perf_event *event, struct perf_sample *sample)
{
        if (event->header.type != PERF_RECORD_SAMPLE ||
            !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
                return 0;

        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
                ++session->stats.nr_invalid_chains;
                session->stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
                                            struct perf_tool *tool, u64 file_offset)
{
        int err;

        dump_event(session, event, file_offset, NULL);

        /* These events are processed right away */
        switch (event->header.type) {
        case PERF_RECORD_HEADER_ATTR:
                err = tool->attr(event, &session->evlist);
                if (err == 0)
                        perf_session__set_id_hdr_size(session);
                return err;
        case PERF_RECORD_HEADER_EVENT_TYPE:
                return tool->event_type(tool, event);
        case PERF_RECORD_HEADER_TRACING_DATA:
                /* setup for reading amidst mmap */
                lseek(session->fd, file_offset, SEEK_SET);
                return tool->tracing_data(event, session);
        case PERF_RECORD_HEADER_BUILD_ID:
                return tool->build_id(tool, event, session);
        case PERF_RECORD_FINISHED_ROUND:
                return tool->finished_round(tool, event, session);
        default:
                return -EINVAL;
        }
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
        perf_event__swap_op swap;

        swap = perf_event__swap_ops[event->header.type];
        if (swap)
                swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
{
        struct perf_sample sample;
        int ret;

        if (session->header.needs_swap)
                event_swap(event, perf_evlist__sample_id_all(session->evlist));

        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;

        events_stats__inc(&session->stats, event->header.type);

        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, tool, file_offset);

        /*
         * For all kernel events we get the sample data
         */
        ret = perf_evlist__parse_sample(session->evlist, event, &sample);
        if (ret)
                return ret;

        /* Preprocess sample records - precheck callchains */
        if (perf_session__preprocess_sample(session, event, &sample))
                return 0;

        if (tool->ordered_samples) {
                ret = perf_session_queue_event(session, event, &sample,
                                               file_offset);
                if (ret != -ETIME)
                        return ret;
        }

        return perf_session_deliver_event(session, event, &sample, tool,
                                          file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
        self->type = bswap_32(self->type);
        self->misc = bswap_16(self->misc);
        self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
        return machine__findnew_thread(&session->machines.host, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
        struct thread *thread = perf_session__findnew(self, 0);

        if (thread == NULL || thread__set_comm(thread, "swapper")) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
        }

        return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_tool *tool)
{
        if (tool->lost == perf_event__process_lost &&
            session->stats.nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
                            session->stats.nr_events[0],
                            session->stats.nr_events[PERF_RECORD_LOST]);
        }

        if (session->stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->stats.nr_unknown_events);
        }

        if (session->stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
                            session->stats.nr_unknown_id);
        }

        if (session->stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
                            session->stats.nr_invalid_chains,
                            session->stats.nr_events[PERF_RECORD_SAMPLE]);
        }

        if (session->stats.nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            session->stats.nr_unprocessable_samples);
        }
}

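/*
 * Going through this macro forces a fresh load on every check;
 * session_done is set asynchronously (e.g. from a signal handler)
 * to stop the pipe processing loop below.
 */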
#define session_done()  (*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
                                               struct perf_tool *tool)
{
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
        int skip = 0;
        u64 head;
        int err;
        void *p;

        perf_tool__fill_defaults(tool);

        head = 0;
        cur_size = sizeof(union perf_event);

        buf = malloc(cur_size);
        if (!buf)
                return -errno;
more:
        event = buf;
        err = readn(self->fd, event, sizeof(struct perf_event_header));
        if (err <= 0) {
                if (err == 0)
                        goto done;

                pr_err("failed to read event header\n");
                goto out_err;
        }

        if (self->header.needs_swap)
                perf_event_header__bswap(&event->header);

        size = event->header.size;
        if (size == 0)
                size = 8;

        if (size > cur_size) {
                void *new = realloc(buf, size);
                if (!new) {
                        pr_err("failed to allocate memory to read event\n");
                        goto out_err;
                }
                buf = new;
                cur_size = size;
                event = buf;
        }
        p = event;
        p += sizeof(struct perf_event_header);

        if (size - sizeof(struct perf_event_header)) {
                err = readn(self->fd, p, size - sizeof(struct perf_event_header));
                if (err <= 0) {
                        if (err == 0) {
                                pr_err("unexpected end of event stream\n");
                                goto done;
                        }

                        pr_err("failed to read event data\n");
                        goto out_err;
                }
        }

        if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       head, event->header.size, event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;

        if (skip > 0)
                head += skip;

        if (!session_done())
                goto more;
done:
        err = 0;
out_err:
        free(buf);
        perf_session__warn_about_errors(self, tool);
        perf_session_free_sample_buffers(self);
        return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
                   u64 head, size_t mmap_size, char *buf)
{
        union perf_event *event;

        /*
         * Ensure we have enough space remaining to read
         * the size of the event in the headers.
         */
        if (head + sizeof(event->header) > mmap_size)
                return NULL;

        event = (union perf_event *)(buf + head);

        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);

        if (head + event->header.size > mmap_size)
                return NULL;

        return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_tool *tool)
{
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t  mmap_size;
        char *buf, *mmaps[NUM_MMAPS];
        union perf_event *event;
        uint32_t size;

        perf_tool__fill_defaults(tool);

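        /*
         * mmap() file offsets must be page aligned, so map from the page
         * containing data_offset and keep the remainder in 'head'.
         */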
        page_offset = page_size * (data_offset / page_size);
        file_offset = page_offset;
        head = data_offset - page_offset;

        if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;

        progress_next = file_size / 16;

        mmap_size = MMAP_SIZE;
        if (mmap_size > file_size)
                mmap_size = file_size;

        memset(mmaps, 0, sizeof(mmaps));

        mmap_prot  = PROT_READ;
        mmap_flags = MAP_SHARED;

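        /*
         * Cross-endian files get byte-swapped in place, so the mapping must
         * be writable; MAP_PRIVATE keeps those writes from reaching the file.
         */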
        if (session->header.needs_swap) {
                mmap_prot  |= PROT_WRITE;
                mmap_flags = MAP_PRIVATE;
        }
remap:
        buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
                   file_offset);
        if (buf == MAP_FAILED) {
                pr_err("failed to mmap file\n");
                err = -errno;
                goto out_err;
        }
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;

more:
        event = fetch_mmaped_event(session, head, mmap_size, buf);
        if (!event) {
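                /*
                 * The next event does not fit in the remaining window: free
                 * the mapping slot we are about to reuse, then remap with
                 * the window realigned to the page containing the current
                 * head.
                 */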
                if (mmaps[map_idx]) {
                        munmap(mmaps[map_idx], mmap_size);
                        mmaps[map_idx] = NULL;
                }

                page_offset = page_size * (head / page_size);
                file_offset += page_offset;
                head -= page_offset;
                goto remap;
        }

        size = event->header.size;

        if (size == 0 ||
            perf_session__process_event(session, event, tool, file_pos) < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       file_offset + head, event->header.size,
                       event->header.type);
                err = -EINVAL;
                goto out_err;
        }

        head += size;
        file_pos += size;

        if (file_pos >= progress_next) {
                progress_next += file_size / 16;
                ui_progress__update(file_pos, file_size,
                                    "Processing events...");
        }

        if (file_pos < file_size)
                goto more;

        err = 0;
        /* do the final flush for ordered samples */
        session->ordered_samples.next_flush = ULLONG_MAX;
        err = flush_sample_queue(session, tool);
out_err:
        ui_progress__finish();
        perf_session__warn_about_errors(session, tool);
        perf_session_free_sample_buffers(session);
        return err;
}

int perf_session__process_events(struct perf_session *self,
                                 struct perf_tool *tool)
{
        int err;

        if (perf_session__register_idle_thread(self) == NULL)
                return -ENOMEM;

        if (!self->fd_pipe)
                err = __perf_session__process_events(self,
                                                     self->header.data_offset,
                                                     self->header.data_size,
                                                     self->size, tool);
        else
                err = __perf_session__process_pipe_events(self, tool);

        return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
        if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
                pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
                return false;
        }

        return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
                                     const char *symbol_name, u64 addr)
{
        char *bracket;
        enum map_type i;
        struct ref_reloc_sym *ref;

        ref = zalloc(sizeof(struct ref_reloc_sym));
        if (ref == NULL)
                return -ENOMEM;

        ref->name = strdup(symbol_name);
        if (ref->name == NULL) {
                free(ref);
                return -ENOMEM;
        }

        bracket = strchr(ref->name, ']');
        if (bracket)
                *bracket = '\0';

        ref->addr = addr;

        for (i = 0; i < MAP__NR_TYPES; ++i) {
                struct kmap *kmap = map__kmap(maps[i]);
                kmap->ref_reloc_sym = ref;
        }

        return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
        return machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
                                          bool (skip)(struct dso *dso, int parm), int parm)
{
        return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");

        ret += events_stats__fprintf(&session->stats, fp);

        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&pos->hists.stats, fp);
        }

        return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
        /*
         * FIXME: Here we have to actually print all the machines in this
         * session, not just the host...
         */
        return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
                                                   unsigned int type)
{
        struct perf_evsel *pos;

        list_for_each_entry(pos, &session->evlist->entries, node) {
                if (pos->attr.type == type)
                        return pos;
        }
        return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
                          struct perf_sample *sample, struct machine *machine,
                          int print_sym, int print_dso, int print_symoffset)
{
        struct addr_location al;
        struct callchain_cursor_node *node;

        if (perf_event__preprocess_sample(event, machine, &al, sample,
                                          NULL) < 0) {
                error("problem processing %d event, skipping it.\n",
                      event->header.type);
                return;
        }

        if (symbol_conf.use_callchain && sample->callchain) {

                if (machine__resolve_callchain(machine, evsel, al.thread,
                                               sample, NULL) != 0) {
                        if (verbose)
                                error("Failed to resolve callchain. Skipping\n");
                        return;
                }
                callchain_cursor_commit(&callchain_cursor);

                while (1) {
                        node = callchain_cursor_current(&callchain_cursor);
                        if (!node)
                                break;

                        printf("\t%16" PRIx64, node->ip);
                        if (print_sym) {
                                printf(" ");
                                symbol__fprintf_symname(node->sym, stdout);
                        }
                        if (print_dso) {
                                printf(" (");
                                map__fprintf_dsoname(node->map, stdout);
                                printf(")");
                        }
                        printf("\n");

                        callchain_cursor_advance(&callchain_cursor);
                }

        } else {
                printf("%16" PRIx64, sample->ip);
                if (print_sym) {
                        printf(" ");
                        if (print_symoffset)
                                symbol__fprintf_symname_offs(al.sym, &al,
                                                             stdout);
                        else
                                symbol__fprintf_symname(al.sym, stdout);
                }

                if (print_dso) {
                        printf(" (");
                        map__fprintf_dsoname(al.map, stdout);
                        printf(")");
                }
        }
}

int perf_session__cpu_bitmap(struct perf_session *session,
                             const char *cpu_list, unsigned long *cpu_bitmap)
{
        int i;
        struct cpu_map *map;

        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct perf_evsel *evsel;

                evsel = perf_session__find_first_evtype(session, i);
                if (!evsel)
                        continue;

                if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
                        pr_err("File does not contain CPU events. "
                               "Remove -c option to proceed.\n");
                        return -1;
                }
        }

        map = cpu_map__new(cpu_list);
        if (map == NULL) {
                pr_err("Invalid cpu_list\n");
                return -1;
        }

        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];

                if (cpu >= MAX_NR_CPUS) {
                        pr_err("Requested CPU %d too large. "
                               "Consider raising MAX_NR_CPUS\n", cpu);
                        return -1;
                }

                set_bit(cpu, cpu_bitmap);
        }

        return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
                                bool full)
{
        struct stat st;
        int ret;

        if (session == NULL || fp == NULL)
                return;

        ret = fstat(session->fd, &st);
        if (ret == -1)
                return;

        fprintf(fp, "# ========\n");
        fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
        perf_header__fprintf_info(session, fp, full);
        fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
                                             const struct perf_evsel_str_handler *assocs,
                                             size_t nr_assocs)
{
        struct perf_evlist *evlist = session->evlist;
        struct event_format *format;
        struct perf_evsel *evsel;
        char *tracepoint, *name;
        size_t i;
        int err;

        for (i = 0; i < nr_assocs; i++) {
                err = -ENOMEM;
                tracepoint = strdup(assocs[i].name);
                if (tracepoint == NULL)
                        goto out;

                err = -ENOENT;
                name = strchr(tracepoint, ':');
                if (name == NULL)
                        goto out_free;

                *name++ = '\0';
                format = pevent_find_event_by_name(session->pevent,
                                                   tracepoint, name);
                if (format == NULL) {
                        /*
                         * Adding a handler for an event not in the session,
                         * just ignore it.
                         */
                        goto next;
                }

                evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
                if (evsel == NULL)
                        goto next;

                err = -EEXIST;
                if (evsel->handler.func != NULL)
                        goto out_free;
                evsel->handler.func = assocs[i].handler;
next:
                free(tracepoint);
        }

        err = 0;
out:
        return err;

out_free:
        free(tracepoint);
        goto out;
}