Merge branches 'perf-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent...
[linux-3.10.git] / tools / perf / util / header.c
1 #define _FILE_OFFSET_BITS 64
2
3 #include "util.h"
4 #include <sys/types.h>
5 #include <byteswap.h>
6 #include <unistd.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <linux/list.h>
10 #include <linux/kernel.h>
11 #include <linux/bitops.h>
12 #include <sys/utsname.h>
13
14 #include "evlist.h"
15 #include "evsel.h"
16 #include "header.h"
17 #include "../perf.h"
18 #include "trace-event.h"
19 #include "session.h"
20 #include "symbol.h"
21 #include "debug.h"
22 #include "cpumap.h"
23
/* when set, freshly recorded build-ids are not copied into the buildid cache */
static bool no_buildid_cache = false;

/* table of user-pushed event types, filled by perf_header__push_event() */
static int event_count;
static struct perf_trace_event_type *events;

/* snapshot of the perf command line, set by perf_header__set_cmdline() */
static u32 header_argc;
static const char **header_argv;
31
32 int perf_header__push_event(u64 id, const char *name)
33 {
34         if (strlen(name) > MAX_EVENT_NAME)
35                 pr_warning("Event %s will be truncated\n", name);
36
37         if (!events) {
38                 events = malloc(sizeof(struct perf_trace_event_type));
39                 if (events == NULL)
40                         return -ENOMEM;
41         } else {
42                 struct perf_trace_event_type *nevents;
43
44                 nevents = realloc(events, (event_count + 1) * sizeof(*events));
45                 if (nevents == NULL)
46                         return -ENOMEM;
47                 events = nevents;
48         }
49         memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
50         events[event_count].event_id = id;
51         strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
52         event_count++;
53         return 0;
54 }
55
56 char *perf_header__find_event(u64 id)
57 {
58         int i;
59         for (i = 0 ; i < event_count; i++) {
60                 if (events[i].event_id == id)
61                         return events[i].name;
62         }
63         return NULL;
64 }
65
66 /*
67  * magic2 = "PERFILE2"
68  * must be a numerical value to let the endianness
69  * determine the memory layout. That way we are able
70  * to detect endianness when reading the perf.data file
71  * back.
72  *
73  * we check for legacy (PERFFILE) format.
74  */
75 static const char *__perf_magic1 = "PERFFILE";
76 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
77 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
78
79 #define PERF_MAGIC      __perf_magic2
80
81 struct perf_file_attr {
82         struct perf_event_attr  attr;
83         struct perf_file_section        ids;
84 };
85
/* Mark feature 'feat' as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
90
/* Remove feature 'feat' from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
95
/* Test whether feature 'feat' is present in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
100
/*
 * Write 'size' bytes from 'buf' to 'fd', looping over short writes.
 * Returns 0 on success or -errno on a write failure.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		ssize_t ret = write(fd, buf, size);

		if (ret < 0) {
			if (errno == EINTR)	/* interrupted: just retry */
				continue;
			return -errno;
		}

		size -= ret;
		buf += ret;
	}

	return 0;
}
115
116 #define NAME_ALIGN 64
117
118 static int write_padded(int fd, const void *bf, size_t count,
119                         size_t count_aligned)
120 {
121         static const char zero_buf[NAME_ALIGN];
122         int err = do_write(fd, bf, count);
123
124         if (!err)
125                 err = do_write(fd, zero_buf, count_aligned - count);
126
127         return err;
128 }
129
130 static int do_write_string(int fd, const char *str)
131 {
132         u32 len, olen;
133         int ret;
134
135         olen = strlen(str) + 1;
136         len = ALIGN(olen, NAME_ALIGN);
137
138         /* write len, incl. \0 */
139         ret = do_write(fd, &len, sizeof(len));
140         if (ret < 0)
141                 return ret;
142
143         return write_padded(fd, str, olen, len);
144 }
145
146 static char *do_read_string(int fd, struct perf_header *ph)
147 {
148         ssize_t sz, ret;
149         u32 len;
150         char *buf;
151
152         sz = read(fd, &len, sizeof(len));
153         if (sz < (ssize_t)sizeof(len))
154                 return NULL;
155
156         if (ph->needs_swap)
157                 len = bswap_32(len);
158
159         buf = malloc(len);
160         if (!buf)
161                 return NULL;
162
163         ret = read(fd, buf, len);
164         if (ret == (ssize_t)len) {
165                 /*
166                  * strings are padded by zeroes
167                  * thus the actual strlen of buf
168                  * may be less than len
169                  */
170                 return buf;
171         }
172
173         free(buf);
174         return NULL;
175 }
176
177 int
178 perf_header__set_cmdline(int argc, const char **argv)
179 {
180         int i;
181
182         header_argc = (u32)argc;
183
184         /* do not include NULL termination */
185         header_argv = calloc(argc, sizeof(char *));
186         if (!header_argv)
187                 return -ENOMEM;
188
189         /*
190          * must copy argv contents because it gets moved
191          * around during option parsing
192          */
193         for (i = 0; i < argc ; i++)
194                 header_argv[i] = argv[i];
195
196         return 0;
197 }
198
/*
 * Iterate 'pos' over the dsos on 'head' that carry a build-id; entries
 * without one are skipped.  The trailing 'else' binds the caller's body.
 */
#define dsos__for_each_with_build_id(pos, head) \
	list_for_each_entry(pos, head, node)    \
		if (!pos->has_build_id)         \
			continue;               \
		else
204
/*
 * Emit one build_id_event per hit dso on 'head': the fixed-size event
 * (header + build-id) followed by the dso's long name, zero-padded to
 * NAME_ALIGN.  Returns 0 or the first negative do_write() error.
 */
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		/* only dsos actually hit by samples are worth recording */
		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		/* on-disk size covers the event plus the padded name */
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}
235
236 static int machine__write_buildid_table(struct machine *machine, int fd)
237 {
238         int err;
239         u16 kmisc = PERF_RECORD_MISC_KERNEL,
240             umisc = PERF_RECORD_MISC_USER;
241
242         if (!machine__is_host(machine)) {
243                 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
244                 umisc = PERF_RECORD_MISC_GUEST_USER;
245         }
246
247         err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
248                                           kmisc, fd);
249         if (err == 0)
250                 err = __dsos__write_buildid_table(&machine->user_dsos,
251                                                   machine->pid, umisc, fd);
252         return err;
253 }
254
255 static int dsos__write_buildid_table(struct perf_header *header, int fd)
256 {
257         struct perf_session *session = container_of(header,
258                         struct perf_session, header);
259         struct rb_node *nd;
260         int err = machine__write_buildid_table(&session->host_machine, fd);
261
262         if (err)
263                 return err;
264
265         for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
266                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
267                 err = machine__write_buildid_table(pos, fd);
268                 if (err)
269                         break;
270         }
271         return err;
272 }
273
274 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
275                           const char *name, bool is_kallsyms)
276 {
277         const size_t size = PATH_MAX;
278         char *realname, *filename = zalloc(size),
279              *linkname = zalloc(size), *targetname;
280         int len, err = -1;
281
282         if (is_kallsyms) {
283                 if (symbol_conf.kptr_restrict) {
284                         pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
285                         return 0;
286                 }
287                 realname = (char *)name;
288         } else
289                 realname = realpath(name, NULL);
290
291         if (realname == NULL || filename == NULL || linkname == NULL)
292                 goto out_free;
293
294         len = scnprintf(filename, size, "%s%s%s",
295                        debugdir, is_kallsyms ? "/" : "", realname);
296         if (mkdir_p(filename, 0755))
297                 goto out_free;
298
299         snprintf(filename + len, size - len, "/%s", sbuild_id);
300
301         if (access(filename, F_OK)) {
302                 if (is_kallsyms) {
303                          if (copyfile("/proc/kallsyms", filename))
304                                 goto out_free;
305                 } else if (link(realname, filename) && copyfile(name, filename))
306                         goto out_free;
307         }
308
309         len = scnprintf(linkname, size, "%s/.build-id/%.2s",
310                        debugdir, sbuild_id);
311
312         if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
313                 goto out_free;
314
315         snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
316         targetname = filename + strlen(debugdir) - 5;
317         memcpy(targetname, "../..", 5);
318
319         if (symlink(targetname, linkname) == 0)
320                 err = 0;
321 out_free:
322         if (!is_kallsyms)
323                 free(realname);
324         free(filename);
325         free(linkname);
326         return err;
327 }
328
329 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
330                                  const char *name, const char *debugdir,
331                                  bool is_kallsyms)
332 {
333         char sbuild_id[BUILD_ID_SIZE * 2 + 1];
334
335         build_id__sprintf(build_id, build_id_size, sbuild_id);
336
337         return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
338 }
339
/*
 * Remove a cached build-id: delete both the .build-id/xx/yyy symlink and
 * the cached file it points to.  Returns 0 on success, -1 on failure.
 */
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = zalloc(size),
	     *linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	/* link path: <debugdir>/.build-id/<first 2 hex chars>/<rest> */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/* zalloc'ed buffer keeps the readlink() result NUL-terminated */
	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}
377
/*
 * Cache one dso's build-id.  A kernel dso whose "path" is not absolute
 * comes from kallsyms (see build_id_cache__add_s(), which copies
 * /proc/kallsyms for that case instead of linking).
 */
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}
385
386 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
387 {
388         struct dso *pos;
389         int err = 0;
390
391         dsos__for_each_with_build_id(pos, head)
392                 if (dso__cache_build_id(pos, debugdir))
393                         err = -1;
394
395         return err;
396 }
397
398 static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
399 {
400         int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
401         ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
402         return ret;
403 }
404
405 static int perf_session__cache_build_ids(struct perf_session *session)
406 {
407         struct rb_node *nd;
408         int ret;
409         char debugdir[PATH_MAX];
410
411         snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
412
413         if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
414                 return -1;
415
416         ret = machine__cache_build_ids(&session->host_machine, debugdir);
417
418         for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
419                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
420                 ret |= machine__cache_build_ids(pos, debugdir);
421         }
422         return ret ? -1 : 0;
423 }
424
425 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
426 {
427         bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
428         ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
429         return ret;
430 }
431
432 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
433 {
434         struct rb_node *nd;
435         bool ret = machine__read_build_ids(&session->host_machine, with_hits);
436
437         for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
438                 struct machine *pos = rb_entry(nd, struct machine, rb_node);
439                 ret |= machine__read_build_ids(pos, with_hits);
440         }
441
442         return ret;
443 }
444
/* HEADER_TRACING_DATA: delegate to the trace-event code for all evsels. */
static int write_trace_info(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
450
451
452 static int write_build_id(int fd, struct perf_header *h,
453                           struct perf_evlist *evlist __used)
454 {
455         struct perf_session *session;
456         int err;
457
458         session = container_of(h, struct perf_session, header);
459
460         if (!perf_session__read_build_ids(session, true))
461                 return -1;
462
463         err = dsos__write_buildid_table(h, fd);
464         if (err < 0) {
465                 pr_debug("failed to write buildid table\n");
466                 return err;
467         }
468         if (!no_buildid_cache)
469                 perf_session__cache_build_ids(session);
470
471         return 0;
472 }
473
474 static int write_hostname(int fd, struct perf_header *h __used,
475                           struct perf_evlist *evlist __used)
476 {
477         struct utsname uts;
478         int ret;
479
480         ret = uname(&uts);
481         if (ret < 0)
482                 return -1;
483
484         return do_write_string(fd, uts.nodename);
485 }
486
487 static int write_osrelease(int fd, struct perf_header *h __used,
488                            struct perf_evlist *evlist __used)
489 {
490         struct utsname uts;
491         int ret;
492
493         ret = uname(&uts);
494         if (ret < 0)
495                 return -1;
496
497         return do_write_string(fd, uts.release);
498 }
499
500 static int write_arch(int fd, struct perf_header *h __used,
501                       struct perf_evlist *evlist __used)
502 {
503         struct utsname uts;
504         int ret;
505
506         ret = uname(&uts);
507         if (ret < 0)
508                 return -1;
509
510         return do_write_string(fd, uts.machine);
511 }
512
/* HEADER_VERSION: version string of the perf tool writing the file. */
static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	return do_write_string(fd, perf_version_string);
}
518
/*
 * HEADER_CPUDESC: the CPU model/branding string, extracted from the
 * first /proc/cpuinfo line starting with the arch-specific CPUINFO_PROC
 * tag, with the "tag:" prefix, trailing newline and runs of whitespace
 * removed.
 */
static int write_cpudesc(int fd, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC NULL
#endif
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = CPUINFO_PROC;
	size_t len = 0;
	int ret = -1;

	/* arch did not define CPUINFO_PROC: nothing we can look for */
	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret)
		goto done;

	s = buf;

	/* skip past "tag: " so only the value itself remains */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			/* shift the tail left over the extra whitespace */
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
577
578 static int write_nrcpus(int fd, struct perf_header *h __used,
579                         struct perf_evlist *evlist __used)
580 {
581         long nr;
582         u32 nrc, nra;
583         int ret;
584
585         nr = sysconf(_SC_NPROCESSORS_CONF);
586         if (nr < 0)
587                 return -1;
588
589         nrc = (u32)(nr & UINT_MAX);
590
591         nr = sysconf(_SC_NPROCESSORS_ONLN);
592         if (nr < 0)
593                 return -1;
594
595         nra = (u32)(nr & UINT_MAX);
596
597         ret = do_write(fd, &nrc, sizeof(nrc));
598         if (ret < 0)
599                 return ret;
600
601         return do_write(fd, &nra, sizeof(nra));
602 }
603
/*
 * HEADER_EVENT_DESC layout: u32 number of events, u32 sizeof(attr),
 * then per event: the perf_event_attr, u32 number of ids, the event
 * name string, and the array of u64 ids.
 */
static int write_event_desc(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *attr;
	u32 nre = 0, nri, sz;
	int ret;

	list_for_each_entry(attr, &evlist->entries, node)
		nre++;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(attr->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	list_for_each_entry(attr, &evlist->entries, node) {

		ret = do_write(fd, &attr->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = attr->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, event_name(attr));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
661
662 static int write_cmdline(int fd, struct perf_header *h __used,
663                          struct perf_evlist *evlist __used)
664 {
665         char buf[MAXPATHLEN];
666         char proc[32];
667         u32 i, n;
668         int ret;
669
670         /*
671          * actual atual path to perf binary
672          */
673         sprintf(proc, "/proc/%d/exe", getpid());
674         ret = readlink(proc, buf, sizeof(buf));
675         if (ret <= 0)
676                 return -1;
677
678         /* readlink() does not add null termination */
679         buf[ret] = '\0';
680
681         /* account for binary path */
682         n = header_argc + 1;
683
684         ret = do_write(fd, &n, sizeof(n));
685         if (ret < 0)
686                 return ret;
687
688         ret = do_write_string(fd, buf);
689         if (ret < 0)
690                 return ret;
691
692         for (i = 0 ; i < header_argc; i++) {
693                 ret = do_write_string(fd, header_argv[i]);
694                 if (ret < 0)
695                         return ret;
696         }
697         return 0;
698 }
699
/* sysfs locations of the per-cpu core/thread sibling lists */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/* deduplicated sibling-list strings: one entry per distinct mask */
struct cpu_topo {
	u32 core_sib;		/* number of distinct core sibling lists */
	u32 thread_sib;		/* number of distinct thread sibling lists */
	char **core_siblings;
	char **thread_siblings;
};
711
/*
 * Read this cpu's core- and thread-sibling lists from sysfs and add each
 * to 'tp' unless an identical string is already recorded.  Ownership of
 * the getline() buffer is transferred to tp when a new entry is stored
 * (buf is then reset so the next getline() allocates afresh).
 * Returns 0 on success, -1 on failure.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	/* fp is closed here and reused for the thread-siblings file below */
	fclose(fp);

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}

	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
774
775 static void free_cpu_topo(struct cpu_topo *tp)
776 {
777         u32 i;
778
779         if (!tp)
780                 return;
781
782         for (i = 0 ; i < tp->core_sib; i++)
783                 free(tp->core_siblings[i]);
784
785         for (i = 0 ; i < tp->thread_sib; i++)
786                 free(tp->thread_siblings[i]);
787
788         free(tp);
789 }
790
/*
 * Build the deduplicated cpu topology for all configured cpus.
 * The struct and its two pointer arrays live in a single calloc'ed
 * allocation, laid out as: cpu_topo | core_siblings[] | thread_siblings[].
 * Returns NULL on failure (free_cpu_topo() cleans up partial results).
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);

	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		return NULL;

	tp = addr;

	/* carve the two pointer arrays out of the tail of the allocation */
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
830
831 static int write_cpu_topology(int fd, struct perf_header *h __used,
832                           struct perf_evlist *evlist __used)
833 {
834         struct cpu_topo *tp;
835         u32 i;
836         int ret;
837
838         tp = build_cpu_topology();
839         if (!tp)
840                 return -1;
841
842         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
843         if (ret < 0)
844                 goto done;
845
846         for (i = 0; i < tp->core_sib; i++) {
847                 ret = do_write_string(fd, tp->core_siblings[i]);
848                 if (ret < 0)
849                         goto done;
850         }
851         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
852         if (ret < 0)
853                 goto done;
854
855         for (i = 0; i < tp->thread_sib; i++) {
856                 ret = do_write_string(fd, tp->thread_siblings[i]);
857                 if (ret < 0)
858                         break;
859         }
860 done:
861         free_cpu_topo(tp);
862         return ret;
863 }
864
865
866
867 static int write_total_mem(int fd, struct perf_header *h __used,
868                           struct perf_evlist *evlist __used)
869 {
870         char *buf = NULL;
871         FILE *fp;
872         size_t len = 0;
873         int ret = -1, n;
874         uint64_t mem;
875
876         fp = fopen("/proc/meminfo", "r");
877         if (!fp)
878                 return -1;
879
880         while (getline(&buf, &len, fp) > 0) {
881                 ret = strncmp(buf, "MemTotal:", 9);
882                 if (!ret)
883                         break;
884         }
885         if (!ret) {
886                 n = sscanf(buf, "%*s %"PRIu64, &mem);
887                 if (n == 1)
888                         ret = do_write(fd, &mem, sizeof(mem));
889         }
890         free(buf);
891         fclose(fp);
892         return ret;
893 }
894
895 static int write_topo_node(int fd, int node)
896 {
897         char str[MAXPATHLEN];
898         char field[32];
899         char *buf = NULL, *p;
900         size_t len = 0;
901         FILE *fp;
902         u64 mem_total, mem_free, mem;
903         int ret = -1;
904
905         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
906         fp = fopen(str, "r");
907         if (!fp)
908                 return -1;
909
910         while (getline(&buf, &len, fp) > 0) {
911                 /* skip over invalid lines */
912                 if (!strchr(buf, ':'))
913                         continue;
914                 if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
915                         goto done;
916                 if (!strcmp(field, "MemTotal:"))
917                         mem_total = mem;
918                 if (!strcmp(field, "MemFree:"))
919                         mem_free = mem;
920         }
921
922         fclose(fp);
923
924         ret = do_write(fd, &mem_total, sizeof(u64));
925         if (ret)
926                 goto done;
927
928         ret = do_write(fd, &mem_free, sizeof(u64));
929         if (ret)
930                 goto done;
931
932         ret = -1;
933         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
934
935         fp = fopen(str, "r");
936         if (!fp)
937                 goto done;
938
939         if (getline(&buf, &len, fp) <= 0)
940                 goto done;
941
942         p = strchr(buf, '\n');
943         if (p)
944                 *p = '\0';
945
946         ret = do_write_string(fd, buf);
947 done:
948         free(buf);
949         fclose(fp);
950         return ret;
951 }
952
953 static int write_numa_topology(int fd, struct perf_header *h __used,
954                           struct perf_evlist *evlist __used)
955 {
956         char *buf = NULL;
957         size_t len = 0;
958         FILE *fp;
959         struct cpu_map *node_map = NULL;
960         char *c;
961         u32 nr, i, j;
962         int ret = -1;
963
964         fp = fopen("/sys/devices/system/node/online", "r");
965         if (!fp)
966                 return -1;
967
968         if (getline(&buf, &len, fp) <= 0)
969                 goto done;
970
971         c = strchr(buf, '\n');
972         if (c)
973                 *c = '\0';
974
975         node_map = cpu_map__new(buf);
976         if (!node_map)
977                 goto done;
978
979         nr = (u32)node_map->nr;
980
981         ret = do_write(fd, &nr, sizeof(nr));
982         if (ret < 0)
983                 goto done;
984
985         for (i = 0; i < nr; i++) {
986                 j = (u32)node_map->map[i];
987                 ret = do_write(fd, &j, sizeof(j));
988                 if (ret < 0)
989                         break;
990
991                 ret = write_topo_node(fd, i);
992                 if (ret < 0)
993                         break;
994         }
995 done:
996         free(buf);
997         fclose(fp);
998         free(node_map);
999         return ret;
1000 }
1001
1002 /*
1003  * default get_cpuid(): nothing gets recorded
1004  * actual implementation must be in arch/$(ARCH)/util/header.c
1005  */
1006 int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used)
1007 {
1008         return -1;
1009 }
1010
1011 static int write_cpuid(int fd, struct perf_header *h __used,
1012                        struct perf_evlist *evlist __used)
1013 {
1014         char buffer[64];
1015         int ret;
1016
1017         ret = get_cpuid(buffer, sizeof(buffer));
1018         if (!ret)
1019                 goto write_it;
1020
1021         return -1;
1022 write_it:
1023         return do_write_string(fd, buffer);
1024 }
1025
/* HEADER_BRANCH_STACK is a flag-only feature: there is no payload. */
static int write_branch_stack(int fd __used, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)
{
	return 0;
}
1031
/* Print the HEADER_HOSTNAME feature section. */
static void print_hostname(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail; passing NULL to %s is undefined */
	fprintf(fp, "# hostname : %s\n", str ? str : "(null)");
	free(str);
}
1038
/* Print the HEADER_OSRELEASE feature section. */
static void print_osrelease(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail; passing NULL to %s is undefined */
	fprintf(fp, "# os release : %s\n", str ? str : "(null)");
	free(str);
}
1045
/* Print the HEADER_ARCH feature section. */
static void print_arch(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail; passing NULL to %s is undefined */
	fprintf(fp, "# arch : %s\n", str ? str : "(null)");
	free(str);
}
1052
/* Print the HEADER_CPUDESC feature section. */
static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
{
	char *str = do_read_string(fd, ph);

	/* do_read_string() may fail; passing NULL to %s is undefined */
	fprintf(fp, "# cpudesc : %s\n", str ? str : "(null)");
	free(str);
}
1059
1060 static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
1061 {
1062         ssize_t ret;
1063         u32 nr;
1064
1065         ret = read(fd, &nr, sizeof(nr));
1066         if (ret != (ssize_t)sizeof(nr))
1067                 nr = -1; /* interpreted as error */
1068
1069         if (ph->needs_swap)
1070                 nr = bswap_32(nr);
1071
1072         fprintf(fp, "# nrcpus online : %u\n", nr);
1073
1074         ret = read(fd, &nr, sizeof(nr));
1075         if (ret != (ssize_t)sizeof(nr))
1076                 nr = -1; /* interpreted as error */
1077
1078         if (ph->needs_swap)
1079                 nr = bswap_32(nr);
1080
1081         fprintf(fp, "# nrcpus avail : %u\n", nr);
1082 }
1083
/* HEADER_VERSION: version string of the perf tool that wrote the file. */
static void print_version(struct perf_header *ph, int fd, FILE *fp)
{
        char *version = do_read_string(fd, ph);

        fprintf(fp, "# perf version : %s\n", version);
        free(version);
}
1090
/*
 * HEADER_CMDLINE: a u32 argv count followed by that many strings; print
 * them as the command line used to record the file.
 */
static void print_cmdline(struct perf_header *ph, int fd, FILE *fp)
{
        ssize_t ret;
        char *str;
        u32 nr, i;

        /* number of argv entries */
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                return;

        if (ph->needs_swap)
                nr = bswap_32(nr);

        fprintf(fp, "# cmdline : ");

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "%s ", str);
                free(str);
        }
        fputc('\n', fp);
}
1113
/*
 * HEADER_CPU_TOPOLOGY: two string lists, each prefixed by a u32 count:
 * first the sibling-core maps, then the sibling-thread maps.
 */
static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp)
{
        ssize_t ret;
        u32 nr, i;
        char *str;

        /* number of sibling-core strings */
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                return;

        if (ph->needs_swap)
                nr = bswap_32(nr);

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "# sibling cores   : %s\n", str);
                free(str);
        }

        /* number of sibling-thread strings */
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                return;

        if (ph->needs_swap)
                nr = bswap_32(nr);

        for (i = 0; i < nr; i++) {
                str = do_read_string(fd, ph);
                fprintf(fp, "# sibling threads : %s\n", str);
                free(str);
        }
}
1146
1147 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1148 {
1149         struct perf_event_attr attr;
1150         uint64_t id;
1151         void *buf = NULL;
1152         char *str;
1153         u32 nre, sz, nr, i, j;
1154         ssize_t ret;
1155         size_t msz;
1156
1157         /* number of events */
1158         ret = read(fd, &nre, sizeof(nre));
1159         if (ret != (ssize_t)sizeof(nre))
1160                 goto error;
1161
1162         if (ph->needs_swap)
1163                 nre = bswap_32(nre);
1164
1165         ret = read(fd, &sz, sizeof(sz));
1166         if (ret != (ssize_t)sizeof(sz))
1167                 goto error;
1168
1169         if (ph->needs_swap)
1170                 sz = bswap_32(sz);
1171
1172         memset(&attr, 0, sizeof(attr));
1173
1174         /* buffer to hold on file attr struct */
1175         buf = malloc(sz);
1176         if (!buf)
1177                 goto error;
1178
1179         msz = sizeof(attr);
1180         if (sz < msz)
1181                 msz = sz;
1182
1183         for (i = 0 ; i < nre; i++) {
1184
1185                 /*
1186                  * must read entire on-file attr struct to
1187                  * sync up with layout.
1188                  */
1189                 ret = read(fd, buf, sz);
1190                 if (ret != (ssize_t)sz)
1191                         goto error;
1192
1193                 if (ph->needs_swap)
1194                         perf_event__attr_swap(buf);
1195
1196                 memcpy(&attr, buf, msz);
1197
1198                 ret = read(fd, &nr, sizeof(nr));
1199                 if (ret != (ssize_t)sizeof(nr))
1200                         goto error;
1201
1202                 if (ph->needs_swap)
1203                         nr = bswap_32(nr);
1204
1205                 str = do_read_string(fd, ph);
1206                 fprintf(fp, "# event : name = %s, ", str);
1207                 free(str);
1208
1209                 fprintf(fp, "type = %d, config = 0x%"PRIx64
1210                             ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64,
1211                                 attr.type,
1212                                 (u64)attr.config,
1213                                 (u64)attr.config1,
1214                                 (u64)attr.config2);
1215
1216                 fprintf(fp, ", excl_usr = %d, excl_kern = %d",
1217                                 attr.exclude_user,
1218                                 attr.exclude_kernel);
1219
1220                 if (nr)
1221                         fprintf(fp, ", id = {");
1222
1223                 for (j = 0 ; j < nr; j++) {
1224                         ret = read(fd, &id, sizeof(id));
1225                         if (ret != (ssize_t)sizeof(id))
1226                                 goto error;
1227
1228                         if (ph->needs_swap)
1229                                 id = bswap_64(id);
1230
1231                         if (j)
1232                                 fputc(',', fp);
1233
1234                         fprintf(fp, " %"PRIu64, id);
1235                 }
1236                 if (nr && j == nr)
1237                         fprintf(fp, " }");
1238                 fputc('\n', fp);
1239         }
1240         free(buf);
1241         return;
1242 error:
1243         fprintf(fp, "# event desc: not available or unable to read\n");
1244 }
1245
1246 static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp)
1247 {
1248         uint64_t mem;
1249         ssize_t ret;
1250
1251         ret = read(fd, &mem, sizeof(mem));
1252         if (ret != sizeof(mem))
1253                 goto error;
1254
1255         if (h->needs_swap)
1256                 mem = bswap_64(mem);
1257
1258         fprintf(fp, "# total memory : %"PRIu64" kB\n", mem);
1259         return;
1260 error:
1261         fprintf(fp, "# total memory : unknown\n");
1262 }
1263
/*
 * HEADER_NUMA_TOPOLOGY: u32 node count, then per node: u32 node number,
 * u64 total and u64 free memory (kB), and a cpu-list string.
 */
static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp)
{
        ssize_t ret;
        u32 nr, c, i;
        char *str;
        uint64_t mem_total, mem_free;

        /* nr nodes */
        ret = read(fd, &nr, sizeof(nr));
        if (ret != (ssize_t)sizeof(nr))
                goto error;

        if (h->needs_swap)
                nr = bswap_32(nr);

        for (i = 0; i < nr; i++) {

                /* node number */
                ret = read(fd, &c, sizeof(c));
                if (ret != (ssize_t)sizeof(c))
                        goto error;

                if (h->needs_swap)
                        c = bswap_32(c);

                /* total then free memory, both in kB */
                ret = read(fd, &mem_total, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;

                ret = read(fd, &mem_free, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;

                if (h->needs_swap) {
                        mem_total = bswap_64(mem_total);
                        mem_free = bswap_64(mem_free);
                }

                fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
                            " free = %"PRIu64" kB\n",
                        c,
                        mem_total,
                        mem_free);

                /* cpu list covered by this node */
                str = do_read_string(fd, h);
                fprintf(fp, "# node%u cpu list : %s\n", c, str);
                free(str);
        }
        return;
error:
        fprintf(fp, "# numa topology : not available\n");
}
1316
/* HEADER_CPUID: arch-specific CPU identification string (see write_cpuid). */
static void print_cpuid(struct perf_header *ph, int fd, FILE *fp)
{
        char *cpuid = do_read_string(fd, ph);

        fprintf(fp, "# cpuid : %s\n", cpuid);
        free(cpuid);
}
1323
/* HEADER_BRANCH_STACK has no payload; its presence alone is reported. */
static void print_branch_stack(struct perf_header *ph __used, int fd __used,
                               FILE *fp)
{
        fprintf(fp, "# contains samples with branch stack\n");
}
1329
/*
 * Attach the build-id carried by @bev to the dso named @filename within the
 * machine identified by bev->pid, creating the machine/dso entries as
 * needed. Returns 0 on success, -1 when the machine cannot be found or the
 * cpumode in the event header is unknown.
 */
static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
{
        int err = -1;
        struct list_head *head;
        struct machine *machine;
        u16 misc;
        struct dso *dso;
        enum dso_kernel_type dso_type;

        machine = perf_session__findnew_machine(session, bev->pid);
        if (!machine)
                goto out;

        misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        /* the cpumode selects which dso list of the machine to search */
        switch (misc) {
        case PERF_RECORD_MISC_KERNEL:
                dso_type = DSO_TYPE_KERNEL;
                head = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                dso_type = DSO_TYPE_GUEST_KERNEL;
                head = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
                dso_type = DSO_TYPE_USER;
                head = &machine->user_dsos;
                break;
        default:
                goto out;
        }

        dso = __dsos__findnew(head, filename);
        if (dso != NULL) {
                char sbuild_id[BUILD_ID_SIZE * 2 + 1];

                dso__set_build_id(dso, &bev->build_id);

                /* a bracketed name (e.g. "[kernel.kallsyms]") marks a kernel dso */
                if (filename[0] == '[')
                        dso->kernel = dso_type;

                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
                pr_debug("build id event received for %s: %s\n",
                         dso->long_name, sbuild_id);
        }

        err = 0;
out:
        return err;
}
1384
/*
 * Parse a build-id table written before the 'pid' field was added to
 * struct build_id_event (detection happens in perf_header__read_build_ids),
 * converting each old-layout entry into the current layout before
 * processing it.
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
                                                 int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct {
                struct perf_event_header   header;
                u8                         build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))];
                char                       filename[0];
        } old_bev;
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;

        while (offset < limit) {
                ssize_t len;

                if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
                        return -1;

                if (header->needs_swap)
                        perf_event_header__bswap(&old_bev.header);

                /* NOTE(review): len comes straight from the file; a
                 * header.size below sizeof(old_bev) would go negative and a
                 * large one could overrun filename[] — verify upstream. */
                len = old_bev.header.size - sizeof(old_bev);
                if (read(input, filename, len) != len)
                        return -1;

                bev.header = old_bev.header;

                /*
                 * As the pid is the missing value, we need to fill
                 * it properly. The header.misc value gives us a nice hint.
                 */
                bev.pid = HOST_KERNEL_ID;
                if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
                    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
                        bev.pid = DEFAULT_GUEST_KERNEL_ID;

                memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }

        return 0;
}
1430
/*
 * Parse the HEADER_BUILD_ID section: a sequence of build_id_event records,
 * each followed by the dso file name. Falls back to the pre-a1645ce1
 * on-disk layout when it is detected (see the comment in the loop).
 * Returns 0 on success, -1 on a truncated or unreadable table.
 */
static int perf_header__read_build_ids(struct perf_header *header,
                                       int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size, orig_offset = offset;
        int err = -1;

        while (offset < limit) {
                ssize_t len;

                if (read(input, &bev, sizeof(bev)) != sizeof(bev))
                        goto out;

                if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);

                /* the file name fills the rest of the record */
                len = bev.header.size - sizeof(bev);
                if (read(input, filename, len) != len)
                        goto out;
                /*
                 * The a1645ce1 changeset:
                 *
                 * "perf: 'perf kvm' tool for monitoring guest performance from host"
                 *
                 * Added a field to struct build_id_event that broke the file
                 * format.
                 *
                 * Since the kernel build-id is the first entry, process the
                 * table using the old format if the well known
                 * '[kernel.kallsyms]' string for the kernel build-id has the
                 * first 4 characters chopped off (where the pid_t sits).
                 */
                if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
                        if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
                                return -1;
                        return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
                }

                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }
        err = 0;
out:
        return err;
}
1479
/*
 * HEADER_TRACE_INFO read-side: hand the tracing metadata to the
 * trace-event parser; fd is already positioned at the section.
 */
static int process_trace_info(struct perf_file_section *section __unused,
                              struct perf_header *ph __unused,
                              int feat __unused, int fd)
{
        trace_report(fd, false);
        return 0;
}
1487
1488 static int process_build_id(struct perf_file_section *section,
1489                             struct perf_header *ph,
1490                             int feat __unused, int fd)
1491 {
1492         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1493                 pr_debug("Failed to read buildids, continuing...\n");
1494         return 0;
1495 }
1496
/*
 * Per-feature vtable: how to write a feature section at record time, how
 * to pretty-print it for the header dump, and (optionally) how to process
 * its payload when reading a perf.data file.
 */
struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);
        int (*process)(struct perf_file_section *section,
                       struct perf_header *h, int feat, int fd);
        const char *name;
        bool full_only;
};

/* feature with write + print callbacks only */
#define FEAT_OPA(n, func) \
        [n] = { .name = #n, .write = write_##func, .print = print_##func }
/* feature that also has a read-side process callback */
#define FEAT_OPP(n, func) \
        [n] = { .name = #n, .write = write_##func, .print = print_##func, \
                .process = process_##func }
/* feature printed only with the full (-I) header listing */
#define FEAT_OPF(n, func) \
        [n] = { .name = #n, .write = write_##func, .print = print_##func, \
                .full_only = true }

/* feature_ops not implemented: */
#define print_trace_info                NULL
#define print_build_id                  NULL

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPP(HEADER_TRACE_INFO,     trace_info),
        FEAT_OPP(HEADER_BUILD_ID,       build_id),
        FEAT_OPA(HEADER_HOSTNAME,       hostname),
        FEAT_OPA(HEADER_OSRELEASE,      osrelease),
        FEAT_OPA(HEADER_VERSION,        version),
        FEAT_OPA(HEADER_ARCH,           arch),
        FEAT_OPA(HEADER_NRCPUS,         nrcpus),
        FEAT_OPA(HEADER_CPUDESC,        cpudesc),
        FEAT_OPA(HEADER_CPUID,          cpuid),
        FEAT_OPA(HEADER_TOTAL_MEM,      total_mem),
        FEAT_OPA(HEADER_EVENT_DESC,     event_desc),
        FEAT_OPA(HEADER_CMDLINE,        cmdline),
        FEAT_OPF(HEADER_CPU_TOPOLOGY,   cpu_topology),
        FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
        FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
};
1536
/* context handed to perf_file_section__fprintf_info() via the void *data */
struct header_print_data {
        FILE *fp;
        bool full; /* extended list of headers */
};
1541
/*
 * Callback for perf_header__process_sections(): seek to a feature section
 * and pretty-print it. Always returns 0 so that one unreadable or unknown
 * section does not stop the dump of the remaining features.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
                                           struct perf_header *ph,
                                           int feat, int fd, void *data)
{
        struct header_print_data *hd = data;

        if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
                pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
                                "%d, continuing...\n", section->offset, feat);
                return 0;
        }
        if (feat >= HEADER_LAST_FEATURE) {
                pr_warning("unknown feature %d\n", feat);
                return 0;
        }
        if (!feat_ops[feat].print)
                return 0;

        /* full_only features are shown only with the extended listing */
        if (!feat_ops[feat].full_only || hd->full)
                feat_ops[feat].print(ph, fd, hd->fp);
        else
                fprintf(hd->fp, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);

        return 0;
}
1568
1569 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
1570 {
1571         struct header_print_data hd;
1572         struct perf_header *header = &session->header;
1573         int fd = session->fd;
1574         hd.fp = fp;
1575         hd.full = full;
1576
1577         perf_header__process_sections(header, fd, &hd,
1578                                       perf_file_section__fprintf_info);
1579         return 0;
1580 }
1581
/*
 * Write one feature section if its bit is set in @h, recording its file
 * offset and size into **p and advancing *p on success. On failure the
 * file position is rewound so nothing partial is left behind.
 * Returns 0 on success (or when the feature is not set), -1 on error.
 */
static int do_write_feat(int fd, struct perf_header *h, int type,
                         struct perf_file_section **p,
                         struct perf_evlist *evlist)
{
        int err;
        int ret = 0;

        if (perf_header__has_feat(h, type)) {
                if (!feat_ops[type].write)
                        return -1;

                (*p)->offset = lseek(fd, 0, SEEK_CUR);

                err = feat_ops[type].write(fd, h, evlist);
                if (err < 0) {
                        pr_debug("failed to write feature %d\n", type);

                        /* undo anything written */
                        lseek(fd, (*p)->offset, SEEK_SET);

                        return -1;
                }
                (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
                (*p)++;
        }
        return ret;
}
1609
1610 static int perf_header__adds_write(struct perf_header *header,
1611                                    struct perf_evlist *evlist, int fd)
1612 {
1613         int nr_sections;
1614         struct perf_file_section *feat_sec, *p;
1615         int sec_size;
1616         u64 sec_start;
1617         int feat;
1618         int err;
1619
1620         nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
1621         if (!nr_sections)
1622                 return 0;
1623
1624         feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
1625         if (feat_sec == NULL)
1626                 return -ENOMEM;
1627
1628         sec_size = sizeof(*feat_sec) * nr_sections;
1629
1630         sec_start = header->data_offset + header->data_size;
1631         lseek(fd, sec_start + sec_size, SEEK_SET);
1632
1633         for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1634                 if (do_write_feat(fd, header, feat, &p, evlist))
1635                         perf_header__clear_feat(header, feat);
1636         }
1637
1638         lseek(fd, sec_start, SEEK_SET);
1639         /*
1640          * may write more than needed due to dropped feature, but
1641          * this is okay, reader will skip the mising entries
1642          */
1643         err = do_write(fd, feat_sec, sec_size);
1644         if (err < 0)
1645                 pr_debug("failed to write feature section\n");
1646         free(feat_sec);
1647         return err;
1648 }
1649
1650 int perf_header__write_pipe(int fd)
1651 {
1652         struct perf_pipe_file_header f_header;
1653         int err;
1654
1655         f_header = (struct perf_pipe_file_header){
1656                 .magic     = PERF_MAGIC,
1657                 .size      = sizeof(f_header),
1658         };
1659
1660         err = do_write(fd, &f_header, sizeof(f_header));
1661         if (err < 0) {
1662                 pr_debug("failed to write perf pipe header\n");
1663                 return err;
1664         }
1665
1666         return 0;
1667 }
1668
/*
 * Write the perf.data file header for @evlist to @fd: the sample-id
 * tables, the attr table, the trace event-type table and, when @at_exit
 * is set, the optional feature sections. Finishes by rewriting the fixed
 * struct perf_file_header at offset 0 with the final offsets/sizes.
 * Returns 0 on success or a negative error.
 */
int perf_session__write_header(struct perf_session *session,
                               struct perf_evlist *evlist,
                               int fd, bool at_exit)
{
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        struct perf_header *header = &session->header;
        struct perf_evsel *attr, *pair = NULL;
        int err;

        /* leave room for the fixed header, filled in at the end */
        lseek(fd, sizeof(f_header), SEEK_SET);

        /* when writing a different evlist, merge ids pairwise from the
         * session's own evlist */
        if (session->evlist != evlist)
                pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

        list_for_each_entry(attr, &evlist->entries, node) {
                attr->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(fd, attr->id, attr->ids * sizeof(u64));
                if (err < 0) {
out_err_write:
                        pr_debug("failed to write perf header\n");
                        return err;
                }
                if (session->evlist != evlist) {
                        err = do_write(fd, pair->id, pair->ids * sizeof(u64));
                        if (err < 0)
                                goto out_err_write;
                        attr->ids += pair->ids;
                        pair = list_entry(pair->node.next, struct perf_evsel, node);
                }
        }

        header->attr_offset = lseek(fd, 0, SEEK_CUR);

        /* one perf_file_attr per event, pointing back at its id table */
        list_for_each_entry(attr, &evlist->entries, node) {
                f_attr = (struct perf_file_attr){
                        .attr = attr->attr,
                        .ids  = {
                                .offset = attr->id_offset,
                                .size   = attr->ids * sizeof(u64),
                        }
                };
                err = do_write(fd, &f_attr, sizeof(f_attr));
                if (err < 0) {
                        pr_debug("failed to write perf header attribute\n");
                        return err;
                }
        }

        /* trace event-type table built via perf_header__push_event() */
        header->event_offset = lseek(fd, 0, SEEK_CUR);
        header->event_size = event_count * sizeof(struct perf_trace_event_type);
        if (events) {
                err = do_write(fd, events, header->event_size);
                if (err < 0) {
                        pr_debug("failed to write perf header events\n");
                        return err;
                }
        }

        header->data_offset = lseek(fd, 0, SEEK_CUR);

        /* feature sections are appended only on the final write */
        if (at_exit) {
                err = perf_header__adds_write(header, evlist, fd);
                if (err < 0)
                        return err;
        }

        f_header = (struct perf_file_header){
                .magic     = PERF_MAGIC,
                .size      = sizeof(f_header),
                .attr_size = sizeof(f_attr),
                .attrs = {
                        .offset = header->attr_offset,
                        .size   = evlist->nr_entries * sizeof(f_attr),
                },
                .data = {
                        .offset = header->data_offset,
                        .size   = header->data_size,
                },
                .event_types = {
                        .offset = header->event_offset,
                        .size   = header->event_size,
                },
        };

        memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

        /* now rewrite the fixed header at the start of the file */
        lseek(fd, 0, SEEK_SET);
        err = do_write(fd, &f_header, sizeof(f_header));
        if (err < 0) {
                pr_debug("failed to write perf header\n");
                return err;
        }
        lseek(fd, header->data_offset + header->data_size, SEEK_SET);

        header->frozen = 1;
        return 0;
}
1767
1768 static int perf_header__getbuffer64(struct perf_header *header,
1769                                     int fd, void *buf, size_t size)
1770 {
1771         if (readn(fd, buf, size) <= 0)
1772                 return -1;
1773
1774         if (header->needs_swap)
1775                 mem_bswap_64(buf, size);
1776
1777         return 0;
1778 }
1779
/*
 * Read the feature section table that follows the data area and invoke
 * @process for each feature bit set in the header.
 * Returns 0 on success, negative on read or processing failure.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
                                  void *data,
                                  int (*process)(struct perf_file_section *section,
                                                 struct perf_header *ph,
                                                 int feat, int fd, void *data))
{
        struct perf_file_section *feat_sec, *sec;
        int nr_sections;
        int sec_size;
        int feat;
        int err;

        nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
        if (!nr_sections)
                return 0;

        feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
        if (!feat_sec)
                return -1;

        sec_size = sizeof(*feat_sec) * nr_sections;

        /* the section table sits right after the data area */
        lseek(fd, header->data_offset + header->data_size, SEEK_SET);

        err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
        if (err < 0)
                goto out_free;

        /*
         * NOTE(review): the count above uses HEADER_FEAT_BITS while this
         * loop stops at HEADER_LAST_FEATURE; a file with unknown feature
         * bits set would desynchronize sec from its table entry — verify.
         */
        for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
                err = process(sec++, header, feat, fd, data);
                if (err < 0)
                        goto out_free;
        }
        err = 0;
out_free:
        free(feat_sec);
        return err;
}
1818
/* on-file perf_event_attr sizes for each known file ABI revision;
 * the list is zero-terminated */
static const int attr_file_abi_sizes[] = {
        [0] = PERF_ATTR_SIZE_VER0,
        [1] = PERF_ATTR_SIZE_VER1,
        0,
};
1824
1825 /*
1826  * In the legacy file format, the magic number is not used to encode endianness.
1827  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
1828  * on ABI revisions, we need to try all combinations for all endianness to
1829  * detect the endianness.
1830  */
/*
 * Probe @hdr_sz against every known file-ABI attr size, in both byte
 * orders, setting ph->needs_swap when only the swapped value matches.
 * Returns 0 once an ABI revision is recognized, -1 otherwise.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
        uint64_t ref_size, attr_size;
        int i;

        for (i = 0 ; attr_file_abi_sizes[i]; i++) {
                /* on-file attr_size includes the trailing ids section */
                ref_size = attr_file_abi_sizes[i]
                         + sizeof(struct perf_file_section);
                if (hdr_sz != ref_size) {
                        attr_size = bswap_64(hdr_sz);
                        if (attr_size != ref_size)
                                continue;

                        ph->needs_swap = true;
                }
                pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
                         i,
                         ph->needs_swap);
                return 0;
        }
        /* could not determine endianness */
        return -1;
}
1854
/* size of the legacy pipe header for ABI revision 0 */
#define PERF_PIPE_HDR_VER0      16

/* known legacy pipe-header sizes; the list is zero-terminated */
static const size_t attr_pipe_abi_sizes[] = {
        [0] = PERF_PIPE_HDR_VER0,
        0,
};
1861
/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness between the host recording the samples and the host parsing
 * the samples is the same. This is not always the case given that the
 * pipe output may always be redirected into a file and analyzed on a
 * different machine with possibly a different endianness and perf_event
 * ABI revisions in the perf tool itself.
 */
1869 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
1870 {
1871         u64 attr_size;
1872         int i;
1873
1874         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
1875                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
1876                         attr_size = bswap_64(hdr_sz);
1877                         if (attr_size != hdr_sz)
1878                                 continue;
1879
1880                         ph->needs_swap = true;
1881                 }
1882                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
1883                 return 0;
1884         }
1885         return -1;
1886 }
1887
/*
 * Identify the perf.data flavor from its magic value. Legacy files use
 * __perf_magic1 and encode endianness implicitly in @hdr_sz; current
 * files encode it in the "PERFILE2" magic itself. Sets ph->needs_swap
 * as a side effect. Returns 0 when recognized, -1 otherwise.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
                              bool is_pipe, struct perf_header *ph)
{
        int ret;

        /* check for legacy format */
        ret = memcmp(&magic, __perf_magic1, sizeof(magic));
        if (ret == 0) {
                pr_debug("legacy perf.data format\n");
                if (is_pipe)
                        return try_all_pipe_abis(hdr_sz, ph);

                return try_all_file_abis(hdr_sz, ph);
        }
        /*
         * the new magic number serves two purposes:
         * - unique number to identify actual perf.data files
         * - encode endianness of file
         */

        /* check magic number with one endianness */
        if (magic == __perf_magic2)
                return 0;

        /* check magic number with opposite endianness */
        if (magic != __perf_magic2_sw)
                return -1;

        ph->needs_swap = true;

        return 0;
}
1920
/*
 * Read and validate the on-disk perf.data file header.
 *
 * Seeks @fd to offset 0, reads the raw header into @header, detects magic
 * and endianness, byte-swaps the fixed part when needed, normalizes the
 * feature bitmap across word sizes/endianness, and copies the resulting
 * offsets and sizes into the in-memory header @ph.
 *
 * Returns 0 on success, -1 on read failure or unrecognized format.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	int ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* sets ph->needs_swap as a side effect when endianness differs */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap only the fixed u64 fields preceding adds_features */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		unsigned int i;
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i)
			header->adds_features[i] = bswap_64(header->adds_features[i]);

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* undo the 64-bit swap, then retry as 32-bit words */
			for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) {
				header->adds_features[i] = bswap_64(header->adds_features[i]);
				header->adds_features[i] = bswap_32(header->adds_features[i]);
			}
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* old data file: assume only build-ids are present */
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	/* publish section locations for the rest of the session */
	ph->event_offset = header->event_types.offset;
	ph->event_size   = header->event_types.size;
	ph->data_offset  = header->data.offset;
	ph->data_size    = header->data.size;
	return 0;
}
1991
1992 static int perf_file_section__process(struct perf_file_section *section,
1993                                       struct perf_header *ph,
1994                                       int feat, int fd, void *data __used)
1995 {
1996         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
1997                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
1998                           "%d, continuing...\n", section->offset, feat);
1999                 return 0;
2000         }
2001
2002         if (feat >= HEADER_LAST_FEATURE) {
2003                 pr_debug("unknown feature %d, continuing...\n", feat);
2004                 return 0;
2005         }
2006
2007         if (!feat_ops[feat].process)
2008                 return 0;
2009
2010         return feat_ops[feat].process(section, ph, feat, fd);
2011 }
2012
2013 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2014                                        struct perf_header *ph, int fd,
2015                                        bool repipe)
2016 {
2017         int ret;
2018
2019         ret = readn(fd, header, sizeof(*header));
2020         if (ret <= 0)
2021                 return -1;
2022
2023         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2024                 pr_debug("endian/magic failed\n");
2025                 return -1;
2026         }
2027
2028         if (ph->needs_swap)
2029                 header->size = bswap_64(header->size);
2030
2031         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2032                 return -1;
2033
2034         return 0;
2035 }
2036
2037 static int perf_header__read_pipe(struct perf_session *session, int fd)
2038 {
2039         struct perf_header *header = &session->header;
2040         struct perf_pipe_file_header f_header;
2041
2042         if (perf_file_header__read_pipe(&f_header, header, fd,
2043                                         session->repipe) < 0) {
2044                 pr_debug("incompatible file format\n");
2045                 return -EINVAL;
2046         }
2047
2048         session->fd = fd;
2049
2050         return 0;
2051 }
2052
2053 static int read_attr(int fd, struct perf_header *ph,
2054                      struct perf_file_attr *f_attr)
2055 {
2056         struct perf_event_attr *attr = &f_attr->attr;
2057         size_t sz, left;
2058         size_t our_sz = sizeof(f_attr->attr);
2059         int ret;
2060
2061         memset(f_attr, 0, sizeof(*f_attr));
2062
2063         /* read minimal guaranteed structure */
2064         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2065         if (ret <= 0) {
2066                 pr_debug("cannot read %d bytes of header attr\n",
2067                          PERF_ATTR_SIZE_VER0);
2068                 return -1;
2069         }
2070
2071         /* on file perf_event_attr size */
2072         sz = attr->size;
2073
2074         if (ph->needs_swap)
2075                 sz = bswap_32(sz);
2076
2077         if (sz == 0) {
2078                 /* assume ABI0 */
2079                 sz =  PERF_ATTR_SIZE_VER0;
2080         } else if (sz > our_sz) {
2081                 pr_debug("file uses a more recent and unsupported ABI"
2082                          " (%zu bytes extra)\n", sz - our_sz);
2083                 return -1;
2084         }
2085         /* what we have not yet read and that we know about */
2086         left = sz - PERF_ATTR_SIZE_VER0;
2087         if (left) {
2088                 void *ptr = attr;
2089                 ptr += PERF_ATTR_SIZE_VER0;
2090
2091                 ret = readn(fd, ptr, left);
2092         }
2093         /* read perf_file_section, ids are read in caller */
2094         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2095
2096         return ret <= 0 ? -1 : 0;
2097 }
2098
/*
 * Parse the full perf.data header for @session: file header, every
 * recorded perf_event_attr with its sample ids, the trace event-type
 * table, and all optional feature sections.  Finally positions @fd at
 * the start of the sample data.
 *
 * Pipe input is delegated to perf_header__read_pipe().
 *
 * Returns 0 on success, -ENOMEM/-EINVAL/-errno on failure.
 */
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	u64                     f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap)
			perf_event__attr_swap(&f_attr.attr);

		/* remember position after the attr to resume the attr walk */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		/* ids live in their own section; jump there and back */
		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	if (f_header.event_types.size) {
		/* populate the file-scope events/event_count table */
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		/*
		 * NOTE(review): on this failure path the freshly malloc'd
		 * events table is not freed before returning — confirm
		 * whether the caller tears down the session on error.
		 */
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count =  f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, NULL,
				      perf_file_section__process);

	/* leave fd at the start of the sample data for the reader */
	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2190
2191 int perf_event__synthesize_attr(struct perf_tool *tool,
2192                                 struct perf_event_attr *attr, u16 ids, u64 *id,
2193                                 perf_event__handler_t process)
2194 {
2195         union perf_event *ev;
2196         size_t size;
2197         int err;
2198
2199         size = sizeof(struct perf_event_attr);
2200         size = ALIGN(size, sizeof(u64));
2201         size += sizeof(struct perf_event_header);
2202         size += ids * sizeof(u64);
2203
2204         ev = malloc(size);
2205
2206         if (ev == NULL)
2207                 return -ENOMEM;
2208
2209         ev->attr.attr = *attr;
2210         memcpy(ev->attr.id, id, ids * sizeof(u64));
2211
2212         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2213         ev->attr.header.size = size;
2214
2215         err = process(tool, ev, NULL, NULL);
2216
2217         free(ev);
2218
2219         return err;
2220 }
2221
2222 int perf_event__synthesize_attrs(struct perf_tool *tool,
2223                                    struct perf_session *session,
2224                                    perf_event__handler_t process)
2225 {
2226         struct perf_evsel *attr;
2227         int err = 0;
2228
2229         list_for_each_entry(attr, &session->evlist->entries, node) {
2230                 err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids,
2231                                                   attr->id, process);
2232                 if (err) {
2233                         pr_debug("failed to create perf header attribute\n");
2234                         return err;
2235                 }
2236         }
2237
2238         return err;
2239 }
2240
2241 int perf_event__process_attr(union perf_event *event,
2242                              struct perf_evlist **pevlist)
2243 {
2244         unsigned int i, ids, n_ids;
2245         struct perf_evsel *evsel;
2246         struct perf_evlist *evlist = *pevlist;
2247
2248         if (evlist == NULL) {
2249                 *pevlist = evlist = perf_evlist__new(NULL, NULL);
2250                 if (evlist == NULL)
2251                         return -ENOMEM;
2252         }
2253
2254         evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries);
2255         if (evsel == NULL)
2256                 return -ENOMEM;
2257
2258         perf_evlist__add(evlist, evsel);
2259
2260         ids = event->header.size;
2261         ids -= (void *)&event->attr.id - (void *)event;
2262         n_ids = ids / sizeof(u64);
2263         /*
2264          * We don't have the cpu and thread maps on the header, so
2265          * for allocating the perf_sample_id table we fake 1 cpu and
2266          * hattr->ids threads.
2267          */
2268         if (perf_evsel__alloc_id(evsel, 1, n_ids))
2269                 return -ENOMEM;
2270
2271         for (i = 0; i < n_ids; i++) {
2272                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
2273         }
2274
2275         return 0;
2276 }
2277
2278 int perf_event__synthesize_event_type(struct perf_tool *tool,
2279                                       u64 event_id, char *name,
2280                                       perf_event__handler_t process,
2281                                       struct machine *machine)
2282 {
2283         union perf_event ev;
2284         size_t size = 0;
2285         int err = 0;
2286
2287         memset(&ev, 0, sizeof(ev));
2288
2289         ev.event_type.event_type.event_id = event_id;
2290         memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
2291         strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
2292
2293         ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
2294         size = strlen(ev.event_type.event_type.name);
2295         size = ALIGN(size, sizeof(u64));
2296         ev.event_type.header.size = sizeof(ev.event_type) -
2297                 (sizeof(ev.event_type.event_type.name) - size);
2298
2299         err = process(tool, &ev, NULL, machine);
2300
2301         return err;
2302 }
2303
2304 int perf_event__synthesize_event_types(struct perf_tool *tool,
2305                                        perf_event__handler_t process,
2306                                        struct machine *machine)
2307 {
2308         struct perf_trace_event_type *type;
2309         int i, err = 0;
2310
2311         for (i = 0; i < event_count; i++) {
2312                 type = &events[i];
2313
2314                 err = perf_event__synthesize_event_type(tool, type->event_id,
2315                                                         type->name, process,
2316                                                         machine);
2317                 if (err) {
2318                         pr_debug("failed to create perf header event type\n");
2319                         return err;
2320                 }
2321         }
2322
2323         return err;
2324 }
2325
2326 int perf_event__process_event_type(struct perf_tool *tool __unused,
2327                                    union perf_event *event)
2328 {
2329         if (perf_header__push_event(event->event_type.event_type.event_id,
2330                                     event->event_type.event_type.name) < 0)
2331                 return -ENOMEM;
2332
2333         return 0;
2334 }
2335
2336 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
2337                                         struct perf_evlist *evlist,
2338                                         perf_event__handler_t process)
2339 {
2340         union perf_event ev;
2341         struct tracing_data *tdata;
2342         ssize_t size = 0, aligned_size = 0, padding;
2343         int err __used = 0;
2344
2345         /*
2346          * We are going to store the size of the data followed
2347          * by the data contents. Since the fd descriptor is a pipe,
2348          * we cannot seek back to store the size of the data once
2349          * we know it. Instead we:
2350          *
2351          * - write the tracing data to the temp file
2352          * - get/write the data size to pipe
2353          * - write the tracing data from the temp file
2354          *   to the pipe
2355          */
2356         tdata = tracing_data_get(&evlist->entries, fd, true);
2357         if (!tdata)
2358                 return -1;
2359
2360         memset(&ev, 0, sizeof(ev));
2361
2362         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2363         size = tdata->size;
2364         aligned_size = ALIGN(size, sizeof(u64));
2365         padding = aligned_size - size;
2366         ev.tracing_data.header.size = sizeof(ev.tracing_data);
2367         ev.tracing_data.size = aligned_size;
2368
2369         process(tool, &ev, NULL, NULL);
2370
2371         /*
2372          * The put function will copy all the tracing data
2373          * stored in temp file to the pipe.
2374          */
2375         tracing_data_put(tdata);
2376
2377         write_padded(fd, NULL, 0, padding);
2378
2379         return aligned_size;
2380 }
2381
2382 int perf_event__process_tracing_data(union perf_event *event,
2383                                      struct perf_session *session)
2384 {
2385         ssize_t size_read, padding, size = event->tracing_data.size;
2386         off_t offset = lseek(session->fd, 0, SEEK_CUR);
2387         char buf[BUFSIZ];
2388
2389         /* setup for reading amidst mmap */
2390         lseek(session->fd, offset + sizeof(struct tracing_data_event),
2391               SEEK_SET);
2392
2393         size_read = trace_report(session->fd, session->repipe);
2394
2395         padding = ALIGN(size_read, sizeof(u64)) - size_read;
2396
2397         if (read(session->fd, buf, padding) < 0)
2398                 die("reading input file");
2399         if (session->repipe) {
2400                 int retw = write(STDOUT_FILENO, buf, padding);
2401                 if (retw <= 0 || retw != padding)
2402                         die("repiping tracing data padding");
2403         }
2404
2405         if (size_read + padding != size)
2406                 die("tracing data size mismatch");
2407
2408         return size_read + padding;
2409 }
2410
2411 int perf_event__synthesize_build_id(struct perf_tool *tool,
2412                                     struct dso *pos, u16 misc,
2413                                     perf_event__handler_t process,
2414                                     struct machine *machine)
2415 {
2416         union perf_event ev;
2417         size_t len;
2418         int err = 0;
2419
2420         if (!pos->hit)
2421                 return err;
2422
2423         memset(&ev, 0, sizeof(ev));
2424
2425         len = pos->long_name_len + 1;
2426         len = ALIGN(len, NAME_ALIGN);
2427         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
2428         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2429         ev.build_id.header.misc = misc;
2430         ev.build_id.pid = machine->pid;
2431         ev.build_id.header.size = sizeof(ev.build_id) + len;
2432         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2433
2434         err = process(tool, &ev, NULL, machine);
2435
2436         return err;
2437 }
2438
2439 int perf_event__process_build_id(struct perf_tool *tool __used,
2440                                  union perf_event *event,
2441                                  struct perf_session *session)
2442 {
2443         __event_process_build_id(&event->build_id,
2444                                  event->build_id.filename,
2445                                  session);
2446         return 0;
2447 }
2448
/*
 * Turn off caching of build-ids for this run by setting the file-scope
 * no_buildid_cache flag (declared at the top of this file).
 * Presumably wired to a --no-buildid-cache style option — confirm at
 * the call sites.
 */
void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}